// flowey_core/pipeline.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Core types and traits used to create and work with flowey pipelines.

mod artifact;

pub use artifact::Artifact;

use self::internal::*;
use crate::node::FlowArch;
use crate::node::FlowNodeBase;
use crate::node::FlowPlatform;
use crate::node::FlowPlatformLinuxDistro;
use crate::node::GhUserSecretVar;
use crate::node::IntoRequest;
use crate::node::NodeHandle;
use crate::node::ReadVar;
use crate::node::WriteVar;
use crate::node::steps::ado::AdoResourcesRepositoryId;
use crate::node::user_facing::AdoRuntimeVar;
use crate::node::user_facing::GhPermission;
use crate::node::user_facing::GhPermissionValue;
use crate::patch::PatchResolver;
use crate::patch::ResolvedPatches;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::PathBuf;

/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
///
/// Everything in this module is a re-export; the items themselves are defined
/// elsewhere in this module or in `crate::node`.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPool;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}
63
64fn linux_distro() -> FlowPlatformLinuxDistro {
65    // Check for nix environment first - takes precedence over distro detection
66    if std::env::var("IN_NIX_SHELL").is_ok() {
67        return FlowPlatformLinuxDistro::Nix;
68    }
69
70    // A `nix develop` shell doesn't set `IN_NIX_SHELL`, but the PATH should include a nix store path
71    if std::env::var("PATH").is_ok_and(|path| path.contains("/nix/store")) {
72        return FlowPlatformLinuxDistro::Nix;
73    }
74
75    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
76        if etc_os_release.contains("ID=ubuntu") {
77            FlowPlatformLinuxDistro::Ubuntu
78        } else if etc_os_release.contains("ID=fedora") {
79            FlowPlatformLinuxDistro::Fedora
80        } else if etc_os_release.contains("ID=azurelinux") || etc_os_release.contains("ID=mariner")
81        {
82            FlowPlatformLinuxDistro::AzureLinux
83        } else if etc_os_release.contains("ID=arch") {
84            FlowPlatformLinuxDistro::Arch
85        } else {
86            FlowPlatformLinuxDistro::Unknown
87        }
88    } else {
89        FlowPlatformLinuxDistro::Unknown
90    }
91}
92
/// Extension trait for resolving a flowey type to the value corresponding to
/// the machine flowey is currently running on.
pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self;
}
99
100impl HostExt for FlowPlatform {
101    /// Return the platform of the current host machine.
102    ///
103    /// Will panic on non-local backends.
104    fn host(backend_hint: PipelineBackendHint) -> Self {
105        if !matches!(backend_hint, PipelineBackendHint::Local) {
106            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
107        }
108
109        if cfg!(target_os = "windows") {
110            Self::Windows
111        } else if cfg!(target_os = "linux") {
112            Self::Linux(linux_distro())
113        } else if cfg!(target_os = "macos") {
114            Self::MacOs
115        } else {
116            panic!("no valid host-os")
117        }
118    }
119}
120
121impl HostExt for FlowArch {
122    /// Return the arch of the current host machine.
123    ///
124    /// Will panic on non-local backends.
125    fn host(backend_hint: PipelineBackendHint) -> Self {
126        if !matches!(backend_hint, PipelineBackendHint::Local) {
127            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
128        }
129
130        // xtask-fmt allow-target-arch oneoff-flowey
131        if cfg!(target_arch = "x86_64") {
132            Self::X86_64
133        // xtask-fmt allow-target-arch oneoff-flowey
134        } else if cfg!(target_arch = "aarch64") {
135            Self::Aarch64
136        } else {
137            panic!("no valid host-arch")
138        }
139    }
140}
141
/// Trigger ADO pipelines on a schedule (cron).
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
156
/// Trigger ADO pipelines per PR
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true` (see this type's `Default` impl).
    pub auto_cancel: bool,
}
172
/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}
191
192impl Default for AdoPrTriggers {
193    fn default() -> Self {
194        Self {
195            branches: Vec::new(),
196            exclude_branches: Vec::new(),
197            run_on_draft: false,
198            auto_cancel: true,
199        }
200    }
201}
202
/// ADO repository resource.
///
/// Declared via [`Pipeline::ado_add_resources_repository`].
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}
215
/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// GitHub repository
    GitHub,
}
224
/// ADO repository ref
///
/// Generic over the parameter representation `P`: user-facing code uses the
/// default [`UseParameter<String>`] handle, while flowey internally stores
/// the parameter's index instead (see `ado_add_resources_repository`).
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}
233
/// Trigger Github Actions pipelines on a schedule (cron).
///
/// Added via [`Pipeline::gh_add_schedule_trigger`].
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
243
/// Trigger Github Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    /// (e.g: "opened", "synchronize" — see [`GhPrTriggers::new_draftable`]).
    pub types: Vec<String>,
}
259
/// Trigger Github Actions pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
}
276
277impl GhPrTriggers {
278    /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
279    pub fn new_draftable() -> Self {
280        Self {
281            branches: Vec::new(),
282            exclude_branches: Vec::new(),
283            types: vec![
284                "opened".into(),
285                "synchronize".into(),
286                "reopened".into(),
287                "ready_for_review".into(),
288            ],
289            auto_cancel: true,
290        }
291    }
292}
293
/// OS label used to select a GitHub-hosted runner image.
///
/// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
/// for the set of available hosted images.
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    /// Latest Ubuntu image
    UbuntuLatest,
    /// Ubuntu 24.04
    Ubuntu2404,
    /// Ubuntu 22.04
    Ubuntu2204,
    /// Latest Windows image
    WindowsLatest,
    /// Windows Server 2025
    Windows2025,
    /// Windows Server 2022
    Windows2022,
    /// Ubuntu 24.04 (ARM)
    Ubuntu2404Arm,
    /// Ubuntu 22.04 (ARM)
    Ubuntu2204Arm,
    /// Windows 11 (ARM)
    Windows11Arm,
    /// Escape hatch: any other runner label string
    Custom(String),
}
307
/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// A GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self hosted runners are selected by matching runner labels to `labels`.
    /// 'self-hosted' is a common label for self hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// This uses a runner belonging to `group` that matches all labels in `labels`.
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}
324
325impl GhRunner {
326    /// Whether this is a self-hosted runner with the provided label
327    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
328        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
329    }
330}
331
// TODO: support a more structured format for demands
// See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
/// An ADO agent pool selector.
#[derive(Debug, Clone)]
pub struct AdoPool {
    /// Name of the agent pool.
    pub name: String,
    /// Raw demand strings used to select agents from the pool (see the TODO
    /// above regarding a structured representation).
    pub demands: Vec<String>,
}
339
/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}
350
/// Opaque handle to a pipeline-level parameter of type `T`.
///
/// Obtained from the `Pipeline::new_parameter_*` family of methods.
#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    // Index of the parameter within the pipeline's `parameters` list.
    idx: usize,
    // Marker tying this handle to the parameter's Rust type `T`.
    _kind: std::marker::PhantomData<T>,
}
357
/// Opaque handle to an artifact which must be published by a single job.
///
/// Obtained from [`Pipeline::new_artifact`].
#[must_use]
pub struct PublishArtifact {
    // Index of the artifact within the pipeline's `artifacts` list.
    idx: usize,
}
363
/// Opaque handle to an artifact which can be used by one or more jobs.
///
/// Obtained from [`Pipeline::new_artifact`].
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    // Index of the artifact within the pipeline's `artifacts` list.
    idx: usize,
}
370
/// Opaque handle to an artifact of type `T` which must be published by a single job.
///
/// Obtained from [`Pipeline::new_typed_artifact`]; wraps an untyped
/// [`PublishArtifact`] plus a marker for the artifact's type.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);
374
/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
///
/// Obtained from [`Pipeline::new_typed_artifact`]; wraps an untyped
/// [`UseArtifact`] plus a marker for the artifact's type.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);
379
380impl<T> Clone for UseTypedArtifact<T> {
381    fn clone(&self) -> Self {
382        UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
383    }
384}
385
/// A flowey pipeline under construction: jobs, artifacts, parameters, and
/// inter-job dependencies, plus backend-specific (ADO / GitHub Actions)
/// configuration.
///
/// Build one via [`Pipeline::new`], then configure it using the builder-style
/// methods on this type.
#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    // Extra (dependency, dependent) job-index edges not implied by artifacts.
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}
414
415impl Pipeline {
416    pub fn new() -> Pipeline {
417        Pipeline::default()
418    }
419
420    /// Inject all pipeline jobs with some common logic. (e.g: to resolve common
421    /// configuration requirements shared by all jobs).
422    ///
423    /// Can only be invoked once per pipeline.
424    #[track_caller]
425    pub fn inject_all_jobs_with(
426        &mut self,
427        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
428    ) -> &mut Self {
429        if self.inject_all_jobs_with.is_some() {
430            panic!("can only call inject_all_jobs_with once!")
431        }
432        self.inject_all_jobs_with = Some(Box::new(cb));
433        self
434    }
435
    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }
469
470    /// (ADO only) Provide a callback function which will be used to
471    /// post-process any YAML flowey generates for the pipeline.
472    ///
473    /// Until flowey defines a stable API for maintaining out-of-tree backends,
474    /// this method can be used to integrate the output from the generic ADO
475    /// backend with any organization-specific templates that one may be
476    /// required to use (e.g: for compliance reasons).
477    pub fn ado_post_process_yaml(
478        &mut self,
479        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
480    ) -> &mut Self {
481        self.ado_post_process_yaml_cb = Some(Box::new(cb));
482        self
483    }
484
485    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
486    /// to set up multiple schedules runs.
487    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
488        self.ado_schedule_triggers.push(triggers);
489        self
490    }
491
492    /// (ADO only) Set a PR trigger. Calling this method multiple times will
493    /// overwrite any previously set triggers.
494    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
495        self.ado_pr_triggers = Some(triggers);
496        self
497    }
498
499    /// (ADO only) Set a CI trigger. Calling this method multiple times will
500    /// overwrite any previously set triggers.
501    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
502        self.ado_ci_triggers = Some(triggers);
503        self
504    }
505
506    /// (ADO only) Declare a new repository resource, returning a type-safe
507    /// handle which downstream ADO steps are able to consume via
508    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
509    pub fn ado_add_resources_repository(
510        &mut self,
511        repo: AdoResourcesRepository,
512    ) -> AdoResourcesRepositoryId {
513        let AdoResourcesRepository {
514            repo_type,
515            name,
516            git_ref,
517            endpoint,
518        } = repo;
519
520        let repo_id = format!("repo{}", self.ado_resources_repository.len());
521
522        self.ado_resources_repository
523            .push(InternalAdoResourcesRepository {
524                repo_id: repo_id.clone(),
525                repo_type,
526                name,
527                git_ref: match git_ref {
528                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
529                    AdoResourcesRepositoryRef::Parameter(p) => {
530                        AdoResourcesRepositoryRef::Parameter(p.idx)
531                    }
532                },
533                endpoint,
534            });
535        AdoResourcesRepositoryId { repo_id }
536    }
537
538    /// (GitHub Actions only) Set the pipeline-level name.
539    ///
540    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
541    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
542        self.gh_name = Some(name.as_ref().into());
543        self
544    }
545
    /// Provide a YAML template used to bootstrap flowey at the start of a
    /// GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }
579
580    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
581    /// to set up multiple schedules runs.
582    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
583        self.gh_schedule_triggers.push(triggers);
584        self
585    }
586
587    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
588    /// overwrite any previously set triggers.
589    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
590        self.gh_pr_triggers = Some(triggers);
591        self
592    }
593
594    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
595    /// overwrite any previously set triggers.
596    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
597        self.gh_ci_triggers = Some(triggers);
598        self
599    }
600
601    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
602    ///
603    /// For more information on defining secrets for use in GitHub Actions, see
604    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
605    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
606        GhUserSecretVar(secret_name.as_ref().to_string())
607    }
608
609    pub fn new_job(
610        &mut self,
611        platform: FlowPlatform,
612        arch: FlowArch,
613        label: impl AsRef<str>,
614    ) -> PipelineJob<'_> {
615        let idx = self.jobs.len();
616        self.jobs.push(PipelineJobMetadata {
617            root_nodes: BTreeMap::new(),
618            patches: ResolvedPatches::build(),
619            label: label.as_ref().into(),
620            platform,
621            arch,
622            cond_param_idx: None,
623            timeout_minutes: None,
624            command_wrapper: None,
625            ado_pool: None,
626            ado_variables: BTreeMap::new(),
627            gh_override_if: None,
628            gh_global_env: BTreeMap::new(),
629            gh_pool: None,
630            gh_permissions: BTreeMap::new(),
631        });
632
633        PipelineJob {
634            pipeline: self,
635            job_idx: idx,
636        }
637    }
638
    /// Declare a dependency between two jobs that is not the result of an
    /// artifact.
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        // Edges are stored as (dependency, dependent) job-index pairs.
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }
650
651    #[track_caller]
652    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
653        let name = name.as_ref();
654        let owned_name = name.to_string();
655
656        let not_exists = self.artifact_names.insert(owned_name.clone());
657        if !not_exists {
658            panic!("duplicate artifact name: {}", name)
659        }
660
661        let idx = self.artifacts.len();
662        self.artifacts.push(ArtifactMeta {
663            name: owned_name,
664            published_by_job: None,
665            used_by_jobs: BTreeSet::new(),
666        });
667
668        (PublishArtifact { idx }, UseArtifact { idx })
669    }
670
671    /// Returns a pair of opaque handles to a new artifact for use across jobs
672    /// in the pipeline.
673    #[track_caller]
674    pub fn new_typed_artifact<T: Artifact>(
675        &mut self,
676        name: impl AsRef<str>,
677    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
678        let (publish, use_artifact) = self.new_artifact(name);
679        (
680            PublishTypedArtifact(publish, std::marker::PhantomData),
681            UseTypedArtifact(use_artifact, std::marker::PhantomData),
682        )
683    }
684
685    /// (ADO only) Set the pipeline-level name.
686    ///
687    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
688    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
689        self.ado_name = Some(name);
690        self
691    }
692
693    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
694    ///
695    /// `name` and `value` are both arbitrary strings.
696    ///
697    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
698    /// converted into a [`ReadVar<String>`] using
699    /// [`NodeCtx::get_ado_variable`].
700    ///
701    /// NOTE: Unless required by some particular third-party task, it's strongly
702    /// recommended to _avoid_ using this method, and to simply use
703    /// [`ReadVar::from_static`] to get a obtain a static variable.
704    ///
705    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
706    pub fn ado_new_named_variable(
707        &mut self,
708        name: impl AsRef<str>,
709        value: impl AsRef<str>,
710    ) -> AdoRuntimeVar {
711        let name = name.as_ref();
712        let value = value.as_ref();
713
714        self.ado_variables.insert(name.into(), value.into());
715
716        // safe, since we'll ensure that the global exists in the ADO backend
717        AdoRuntimeVar::dangerous_from_global(name, false)
718    }
719
720    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
721    /// variables at once.
722    ///
723    /// This is a convenience method to streamline invoking
724    /// [`Self::ado_new_named_variable`] multiple times.
725    ///
726    /// NOTE: Unless required by some particular third-party task, it's strongly
727    /// recommended to _avoid_ using this method, and to simply use
728    /// [`ReadVar::from_static`] to get a obtain a static variable.
729    ///
730    /// DEVNOTE: In the future, this API may be updated to return a handle that
731    /// will allow resolving the resulting `AdoRuntimeVar`, but for
732    /// implementation expediency, this API does not currently do this. If you
733    /// need to read the value of this variable at runtime, you may need to
734    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
735    ///
736    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
737    pub fn ado_new_named_variables<K, V>(
738        &mut self,
739        vars: impl IntoIterator<Item = (K, V)>,
740    ) -> &mut Self
741    where
742        K: AsRef<str>,
743        V: AsRef<str>,
744    {
745        self.ado_variables.extend(
746            vars.into_iter()
747                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
748        );
749        self
750    }
751
    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
793
794    /// Declare a pipeline-level runtime parameter with type `i64`.
795    ///
796    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
797    /// [`PipelineJobCtx::use_parameter`] method.
798    ///
799    /// `name` is the name of the parameter.
800    ///
801    /// `description` is an arbitrary string, which will be be shown to users.
802    ///
803    /// `kind` is the type of parameter and if it should be treated as a stable
804    /// external API to callers of the pipeline.
805    ///
806    /// `default` is the default value for the parameter. If none is provided,
807    /// the parameter _must_ be specified in order for the pipeline to run.
808    ///
809    /// `possible_values` can be used to limit the set of valid values the
810    /// parameter accepts.
811    pub fn new_parameter_num(
812        &mut self,
813        name: impl AsRef<str>,
814        description: impl AsRef<str>,
815        kind: ParameterKind,
816        default: Option<i64>,
817        possible_values: Option<Vec<i64>>,
818    ) -> UseParameter<i64> {
819        let idx = self.parameters.len();
820        let name = new_parameter_name(name, kind.clone());
821        self.parameters.push(ParameterMeta {
822            parameter: Parameter::Num {
823                name,
824                description: description.as_ref().into(),
825                kind,
826                default,
827                possible_values,
828            },
829            used_by_jobs: BTreeSet::new(),
830        });
831
832        UseParameter {
833            idx,
834            _kind: std::marker::PhantomData,
835        }
836    }
837
838    /// Declare a pipeline-level runtime parameter with type `String`.
839    ///
840    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
841    /// [`PipelineJobCtx::use_parameter`] method.
842    ///
843    /// `name` is the name of the parameter.
844    ///
845    /// `description` is an arbitrary string, which will be be shown to users.
846    ///
847    /// `kind` is the type of parameter and if it should be treated as a stable
848    /// external API to callers of the pipeline.
849    ///
850    /// `default` is the default value for the parameter. If none is provided,
851    /// the parameter _must_ be specified in order for the pipeline to run.
852    ///
853    /// `possible_values` allows restricting inputs to a set of possible values.
854    /// Depending on the backend, these options may be presented as a set of
855    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
856    /// then any string is allowed.
857    pub fn new_parameter_string(
858        &mut self,
859        name: impl AsRef<str>,
860        description: impl AsRef<str>,
861        kind: ParameterKind,
862        default: Option<impl AsRef<str>>,
863        possible_values: Option<Vec<String>>,
864    ) -> UseParameter<String> {
865        let idx = self.parameters.len();
866        let name = new_parameter_name(name, kind.clone());
867        self.parameters.push(ParameterMeta {
868            parameter: Parameter::String {
869                name,
870                description: description.as_ref().into(),
871                kind,
872                default: default.map(|x| x.as_ref().into()),
873                possible_values,
874            },
875            used_by_jobs: BTreeSet::new(),
876        });
877
878        UseParameter {
879            idx,
880            _kind: std::marker::PhantomData,
881        }
882    }
883}
884
/// Context object handed to [`PipelineJob::dep_on`] closures, used to wire a
/// job up to artifacts, parameters, and flow-node requests.
pub struct PipelineJobCtx<'a> {
    // The pipeline this job belongs to.
    pipeline: &'a mut Pipeline,
    // Index of the job within the pipeline's job list.
    job_idx: usize,
}
889
890impl PipelineJobCtx<'_> {
891    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
892    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
893        self.pipeline.dummy_done_idx += 1;
894        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
895    }
896
897    /// Claim that this job will use this artifact, obtaining a path to a folder
898    /// with the artifact's contents.
899    pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
900        self.pipeline.artifacts[artifact.idx]
901            .used_by_jobs
902            .insert(self.job_idx);
903
904        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
905            &self.pipeline.artifacts[artifact.idx].name,
906            true,
907        ))
908    }
909
910    /// Claim that this job will publish this artifact, obtaining a path to a
911    /// fresh, empty folder which will be published as the specific artifact at
912    /// the end of the job.
913    pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
914        let existing = self.pipeline.artifacts[artifact.idx]
915            .published_by_job
916            .replace(self.job_idx);
917        assert!(existing.is_none()); // PublishArtifact isn't cloneable
918
919        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
920            &self.pipeline.artifacts[artifact.idx].name,
921            false,
922        ))
923    }
924
925    fn helper_request<R: IntoRequest>(&mut self, req: R)
926    where
927        R::Node: 'static,
928    {
929        self.pipeline.jobs[self.job_idx]
930            .root_nodes
931            .entry(NodeHandle::from_type::<R::Node>())
932            .or_default()
933            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
934    }
935
936    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
937        let artifact_map_idx = self.pipeline.artifact_map_idx;
938        self.pipeline.artifact_map_idx += 1;
939
940        let backing_var = format!("artifact_map{}", artifact_map_idx);
941        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
942        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
943        (read_var, write_var)
944    }
945
946    /// Claim that this job will use this artifact, obtaining the resolved
947    /// contents of the artifact.
948    pub fn use_typed_artifact<T: Artifact>(
949        &mut self,
950        artifact: &UseTypedArtifact<T>,
951    ) -> ReadVar<T> {
952        let artifact_path = self.use_artifact(&artifact.0);
953        let (read, write) = self.new_artifact_map_vars::<T>();
954        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
955        read
956    }
957
958    /// Claim that this job will publish this artifact, obtaining a variable to
959    /// write the artifact's contents to. The artifact will be published at
960    /// the end of the job.
961    pub fn publish_typed_artifact<T: Artifact>(
962        &mut self,
963        artifact: PublishTypedArtifact<T>,
964    ) -> WriteVar<T> {
965        let artifact_path = self.publish_artifact(artifact.0);
966        let (read, write) = self.new_artifact_map_vars::<T>();
967        let done = self.new_done_handle();
968        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
969        write
970    }
971
972    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
973    /// specified at runtime.
974    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
975    where
976        T: Serialize + DeserializeOwned,
977    {
978        self.pipeline.parameters[param.idx]
979            .used_by_jobs
980            .insert(self.job_idx);
981
982        crate::node::thin_air_read_runtime_var(
983            self.pipeline.parameters[param.idx]
984                .parameter
985                .name()
986                .to_string(),
987        )
988    }
989
990    /// Shortcut which allows defining a bool pipeline parameter within a Job.
991    ///
992    /// To share a single parameter between multiple jobs, don't use this method
993    /// - use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`] instead.
994    pub fn new_parameter_bool(
995        &mut self,
996        name: impl AsRef<str>,
997        description: impl AsRef<str>,
998        kind: ParameterKind,
999        default: Option<bool>,
1000    ) -> ReadVar<bool> {
1001        let param = self
1002            .pipeline
1003            .new_parameter_bool(name, description, kind, default);
1004        self.use_parameter(param)
1005    }
1006
1007    /// Shortcut which allows defining a number pipeline parameter within a Job.
1008    ///
1009    /// To share a single parameter between multiple jobs, don't use this method
1010    /// - use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`] instead.
1011    pub fn new_parameter_num(
1012        &mut self,
1013        name: impl AsRef<str>,
1014        description: impl AsRef<str>,
1015        kind: ParameterKind,
1016        default: Option<i64>,
1017        possible_values: Option<Vec<i64>>,
1018    ) -> ReadVar<i64> {
1019        let param =
1020            self.pipeline
1021                .new_parameter_num(name, description, kind, default, possible_values);
1022        self.use_parameter(param)
1023    }
1024
1025    /// Shortcut which allows defining a string pipeline parameter within a Job.
1026    ///
1027    /// To share a single parameter between multiple jobs, don't use this method
1028    /// - use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`] instead.
1029    pub fn new_parameter_string(
1030        &mut self,
1031        name: impl AsRef<str>,
1032        description: impl AsRef<str>,
1033        kind: ParameterKind,
1034        default: Option<String>,
1035        possible_values: Option<Vec<String>>,
1036    ) -> ReadVar<String> {
1037        let param =
1038            self.pipeline
1039                .new_parameter_string(name, description, kind, default, possible_values);
1040        self.use_parameter(param)
1041    }
1042}
1043
/// Builder for configuring a single job within a [`Pipeline`].
///
/// Call [`PipelineJob::finish`] once the job is fully described.
#[must_use]
pub struct PipelineJob<'a> {
    // The pipeline this job is being added to.
    pipeline: &'a mut Pipeline,
    // Index of the job within the pipeline's job list.
    job_idx: usize,
}
1049
1050impl PipelineJob<'_> {
1051    /// (ADO only) specify which agent pool this job will be run on.
1052    pub fn ado_set_pool(self, pool: AdoPool) -> Self {
1053        self.pipeline.jobs[self.job_idx].ado_pool = Some(pool);
1054        self
1055    }
1056
1057    /// (ADO only) specify which agent pool this job will be run on, with
1058    /// additional special runner demands.
1059    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
1060        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
1061            name: pool.as_ref().into(),
1062            demands,
1063        });
1064        self
1065    }
1066
1067    /// (ADO only) Declare a job-level, named, read-only ADO variable.
1068    ///
1069    /// `name` and `value` are both arbitrary strings, which may include ADO
1070    /// template expressions.
1071    ///
1072    /// NOTE: Unless required by some particular third-party task, it's strongly
1073    /// recommended to _avoid_ using this method, and to simply use
1074    /// [`ReadVar::from_static`] to get a obtain a static variable.
1075    ///
1076    /// DEVNOTE: In the future, this API may be updated to return a handle that
1077    /// will allow resolving the resulting `AdoRuntimeVar`, but for
1078    /// implementation expediency, this API does not currently do this. If you
1079    /// need to read the value of this variable at runtime, you may need to
1080    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
1081    ///
1082    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
1083    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
1084        let name = name.as_ref();
1085        let value = value.as_ref();
1086        self.pipeline.jobs[self.job_idx]
1087            .ado_variables
1088            .insert(name.into(), value.into());
1089        self
1090    }
1091
1092    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
1093    /// once.
1094    ///
1095    /// This is a convenience method to streamline invoking
1096    /// [`Self::ado_new_named_variable`] multiple times.
1097    ///
1098    /// NOTE: Unless required by some particular third-party task, it's strongly
1099    /// recommended to _avoid_ using this method, and to simply use
1100    /// [`ReadVar::from_static`] to get a obtain a static variable.
1101    ///
1102    /// DEVNOTE: In the future, this API may be updated to return a handle that
1103    /// will allow resolving the resulting `AdoRuntimeVar`, but for
1104    /// implementation expediency, this API does not currently do this. If you
1105    /// need to read the value of this variable at runtime, you may need to
1106    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
1107    ///
1108    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
1109    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
1110    where
1111        K: AsRef<str>,
1112        V: AsRef<str>,
1113    {
1114        self.pipeline.jobs[self.job_idx].ado_variables.extend(
1115            vars.into_iter()
1116                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
1117        );
1118        self
1119    }
1120
1121    /// Overrides the id of the job.
1122    ///
1123    /// Flowey typically generates a reasonable job ID but some use cases that depend
1124    /// on the ID may find it useful to override it to something custom.
1125    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
1126        self.pipeline
1127            .ado_job_id_overrides
1128            .insert(self.job_idx, name.as_ref().into());
1129        self
1130    }
1131
1132    /// (GitHub Actions only) specify which Github runner this job will be run on.
1133    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
1134        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
1135        self
1136    }
1137
1138    /// (GitHub Actions only) Manually override the `if:` condition for this
1139    /// particular job.
1140    ///
1141    /// **This is dangerous**, as an improperly set `if` condition may break
1142    /// downstream flowey jobs which assume flowey is in control of the job's
1143    /// scheduling logic.
1144    ///
1145    /// See
1146    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
1147    /// for more info.
1148    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
1149        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
1150        self
1151    }
1152
1153    /// (GitHub Actions only) Declare a global job-level environment variable,
1154    /// visible to all downstream steps.
1155    ///
1156    /// `name` and `value` are both arbitrary strings, which may include GitHub
1157    /// Actions template expressions.
1158    ///
1159    /// **This is dangerous**, as it is easy to misuse this API in order to
1160    /// write a node which takes an implicit dependency on there being a global
1161    /// variable set on its behalf by the top-level pipeline code, making it
1162    /// difficult to "locally reason" about the behavior of a node simply by
1163    /// reading its code.
1164    ///
1165    /// Whenever possible, nodes should "late bind" environment variables:
1166    /// accepting a compile-time / runtime flowey parameter, and then setting it
1167    /// prior to executing a child command that requires it.
1168    ///
1169    /// Only use this API in exceptional cases, such as obtaining an environment
1170    /// variable whose value is determined by a job-level GitHub Actions
1171    /// expression evaluation.
1172    pub fn gh_dangerous_global_env_var(
1173        self,
1174        name: impl AsRef<str>,
1175        value: impl AsRef<str>,
1176    ) -> Self {
1177        let name = name.as_ref();
1178        let value = value.as_ref();
1179        self.pipeline.jobs[self.job_idx]
1180            .gh_global_env
1181            .insert(name.into(), value.into());
1182        self
1183    }
1184
1185    /// (GitHub Actions only) Grant permissions required by nodes in the job.
1186    ///
1187    /// For a given node handle, grant the specified permissions.
1188    /// The list provided must match the permissions specified within the node
1189    /// using `requires_permission`.
1190    ///
1191    /// NOTE: While this method is called at a node-level for auditability, the emitted
1192    /// yaml grants permissions at the job-level.
1193    ///
1194    /// This can lead to weird situations where node 1 might not specify a permission
1195    /// required according to Github Actions, but due to job-level granting of the permission
1196    /// by another node 2, the pipeline executes even though it wouldn't if node 2 was removed.
1197    ///
1198    /// For available permission scopes and their descriptions, see
1199    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
1200    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
1201        self,
1202        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
1203    ) -> Self {
1204        let node_handle = NodeHandle::from_type::<N>();
1205        for (permission, value) in permissions {
1206            self.pipeline.jobs[self.job_idx]
1207                .gh_permissions
1208                .entry(node_handle)
1209                .or_default()
1210                .insert(permission, value);
1211        }
1212        self
1213    }
1214
1215    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
1216        self.pipeline.jobs[self.job_idx]
1217            .patches
1218            .apply_patchfn(patchfn);
1219        self
1220    }
1221
1222    /// Set a timeout for the job, in minutes.
1223    ///
1224    /// Not calling this will result in the platform's default timeout being used,
1225    /// which is typically 60 minutes, but may vary.
1226    pub fn with_timeout_in_minutes(self, timeout: u32) -> Self {
1227        self.pipeline.jobs[self.job_idx].timeout_minutes = Some(timeout);
1228        self
1229    }
1230
1231    /// (ADO+Local Only) Only run the job if the specified condition is true.
1232    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
1233        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
1234        self.pipeline.parameters[cond.idx]
1235            .used_by_jobs
1236            .insert(self.job_idx);
1237        self
1238    }
1239
1240    /// Set a [`CommandWrapperKind`] that will be applied to all shell
1241    /// commands executed in this job's steps.
1242    ///
1243    /// The wrapper is applied both when running locally (via direct run)
1244    /// and when running in CI (the kind is serialized into
1245    /// `pipeline.json` and reconstructed at runtime).
1246    ///
1247    /// [`CommandWrapperKind`]: crate::shell::CommandWrapperKind
1248    pub fn set_command_wrapper(self, wrapper: crate::shell::CommandWrapperKind) -> Self {
1249        self.pipeline.jobs[self.job_idx].command_wrapper = Some(wrapper);
1250        self
1251    }
1252
1253    /// Add a flow node which will be run as part of the job.
1254    pub fn dep_on<R: IntoRequest + 'static>(
1255        self,
1256        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
1257    ) -> Self {
1258        // JobToNodeCtx will ensure artifact deps are taken care of
1259        let req = f(&mut PipelineJobCtx {
1260            pipeline: self.pipeline,
1261            job_idx: self.job_idx,
1262        });
1263
1264        self.pipeline.jobs[self.job_idx]
1265            .root_nodes
1266            .entry(NodeHandle::from_type::<R::Node>())
1267            .or_default()
1268            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
1269
1270        self
1271    }
1272
1273    /// Finish describing the pipeline job.
1274    pub fn finish(self) -> PipelineJobHandle {
1275        PipelineJobHandle {
1276            job_idx: self.job_idx,
1277        }
1278    }
1279
1280    /// Return the job's platform.
1281    pub fn get_platform(&self) -> FlowPlatform {
1282        self.pipeline.jobs[self.job_idx].platform
1283    }
1284
1285    /// Return the job's architecture.
1286    pub fn get_arch(&self) -> FlowArch {
1287        self.pipeline.jobs[self.job_idx].arch
1288    }
1289}
1290
/// Cloneable handle to a fully-described pipeline job (see
/// [`PipelineJob::finish`]).
#[derive(Clone)]
pub struct PipelineJobHandle {
    // Index of the job within the pipeline's job list.
    job_idx: usize,
}
1295
1296impl PipelineJobHandle {
1297    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
1298        self.job_idx == job.job_idx
1299    }
1300}
1301
/// Hint as to which backend a pipeline is being built for.
#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}
1311
1312/// Trait for types that can be converted into a [`Pipeline`].
1313///
1314/// This is the primary entry point for defining flowey pipelines. Implement this trait
1315/// to create a pipeline definition that can be executed locally or converted to CI YAML.
1316///
1317/// # Example
1318///
1319/// ```rust,no_run
1320/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint};
1321/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1322///
1323/// struct MyPipeline;
1324///
1325/// impl IntoPipeline for MyPipeline {
1326///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1327///         let mut pipeline = Pipeline::new();
1328///
1329///         // Define a job that runs on Linux x86_64
1330///         let _job = pipeline
1331///             .new_job(
1332///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1333///                 FlowArch::X86_64,
1334///                 "build"
1335///             )
1336///             .finish();
1337///
1338///         Ok(pipeline)
1339///     }
1340/// }
1341/// ```
1342///
1343/// # Complex Example with Parameters and Artifacts
1344///
1345/// ```rust,ignore
1346/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint, ParameterKind};
1347/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1348///
1349/// struct BuildPipeline;
1350///
1351/// impl IntoPipeline for BuildPipeline {
1352///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1353///         let mut pipeline = Pipeline::new();
1354///
1355///         // Define a runtime parameter
1356///         let enable_tests = pipeline.new_parameter_bool(
1357///             "enable_tests",
1358///             "Whether to run tests",
1359///             ParameterKind::Stable,
1360///             Some(true) // default value
1361///         );
1362///
1363///         // Create an artifact for passing data between jobs
1364///         let (publish_build, use_build) = pipeline.new_artifact("build-output");
1365///
1366///         // Job 1: Build
1367///         let build_job = pipeline
1368///             .new_job(
1369///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1370///                 FlowArch::X86_64,
1371///                 "build"
1372///             )
1373///             .with_timeout_in_minutes(30)
1374///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node::Request {
1375///                 output_dir: ctx.publish_artifact(publish_build),
1376///             })
1377///             .finish();
1378///
1379///         // Job 2: Test (conditionally run based on parameter)
1380///         let _test_job = pipeline
1381///             .new_job(
1382///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1383///                 FlowArch::X86_64,
1384///                 "test"
1385///             )
1386///             .with_condition(enable_tests)
1387///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node2::Request {
1388///                 input_dir: ctx.use_artifact(&use_build),
1389///             })
1390///             .finish();
1391///
1392///         Ok(pipeline)
1393///     }
1394/// }
1395/// ```
pub trait IntoPipeline {
    /// Consume `self`, producing a [`Pipeline`] suited to the backend
    /// indicated by `backend_hint` (local, ADO, or GitHub Actions).
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}
1399
1400fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
1401    match kind {
1402        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
1403        ParameterKind::Stable => name.as_ref().into(),
1404    }
1405}
1406
/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    /// Compute the canonical runtime variable name backing an artifact's
    /// folder path, so the publishing job and all consuming jobs agree on the
    /// same variable.
    ///
    /// `is_use` selects the consumer-side name (`artifact_use_from_*`) vs. the
    /// publisher-side name (`artifact_publish_from_*`).
    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    /// Everything needed to describe a single job within a pipeline.
    pub struct PipelineJobMetadata {
        /// Root flow-node requests this job runs, keyed by node handle, with
        /// each request serialized as JSON bytes.
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        /// Patches to apply to this job's flow.
        pub patches: PatchResolver,
        /// Human-readable job label.
        pub label: String,
        /// Platform this job runs on.
        pub platform: FlowPlatform,
        /// Architecture this job runs on.
        pub arch: FlowArch,
        /// If set, index of the bool parameter gating whether this job runs.
        pub cond_param_idx: Option<usize>,
        /// Optional job timeout, in minutes.
        pub timeout_minutes: Option<u32>,
        /// Optional wrapper applied to shell commands in this job's steps.
        pub command_wrapper: Option<crate::shell::CommandWrapperKind>,
        // backend specific
        pub ado_pool: Option<AdoPool>,
        pub ado_variables: BTreeMap<String, String>,
        pub gh_override_if: Option<String>,
        pub gh_pool: Option<GhRunner>,
        pub gh_global_env: BTreeMap<String, String>,
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    /// Bookkeeping for a declared pipeline artifact.
    #[derive(Debug)]
    pub struct ArtifactMeta {
        /// The artifact's name.
        pub name: String,
        /// Index of the (at most one) job that publishes this artifact.
        pub published_by_job: Option<usize>,
        /// Indices of the jobs that consume this artifact.
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Bookkeeping for a declared pipeline parameter.
    #[derive(Debug)]
    pub struct ParameterMeta {
        /// The parameter itself.
        pub parameter: Parameter,
        /// Indices of the jobs that read this parameter.
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all field marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        /// Consume a [`Pipeline`], flattening it into its publicly accessible
        /// components.
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            // Run the "inject into every job" callback (if registered) before
            // tearing the pipeline apart below.
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            // Deliberately exhaustive destructure (no `..`): adding a field to
            // `Pipeline` will fail to compile until it is handled here.
            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            // Pipeline-wide patch functions apply to every job's resolver.
            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    /// A declared pipeline-level runtime parameter, in one of the supported
    /// types.
    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        /// Return the parameter's backing name, regardless of variant.
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}