flowey_core/pipeline.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Core types and traits used to create and work with flowey pipelines.

mod artifact;

pub use artifact::Artifact;

use self::internal::*;
use crate::node::FlowArch;
use crate::node::FlowNodeBase;
use crate::node::FlowPlatform;
use crate::node::FlowPlatformLinuxDistro;
use crate::node::GhUserSecretVar;
use crate::node::IntoRequest;
use crate::node::NodeHandle;
use crate::node::ReadVar;
use crate::node::WriteVar;
use crate::node::steps::ado::AdoResourcesRepositoryId;
use crate::node::user_facing::AdoRuntimeVar;
use crate::node::user_facing::GhPermission;
use crate::node::user_facing::GhPermissionValue;
use crate::patch::PatchResolver;
use crate::patch::ResolvedPatches;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::PathBuf;

/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}

fn linux_distro() -> FlowPlatformLinuxDistro {
    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
        if etc_os_release.contains("ID=ubuntu") {
            FlowPlatformLinuxDistro::Ubuntu
        } else if etc_os_release.contains("ID=fedora") {
            FlowPlatformLinuxDistro::Fedora
        } else {
            FlowPlatformLinuxDistro::Unknown
        }
    } else {
        FlowPlatformLinuxDistro::Unknown
    }
}

pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self;
}

impl HostExt for FlowPlatform {
    /// Return the platform of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
        }

        if cfg!(target_os = "windows") {
            Self::Windows
        } else if cfg!(target_os = "linux") {
            Self::Linux(linux_distro())
        } else if cfg!(target_os = "macos") {
            Self::MacOs
        } else {
            panic!("no valid host-os")
        }
    }
}

impl HostExt for FlowArch {
    /// Return the arch of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
        }

        // xtask-fmt allow-target-arch oneoff-flowey
        if cfg!(target_arch = "x86_64") {
            Self::X86_64
        // xtask-fmt allow-target-arch oneoff-flowey
        } else if cfg!(target_arch = "aarch64") {
            Self::Aarch64
        } else {
            panic!("no valid host-arch")
        }
    }
}

/// Trigger ADO pipelines on a schedule
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline on a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger ADO pipelines per PR
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
}

/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}

impl Default for AdoPrTriggers {
    fn default() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            run_on_draft: false,
            auto_cancel: true,
        }
    }
}

/// ADO repository resource.
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}

/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// GitHub repository
    GitHub,
}

/// ADO repository ref
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}

/// Trigger GitHub Actions pipelines on a schedule
///
/// NOTE: GitHub Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline on a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger GitHub Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
}

/// Trigger GitHub Actions pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
}

impl GhPrTriggers {
    /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
    pub fn new_draftable() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            types: vec![
                "opened".into(),
                "synchronize".into(),
                "reopened".into(),
                "ready_for_review".into(),
            ],
            auto_cancel: true,
        }
    }
}

#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    UbuntuLatest,
    Ubuntu2204,
    Ubuntu2004,
    WindowsLatest,
    Windows2022,
    Windows2019,
    MacOsLatest,
    MacOs14,
    MacOs13,
    MacOs12,
    MacOs11,
    Custom(String),
}

/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self-hosted runner, selected by matching runner labels to `labels`.
    ///
    /// 'self-hosted' is a common label for self-hosted runners, but is not
    /// required. Labels are case-insensitive and can take the form of
    /// arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// A runner belonging to `group` that matches all labels in `labels`.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}

impl GhRunner {
    /// Whether this is a self-hosted runner with the provided label
    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
    }
}

/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}

#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    idx: usize,
    _kind: std::marker::PhantomData<T>,
}

/// Opaque handle to an artifact which must be published by a single job.
#[must_use]
pub struct PublishArtifact {
    idx: usize,
}

/// Opaque handle to an artifact which can be used by one or more jobs.
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    idx: usize,
}

/// Opaque handle to an artifact of type `T` which must be published by a single job.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);

/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);

impl<T> Clone for UseTypedArtifact<T> {
    fn clone(&self) -> Self {
        UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
    }
}

#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}

impl Pipeline {
    pub fn new() -> Pipeline {
        Pipeline::default()
    }

    /// Inject all pipeline jobs with some common logic. (e.g: to resolve common
    /// configuration requirements shared by all jobs).
    ///
    /// Can only be invoked once per pipeline.
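    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a hypothetical `common_cfg` node exists in
    /// your project:
    ///
    /// ```ignore
    /// pipeline.inject_all_jobs_with(|job| {
    ///     // `common_cfg::Request` is a placeholder for whatever shared
    ///     // configuration node your project defines
    ///     job.dep_on(|_ctx| common_cfg::Request::SetVerbose(true))
    /// });
    /// ```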
    #[track_caller]
    pub fn inject_all_jobs_with(
        &mut self,
        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
    ) -> &mut Self {
        if self.inject_all_jobs_with.is_some() {
            panic!("can only call inject_all_jobs_with once!")
        }
        self.inject_all_jobs_with = Some(Box::new(cb));
        self
    }

    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on Linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is used for multiple pipelines (e.g: a
    ///    debug vs. release pipeline).
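    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the template is checked in next to the
    /// pipeline definition as `ado-bootstrap-template.yml` (the file name is
    /// illustrative):
    ///
    /// ```ignore
    /// pipeline.ado_set_flowey_bootstrap_template(
    ///     include_str!("ado-bootstrap-template.yml").into(),
    /// );
    /// ```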
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }

    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
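    ///
    /// # Example
    ///
    /// A minimal sketch; `wrap_in_org_template` stands in for whatever
    /// organization-specific post-processing is required:
    ///
    /// ```ignore
    /// pipeline.ado_post_process_yaml(|yaml: serde_yaml::Value| {
    ///     // hypothetical helper which splices the generated YAML into an
    ///     // org-mandated wrapper template
    ///     wrap_in_org_template(yaml)
    /// });
    /// ```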
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }

    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }

    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }

    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }

    /// (ADO only) Declare a new repository resource, returning a type-safe
    /// handle which downstream ADO steps are able to consume via
    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
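    ///
    /// # Example
    ///
    /// A minimal sketch declaring a GitHub repository pinned to a fixed ref
    /// (the repo name and endpoint are placeholders):
    ///
    /// ```ignore
    /// let repo_id = pipeline.ado_add_resources_repository(AdoResourcesRepository {
    ///     repo_type: AdoResourcesRepositoryType::GitHub,
    ///     name: "contoso/widgets".into(),
    ///     git_ref: AdoResourcesRepositoryRef::Fixed("refs/heads/main".into()),
    ///     endpoint: Some("contoso-service-connection".into()),
    /// });
    /// ```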
    pub fn ado_add_resources_repository(
        &mut self,
        repo: AdoResourcesRepository,
    ) -> AdoResourcesRepositoryId {
        let AdoResourcesRepository {
            repo_type,
            name,
            git_ref,
            endpoint,
        } = repo;

        let repo_id = format!("repo{}", self.ado_resources_repository.len());

        self.ado_resources_repository
            .push(InternalAdoResourcesRepository {
                repo_id: repo_id.clone(),
                repo_type,
                name,
                git_ref: match git_ref {
                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
                    AdoResourcesRepositoryRef::Parameter(p) => {
                        AdoResourcesRepositoryRef::Parameter(p.idx)
                    }
                },
                endpoint,
            });
        AdoResourcesRepositoryId { repo_id }
    }

    /// (GitHub Actions only) Set the pipeline-level name.
    ///
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
        self.gh_name = Some(name.as_ref().into());
        self
    }

    /// (GitHub Actions only) Provide a YAML template used to bootstrap flowey
    /// at the start of a GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on Linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is used for multiple pipelines (e.g: a
    ///    debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }

    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called
    /// multiple times to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }

    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple
    /// times will overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple
    /// times will overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
    ///
    /// For more information on defining secrets for use in GitHub Actions, see
    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
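    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a secret named `MY_API_TOKEN` has already
    /// been configured on the repository:
    ///
    /// ```ignore
    /// let token = pipeline.gh_use_secret("MY_API_TOKEN");
    /// ```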
    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
        GhUserSecretVar(secret_name.as_ref().to_string())
    }

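    /// Declare a new job in the pipeline, which will run on the given
    /// `platform` and `arch`, identified in backend UIs by `label`.
    ///
    /// A minimal sketch of declaring and finishing a job (the label is
    /// arbitrary):
    ///
    /// ```ignore
    /// let job = pipeline
    ///     .new_job(FlowPlatform::Windows, FlowArch::X86_64, "build artifacts")
    ///     .finish();
    /// ```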
    pub fn new_job(
        &mut self,
        platform: FlowPlatform,
        arch: FlowArch,
        label: impl AsRef<str>,
    ) -> PipelineJob<'_> {
        let idx = self.jobs.len();
        self.jobs.push(PipelineJobMetadata {
            root_nodes: BTreeMap::new(),
            patches: ResolvedPatches::build(),
            label: label.as_ref().into(),
            platform,
            arch,
            cond_param_idx: None,
            ado_pool: None,
            ado_variables: BTreeMap::new(),
            gh_override_if: None,
            gh_global_env: BTreeMap::new(),
            gh_pool: None,
            gh_permissions: BTreeMap::new(),
        });

        PipelineJob {
            pipeline: self,
            job_idx: idx,
        }
    }

    /// Declare a dependency between two jobs that is not the result of an
    /// artifact passed between them.
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }

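    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline: a [`PublishArtifact`] handle for the single job that
    /// produces it, and a [`UseArtifact`] handle for any jobs that consume it.
    ///
    /// A minimal sketch (the artifact name is arbitrary, but must be unique
    /// within the pipeline):
    ///
    /// ```ignore
    /// let (publish_bin, use_bin) = pipeline.new_artifact("my-binary");
    /// ```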
    #[track_caller]
    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
        let name = name.as_ref();
        let owned_name = name.to_string();

        let not_exists = self.artifact_names.insert(owned_name.clone());
        if !not_exists {
            panic!("duplicate artifact name: {}", name)
        }

        let idx = self.artifacts.len();
        self.artifacts.push(ArtifactMeta {
            name: owned_name,
            published_by_job: None,
            used_by_jobs: BTreeSet::new(),
        });

        (PublishArtifact { idx }, UseArtifact { idx })
    }

    /// Returns a pair of opaque handles to a new typed artifact for use across
    /// jobs in the pipeline.
    #[track_caller]
    pub fn new_typed_artifact<T: Artifact>(
        &mut self,
        name: impl AsRef<str>,
    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
        let (publish, use_artifact) = self.new_artifact(name);
        (
            PublishTypedArtifact(publish, std::marker::PhantomData),
            UseTypedArtifact(use_artifact, std::marker::PhantomData),
        )
    }

    /// (ADO only) Set the pipeline-level name.
    ///
    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
        self.ado_name = Some(name);
        self
    }

    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }

    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` specifies whether the parameter should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
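    ///
    /// # Example
    ///
    /// A minimal sketch declaring a parameter and wiring it into a job
    /// (`some_node` and its request type are hypothetical):
    ///
    /// ```ignore
    /// let verbose = pipeline.new_parameter_bool(
    ///     "verbose",
    ///     "emit verbose logs",
    ///     ParameterKind::Unstable,
    ///     Some(false), // quiet by default
    /// );
    ///
    /// pipeline
    ///     .new_job(platform, arch, "build")
    ///     .dep_on(|ctx| some_node::Request {
    ///         verbose: ctx.use_parameter(verbose),
    ///     })
    ///     .finish();
    /// ```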
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` specifies whether the parameter should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `String`.
    ///
    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` specifies whether the parameter should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` allows restricting inputs to a set of possible values.
    /// Depending on the backend, these options may be presented as a set of
    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
    /// then any string is allowed.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<impl AsRef<str>>,
        possible_values: Option<Vec<String>>,
    ) -> UseParameter<String> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::String {
                name,
                description: description.as_ref().into(),
                kind,
                default: default.map(|x| x.as_ref().into()),
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
}

pub struct PipelineJobCtx<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJobCtx<'_> {
    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
        self.pipeline.dummy_done_idx += 1;
        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
    }

    /// Claim that this job will use this artifact, obtaining a path to a folder
    /// with the artifact's contents.
    pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
        self.pipeline.artifacts[artifact.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            true,
        ))
    }

    /// Claim that this job will publish this artifact, obtaining a path to a
    /// fresh, empty folder which will be published as the specific artifact at
    /// the end of the job.
    pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
        let existing = self.pipeline.artifacts[artifact.idx]
            .published_by_job
            .replace(self.job_idx);
        assert!(existing.is_none()); // PublishArtifact isn't cloneable

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            false,
        ))
    }

    fn helper_request<R: IntoRequest>(&mut self, req: R)
    where
        R::Node: 'static,
    {
        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
    }

    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
        let artifact_map_idx = self.pipeline.artifact_map_idx;
        self.pipeline.artifact_map_idx += 1;

        let backing_var = format!("artifact_map{}", artifact_map_idx);
        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
        (read_var, write_var)
    }

    /// Claim that this job will use this artifact, obtaining the resolved
    /// contents of the artifact.
    pub fn use_typed_artifact<T: Artifact>(
        &mut self,
        artifact: &UseTypedArtifact<T>,
    ) -> ReadVar<T> {
        let artifact_path = self.use_artifact(&artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
        read
    }

    /// Claim that this job will publish this artifact, obtaining a variable to
    /// write the artifact's contents to. The artifact will be published at
    /// the end of the job.
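    ///
    /// # Example
    ///
    /// A minimal sketch of the typed-artifact round trip, where `MyArtifact`
    /// is a hypothetical type implementing [`Artifact`]:
    ///
    /// ```ignore
    /// // in the publishing job:
    /// let wv: WriteVar<MyArtifact> = ctx.publish_typed_artifact(publish_handle);
    /// // in a consuming job:
    /// let rv: ReadVar<MyArtifact> = ctx.use_typed_artifact(&use_handle);
    /// ```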
    pub fn publish_typed_artifact<T: Artifact>(
        &mut self,
        artifact: PublishTypedArtifact<T>,
    ) -> WriteVar<T> {
        let artifact_path = self.publish_artifact(artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        let done = self.new_done_handle();
        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
        write
    }

    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
    /// specified at runtime.
    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
    where
        T: Serialize + DeserializeOwned,
    {
        self.pipeline.parameters[param.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(
            self.pipeline.parameters[param.idx]
                .parameter
                .name()
                .to_string(),
        )
    }

    /// Shortcut which allows defining a bool pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> ReadVar<bool> {
        let param = self
            .pipeline
            .new_parameter_bool(name, description, kind, default);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a number pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> ReadVar<i64> {
        let param =
            self.pipeline
                .new_parameter_num(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a string pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<String>,
        possible_values: Option<Vec<String>>,
    ) -> ReadVar<String> {
        let param =
            self.pipeline
                .new_parameter_string(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }
}

#[must_use]
pub struct PipelineJob<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJob<'_> {
    /// (ADO only) specify which agent pool this job will be run on.
    pub fn ado_set_pool(self, pool: impl AsRef<str>) -> Self {
        self.ado_set_pool_with_demands(pool, Vec::new())
    }

    /// (ADO only) specify which agent pool this job will be run on, with
    /// additional special runner demands.
    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
            name: pool.as_ref().into(),
            demands,
        });
        self
    }

    /// (ADO only) Declare a job-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings, which may include ADO
    /// template expressions.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .ado_variables
            .insert(name.into(), value.into());
        self
    }

    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
    /// once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.pipeline.jobs[self.job_idx].ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// (ADO only) Overrides the ID of the job.
    ///
    /// Flowey typically generates a reasonable job ID, but some use cases that
    /// depend on the ID may find it useful to override it to something custom.
    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
        self.pipeline
            .ado_job_id_overrides
            .insert(self.job_idx, name.as_ref().into());
        self
    }

    /// (GitHub Actions only) specify which GitHub runner this job will be run on.
    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
        self
    }

    /// (GitHub Actions only) Manually override the `if:` condition for this
    /// particular job.
    ///
    /// **This is dangerous**, as an improperly set `if` condition may break
    /// downstream flowey jobs which assume flowey is in control of the job's
    /// scheduling logic.
    ///
    /// See
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
    /// for more info.
    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
        self
    }

    /// (GitHub Actions only) Declare a global job-level environment variable,
    /// visible to all downstream steps.
    ///
    /// `name` and `value` are both arbitrary strings, which may include GitHub
    /// Actions template expressions.
    ///
    /// **This is dangerous**, as it is easy to misuse this API in order to
    /// write a node which takes an implicit dependency on there being a global
    /// variable set on its behalf by the top-level pipeline code, making it
    /// difficult to "locally reason" about the behavior of a node simply by
    /// reading its code.
    ///
    /// Whenever possible, nodes should "late bind" environment variables:
    /// accepting a compile-time / runtime flowey parameter, and then setting it
    /// prior to executing a child command that requires it.
    ///
    /// Only use this API in exceptional cases, such as obtaining an environment
    /// variable whose value is determined by a job-level GitHub Actions
    /// expression evaluation.
    pub fn gh_dangerous_global_env_var(
        self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .gh_global_env
            .insert(name.into(), value.into());
        self
    }

    /// (GitHub Actions only) Grant permissions required by nodes in the job.
    ///
    /// For a given node handle, grant the specified permissions.
    /// The list provided must match the permissions specified within the node
    /// using `requires_permission`.
    ///
    /// NOTE: While this method is called at a node-level for auditability, the
    /// emitted YAML grants permissions at the job-level.
    ///
    /// This can lead to surprising situations: e.g., one node may omit a
    /// permission it requires, yet the pipeline still runs because another
    /// node in the job was granted that permission (and would break if that
    /// other node were removed).
    ///
    /// For available permission scopes and their descriptions, see
    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
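    ///
    /// # Example
    ///
    /// A minimal sketch granting read access to repository contents on behalf
    /// of a hypothetical `my_node`:
    ///
    /// ```ignore
    /// job.gh_grant_permissions::<my_node::Node>([(
    ///     GhPermission::Contents,
    ///     GhPermissionValue::Read,
    /// )])
    /// ```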
    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
        self,
        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
    ) -> Self {
        let node_handle = NodeHandle::from_type::<N>();
        for (permission, value) in permissions {
            self.pipeline.jobs[self.job_idx]
                .gh_permissions
                .entry(node_handle)
                .or_default()
                .insert(permission, value);
        }
        self
    }

    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
        self.pipeline.jobs[self.job_idx]
            .patches
            .apply_patchfn(patchfn);
        self
    }

    /// Only run the job if the specified condition is true.
    ///
    /// When running locally, the `cond`'s default value will be used to
    /// determine if the job will be run.
    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
        self
    }

    /// Add a flow node which will be run as part of the job.
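    ///
    /// A minimal sketch (`example_node` and its request type are
    /// hypothetical):
    ///
    /// ```ignore
    /// job.dep_on(|ctx| example_node::Request {
    ///     input: ctx.use_parameter(my_param),
    ///     done: ctx.new_done_handle(),
    /// })
    /// ```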
    pub fn dep_on<R: IntoRequest + 'static>(
        self,
        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
    ) -> Self {
        // JobToNodeCtx will ensure artifact deps are taken care of
        let req = f(&mut PipelineJobCtx {
            pipeline: self.pipeline,
            job_idx: self.job_idx,
        });

        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());

        self
    }

    /// Finish describing the pipeline job.
    pub fn finish(self) -> PipelineJobHandle {
        PipelineJobHandle {
            job_idx: self.job_idx,
        }
    }

    /// Return the job's platform.
    pub fn get_platform(&self) -> FlowPlatform {
        self.pipeline.jobs[self.job_idx].platform
    }

    /// Return the job's architecture.
    pub fn get_arch(&self) -> FlowArch {
        self.pipeline.jobs[self.job_idx].arch
    }
}

#[derive(Clone)]
pub struct PipelineJobHandle {
    job_idx: usize,
}

impl PipelineJobHandle {
    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
        self.job_idx == job.job_idx
    }
}

#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}

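/// Types which can be converted into a flowey [`Pipeline`].
///
/// A minimal sketch of an implementation:
///
/// ```ignore
/// struct MyPipeline;
///
/// impl IntoPipeline for MyPipeline {
///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
///         let mut pipeline = Pipeline::new();
///         // ...declare jobs, artifacts, and parameters here...
///         Ok(pipeline)
///     }
/// }
/// ```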
pub trait IntoPipeline {
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}

fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
    match kind {
        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
        ParameterKind::Stable => name.as_ref().into(),
    }
}

/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    pub struct PipelineJobMetadata {
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        pub patches: PatchResolver,
        pub label: String,
        pub platform: FlowPlatform,
        pub arch: FlowArch,
        pub cond_param_idx: Option<usize>,
        // backend specific
        pub ado_pool: Option<AdoPool>,
        pub ado_variables: BTreeMap<String, String>,
        pub gh_override_if: Option<String>,
        pub gh_pool: Option<GhRunner>,
        pub gh_global_env: BTreeMap<String, String>,
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    // TODO: support a more structured format for demands
    // See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
    #[derive(Debug, Clone)]
    pub struct AdoPool {
        pub name: String,
        pub demands: Vec<String>,
    }

    #[derive(Debug)]
    pub struct ArtifactMeta {
        pub name: String,
        pub published_by_job: Option<usize>,
        pub used_by_jobs: BTreeSet<usize>,
    }

    #[derive(Debug)]
    pub struct ParameterMeta {
        pub parameter: Parameter,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all fields marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}