flowey_core/pipeline.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Core types and traits used to create and work with flowey pipelines.

mod artifact;

pub use artifact::Artifact;

use self::internal::*;
use crate::node::FlowArch;
use crate::node::FlowNodeBase;
use crate::node::FlowPlatform;
use crate::node::FlowPlatformLinuxDistro;
use crate::node::GhUserSecretVar;
use crate::node::IntoRequest;
use crate::node::NodeHandle;
use crate::node::ReadVar;
use crate::node::WriteVar;
use crate::node::steps::ado::AdoResourcesRepositoryId;
use crate::node::user_facing::AdoRuntimeVar;
use crate::node::user_facing::GhPermission;
use crate::node::user_facing::GhPermissionValue;
use crate::patch::PatchResolver;
use crate::patch::ResolvedPatches;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::PathBuf;

/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}

fn linux_distro() -> FlowPlatformLinuxDistro {
    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
        if etc_os_release.contains("ID=ubuntu") {
            FlowPlatformLinuxDistro::Ubuntu
        } else if etc_os_release.contains("ID=fedora") {
            FlowPlatformLinuxDistro::Fedora
        } else if etc_os_release.contains("ID=arch") {
            FlowPlatformLinuxDistro::Arch
        } else {
            FlowPlatformLinuxDistro::Unknown
        }
    } else {
        FlowPlatformLinuxDistro::Unknown
    }
}

pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self;
}

impl HostExt for FlowPlatform {
    /// Return the platform of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
        }

        if cfg!(target_os = "windows") {
            Self::Windows
        } else if cfg!(target_os = "linux") {
            Self::Linux(linux_distro())
        } else if cfg!(target_os = "macos") {
            Self::MacOs
        } else {
            panic!("no valid host-os")
        }
    }
}

impl HostExt for FlowArch {
    /// Return the arch of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
        }

        // xtask-fmt allow-target-arch oneoff-flowey
        if cfg!(target_arch = "x86_64") {
            Self::X86_64
        // xtask-fmt allow-target-arch oneoff-flowey
        } else if cfg!(target_arch = "aarch64") {
            Self::Aarch64
        } else {
            panic!("no valid host-arch")
        }
    }
}
/// Trigger ADO pipelines on a schedule
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger ADO pipelines per PR
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
}

/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}

impl Default for AdoPrTriggers {
    fn default() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            run_on_draft: false,
            auto_cancel: true,
        }
    }
}

/// ADO repository resource.
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}

/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// Github repository
    GitHub,
}

/// ADO repository ref
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}

/// Trigger Github Actions pipelines on a schedule
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger Github Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
}

/// Trigger Github Actions pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
}

impl GhPrTriggers {
    /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
    pub fn new_draftable() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            types: vec![
                "opened".into(),
                "synchronize".into(),
                "reopened".into(),
                "ready_for_review".into(),
            ],
            auto_cancel: true,
        }
    }
}

#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    UbuntuLatest,
    Ubuntu2204,
    Ubuntu2004,
    WindowsLatest,
    Windows2022,
    Windows2019,
    MacOsLatest,
    MacOs14,
    MacOs13,
    MacOs12,
    MacOs11,
    Custom(String),
}

/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self hosted runners are selected by matching runner labels to `labels`.
    /// 'self-hosted' is a common label for self hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// This uses a runner belonging to `group` that matches all labels in `labels`.
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}

impl GhRunner {
    /// Whether this is a self-hosted runner with the provided label
    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
    }
}

/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}

#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    idx: usize,
    _kind: std::marker::PhantomData<T>,
}

/// Opaque handle to an artifact which must be published by a single job.
#[must_use]
pub struct PublishArtifact {
    idx: usize,
}

/// Opaque handle to an artifact which can be used by one or more jobs.
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    idx: usize,
}

/// Opaque handle to an artifact of type `T` which must be published by a single job.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);

/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);

impl<T> Clone for UseTypedArtifact<T> {
    fn clone(&self) -> Self {
        UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
    }
}

#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}

impl Pipeline {
    pub fn new() -> Pipeline {
        Pipeline::default()
    }

    /// Inject all pipeline jobs with some common logic. (e.g: to resolve common
    /// configuration requirements shared by all jobs).
    ///
    /// Can only be invoked once per pipeline.
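    ///
    /// # Example
    ///
    /// A sketch of typical usage (the injected logic here, setting a job-level
    /// ADO variable, is purely illustrative):
    ///
    /// ```ignore
    /// pipeline.inject_all_jobs_with(|job| {
    ///     job.ado_new_named_variable("System.Debug", "true")
    /// });
    /// ```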
    #[track_caller]
    pub fn inject_all_jobs_with(
        &mut self,
        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
    ) -> &mut Self {
        if self.inject_all_jobs_with.is_some() {
            panic!("can only call inject_all_jobs_with once!")
        }
        self.inject_all_jobs_with = Some(Box::new(cb));
        self
    }

    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
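    ///
    /// # Example
    ///
    /// A sketch of the shape such a template might take. The build and copy
    /// commands are illustrative only, not a known-good bootstrap:
    ///
    /// ```ignore
    /// pipeline.ado_set_flowey_bootstrap_template(
    ///     r#"
    /// steps:
    /// - bash: |
    ///     cargo build -p {{FLOWEY_CRATE}} --target {{FLOWEY_TARGET}}
    ///     cp target/{{FLOWEY_TARGET}}/debug/{{FLOWEY_CRATE}}{{FLOWEY_BIN_EXTENSION}} \
    ///        "{{FLOWEY_OUTDIR}}/flowey{{FLOWEY_BIN_EXTENSION}}"
    ///     # ...also copy pipeline.yaml and pipeline.json into {{FLOWEY_OUTDIR}}...
    ///   displayName: bootstrap flowey
    /// "#
    ///     .into(),
    /// );
    /// ```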
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }

    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }

    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }

    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }

    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }

    /// (ADO only) Declare a new repository resource, returning a type-safe
    /// handle which downstream ADO steps are able to consume via
    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
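    ///
    /// # Example
    ///
    /// A sketch of declaring a GitHub-backed repository resource (the
    /// repository and endpoint names are illustrative):
    ///
    /// ```ignore
    /// let repo_id = pipeline.ado_add_resources_repository(AdoResourcesRepository {
    ///     repo_type: AdoResourcesRepositoryType::GitHub,
    ///     name: "contoso/widgets".into(),
    ///     git_ref: AdoResourcesRepositoryRef::Fixed("refs/heads/main".into()),
    ///     endpoint: Some("contoso-github-connection".into()),
    /// });
    /// ```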
    pub fn ado_add_resources_repository(
        &mut self,
        repo: AdoResourcesRepository,
    ) -> AdoResourcesRepositoryId {
        let AdoResourcesRepository {
            repo_type,
            name,
            git_ref,
            endpoint,
        } = repo;

        let repo_id = format!("repo{}", self.ado_resources_repository.len());

        self.ado_resources_repository
            .push(InternalAdoResourcesRepository {
                repo_id: repo_id.clone(),
                repo_type,
                name,
                git_ref: match git_ref {
                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
                    AdoResourcesRepositoryRef::Parameter(p) => {
                        AdoResourcesRepositoryRef::Parameter(p.idx)
                    }
                },
                endpoint,
            });
        AdoResourcesRepositoryId { repo_id }
    }

    /// (GitHub Actions only) Set the pipeline-level name.
    ///
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
        self.gh_name = Some(name.as_ref().into());
        self
    }

    /// (GitHub Actions only) Provide a YAML template used to bootstrap flowey
    /// at the start of a GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }

    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }

    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
    ///
    /// For more information on defining secrets for use in GitHub Actions, see
    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
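    ///
    /// # Example
    ///
    /// A sketch (the secret name is illustrative):
    ///
    /// ```ignore
    /// let my_secret = pipeline.gh_use_secret("MY_SECRET");
    /// // `my_secret` can then be passed along to nodes which accept a
    /// // `GhUserSecretVar` in their request.
    /// ```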
    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
        GhUserSecretVar(secret_name.as_ref().to_string())
    }

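    /// Add a new job to the pipeline, which will run on a machine with the
    /// given platform and architecture, using `label` as its friendly name.
    ///
    /// # Example
    ///
    /// A sketch of declaring a Linux x86_64 job:
    ///
    /// ```ignore
    /// let job = pipeline
    ///     .new_job(
    ///         FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
    ///         FlowArch::X86_64,
    ///         "example job",
    ///     )
    ///     .finish();
    /// ```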
    pub fn new_job(
        &mut self,
        platform: FlowPlatform,
        arch: FlowArch,
        label: impl AsRef<str>,
    ) -> PipelineJob<'_> {
        let idx = self.jobs.len();
        self.jobs.push(PipelineJobMetadata {
            root_nodes: BTreeMap::new(),
            patches: ResolvedPatches::build(),
            label: label.as_ref().into(),
            platform,
            arch,
            cond_param_idx: None,
            ado_pool: None,
            ado_variables: BTreeMap::new(),
            gh_override_if: None,
            gh_global_env: BTreeMap::new(),
            gh_pool: None,
            gh_permissions: BTreeMap::new(),
        });

        PipelineJob {
            pipeline: self,
            job_idx: idx,
        }
    }

    /// Declare a dependency between two jobs that is not the result of an
    /// artifact.
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }

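    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline.
    ///
    /// Panics if an artifact with the same name was already declared.
    ///
    /// # Example
    ///
    /// A sketch of wiring an artifact between jobs (the artifact name is
    /// illustrative):
    ///
    /// ```ignore
    /// let (publish_bin, use_bin) = pipeline.new_artifact("my-binary");
    /// // within the publishing job: `ctx.publish_artifact(publish_bin)`
    /// // within a consuming job: `ctx.use_artifact(&use_bin)`
    /// ```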
    #[track_caller]
    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
        let name = name.as_ref();
        let owned_name = name.to_string();

        let not_exists = self.artifact_names.insert(owned_name.clone());
        if !not_exists {
            panic!("duplicate artifact name: {}", name)
        }

        let idx = self.artifacts.len();
        self.artifacts.push(ArtifactMeta {
            name: owned_name,
            published_by_job: None,
            used_by_jobs: BTreeSet::new(),
        });

        (PublishArtifact { idx }, UseArtifact { idx })
    }

    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline.
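    ///
    /// Unlike [`Self::new_artifact`], the artifact's contents are described by
    /// the [`Artifact`] type `T`, rather than by a raw folder path.
    ///
    /// # Example
    ///
    /// A sketch, assuming `MyArtifact` is a type implementing [`Artifact`]:
    ///
    /// ```ignore
    /// let (publish, use_artifact) = pipeline.new_typed_artifact::<MyArtifact>("my-artifact");
    /// // within jobs, `ctx.publish_typed_artifact(publish)` yields a
    /// // `WriteVar<MyArtifact>`, and `ctx.use_typed_artifact(&use_artifact)`
    /// // yields a `ReadVar<MyArtifact>`.
    /// ```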
    #[track_caller]
    pub fn new_typed_artifact<T: Artifact>(
        &mut self,
        name: impl AsRef<str>,
    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
        let (publish, use_artifact) = self.new_artifact(name);
        (
            PublishTypedArtifact(publish, std::marker::PhantomData),
            UseTypedArtifact(use_artifact, std::marker::PhantomData),
        )
    }

    /// (ADO only) Set the pipeline-level name.
    ///
    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
        self.ado_name = Some(name);
        self
    }

    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }

    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
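    ///
    /// # Example
    ///
    /// A sketch of declaring a parameter and consuming it from within a job:
    ///
    /// ```ignore
    /// let verbose = pipeline.new_parameter_bool(
    ///     "verbose",
    ///     "run with verbose logging",
    ///     ParameterKind::Unstable,
    ///     Some(false),
    /// );
    /// // ...within a job's `dep_on` closure:
    /// // let verbose: ReadVar<bool> = ctx.use_parameter(verbose.clone());
    /// ```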
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `String`.
    ///
    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` allows restricting inputs to a set of possible values.
    /// Depending on the backend, these options may be presented as a set of
    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
    /// then any string is allowed.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<impl AsRef<str>>,
        possible_values: Option<Vec<String>>,
    ) -> UseParameter<String> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::String {
                name,
                description: description.as_ref().into(),
                kind,
                default: default.map(|x| x.as_ref().into()),
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
}

pub struct PipelineJobCtx<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJobCtx<'_> {
    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
        self.pipeline.dummy_done_idx += 1;
        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
    }

    /// Claim that this job will use this artifact, obtaining a path to a folder
    /// with the artifact's contents.
    pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
        self.pipeline.artifacts[artifact.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            true,
        ))
    }

    /// Claim that this job will publish this artifact, obtaining a path to a
    /// fresh, empty folder which will be published as the specified artifact at
    /// the end of the job.
    pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
        let existing = self.pipeline.artifacts[artifact.idx]
            .published_by_job
            .replace(self.job_idx);
        assert!(existing.is_none()); // PublishArtifact isn't cloneable

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            false,
        ))
    }

    fn helper_request<R: IntoRequest>(&mut self, req: R)
    where
        R::Node: 'static,
    {
        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
    }

    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
        let artifact_map_idx = self.pipeline.artifact_map_idx;
        self.pipeline.artifact_map_idx += 1;

        let backing_var = format!("artifact_map{}", artifact_map_idx);
        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
        (read_var, write_var)
    }

    /// Claim that this job will use this artifact, obtaining the resolved
    /// contents of the artifact.
    pub fn use_typed_artifact<T: Artifact>(
        &mut self,
        artifact: &UseTypedArtifact<T>,
    ) -> ReadVar<T> {
        let artifact_path = self.use_artifact(&artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
        read
    }

    /// Claim that this job will publish this artifact, obtaining a variable to
    /// write the artifact's contents to. The artifact will be published at
    /// the end of the job.
    pub fn publish_typed_artifact<T: Artifact>(
        &mut self,
        artifact: PublishTypedArtifact<T>,
    ) -> WriteVar<T> {
        let artifact_path = self.publish_artifact(artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        let done = self.new_done_handle();
        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
        write
    }

    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
    /// specified at runtime.
    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
    where
        T: Serialize + DeserializeOwned,
    {
        self.pipeline.parameters[param.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(
            self.pipeline.parameters[param.idx]
                .parameter
                .name()
                .to_string(),
        )
    }

    /// Shortcut which allows defining a bool pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this method
    /// - use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`] instead.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> ReadVar<bool> {
        let param = self
            .pipeline
            .new_parameter_bool(name, description, kind, default);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a number pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this method
    /// - use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`] instead.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> ReadVar<i64> {
        let param =
            self.pipeline
                .new_parameter_num(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a string pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this method
    /// - use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`] instead.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<String>,
        possible_values: Option<Vec<String>>,
    ) -> ReadVar<String> {
        let param =
            self.pipeline
                .new_parameter_string(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }
}

#[must_use]
pub struct PipelineJob<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJob<'_> {
    /// (ADO only) specify which agent pool this job will be run on.
    pub fn ado_set_pool(self, pool: impl AsRef<str>) -> Self {
        self.ado_set_pool_with_demands(pool, Vec::new())
    }

    /// (ADO only) specify which agent pool this job will be run on, with
    /// additional special runner demands.
    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
            name: pool.as_ref().into(),
            demands,
        });
        self
    }

    /// (ADO only) Declare a job-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings, which may include ADO
    /// template expressions.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .ado_variables
            .insert(name.into(), value.into());
        self
    }

    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
    /// once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.pipeline.jobs[self.job_idx].ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// (ADO only) Overrides the ID of the job.
    ///
    /// Flowey typically generates a reasonable job ID but some use cases that depend
    /// on the ID may find it useful to override it to something custom.
    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
        self.pipeline
            .ado_job_id_overrides
            .insert(self.job_idx, name.as_ref().into());
        self
    }

    /// (GitHub Actions only) specify which Github runner this job will be run on.
    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
        self
    }

    /// (GitHub Actions only) Manually override the `if:` condition for this
    /// particular job.
    ///
    /// **This is dangerous**, as an improperly set `if` condition may break
    /// downstream flowey jobs which assume flowey is in control of the job's
    /// scheduling logic.
    ///
    /// See
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
    /// for more info.
    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
        self
    }

    /// (GitHub Actions only) Declare a global job-level environment variable,
    /// visible to all downstream steps.
    ///
    /// `name` and `value` are both arbitrary strings, which may include GitHub
    /// Actions template expressions.
    ///
    /// **This is dangerous**, as it is easy to misuse this API in order to
    /// write a node which takes an implicit dependency on there being a global
    /// variable set on its behalf by the top-level pipeline code, making it
    /// difficult to "locally reason" about the behavior of a node simply by
    /// reading its code.
    ///
    /// Whenever possible, nodes should "late bind" environment variables:
    /// accepting a compile-time / runtime flowey parameter, and then setting it
    /// prior to executing a child command that requires it.
    ///
    /// Only use this API in exceptional cases, such as obtaining an environment
    /// variable whose value is determined by a job-level GitHub Actions
    /// expression evaluation.
    pub fn gh_dangerous_global_env_var(
        self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .gh_global_env
            .insert(name.into(), value.into());
        self
    }

    /// (GitHub Actions only) Grant permissions required by nodes in the job.
    ///
    /// For a given node handle, grant the specified permissions.
    /// The list provided must match the permissions specified within the node
    /// using `requires_permission`.
    ///
    /// NOTE: While this method is called at a node-level for auditability, the emitted
    /// yaml grants permissions at the job-level.
    ///
    /// This can lead to surprising situations: if node 1 fails to declare a
    /// permission that Github Actions requires, but node 2 in the same job
    /// grants it, the pipeline will still run, yet would break if node 2 were
    /// removed.
    ///
    /// For available permission scopes and their descriptions, see
    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
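    ///
    /// # Example
    ///
    /// A sketch, assuming an illustrative node `my_node` which requires read
    /// access to repository contents (the permission values shown are just one
    /// possible grant):
    ///
    /// ```ignore
    /// job.gh_grant_permissions::<my_node::Node>([(
    ///     GhPermission::Contents,
    ///     GhPermissionValue::Read,
    /// )])
    /// ```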
    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
        self,
        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
    ) -> Self {
        let node_handle = NodeHandle::from_type::<N>();
        for (permission, value) in permissions {
            self.pipeline.jobs[self.job_idx]
                .gh_permissions
                .entry(node_handle)
                .or_default()
                .insert(permission, value);
        }
        self
    }

    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
        self.pipeline.jobs[self.job_idx]
            .patches
            .apply_patchfn(patchfn);
        self
    }

    /// Only run the job if the specified condition is true.
    ///
    /// When running locally, the `cond`'s default value will be used to
    /// determine if the job will be run.
    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
        self
    }

    /// Add a flow node which will be run as part of the job.
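    ///
    /// # Example
    ///
    /// A sketch, assuming an illustrative `build_widget` node whose request
    /// takes a destination directory and a completion handle:
    ///
    /// ```ignore
    /// job.dep_on(|ctx| build_widget::Request {
    ///     output_dir: ctx.publish_artifact(publish_widget),
    ///     done: ctx.new_done_handle(),
    /// })
    /// ```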
    pub fn dep_on<R: IntoRequest + 'static>(
        self,
        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
    ) -> Self {
        // PipelineJobCtx will ensure artifact deps are taken care of
        let req = f(&mut PipelineJobCtx {
            pipeline: self.pipeline,
            job_idx: self.job_idx,
        });

        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());

        self
    }

    /// Finish describing the pipeline job.
    pub fn finish(self) -> PipelineJobHandle {
        PipelineJobHandle {
            job_idx: self.job_idx,
        }
    }

    /// Return the job's platform.
    pub fn get_platform(&self) -> FlowPlatform {
        self.pipeline.jobs[self.job_idx].platform
    }

    /// Return the job's architecture.
    pub fn get_arch(&self) -> FlowArch {
        self.pipeline.jobs[self.job_idx].arch
    }
}

#[derive(Clone)]
pub struct PipelineJobHandle {
    job_idx: usize,
}

impl PipelineJobHandle {
    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
        self.job_idx == job.job_idx
    }
}

#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}

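/// A type which can be converted into a concrete [`Pipeline`], given a hint as
/// to what backend the pipeline is being generated for.
///
/// # Example
///
/// A minimal sketch:
///
/// ```ignore
/// struct MyPipeline;
///
/// impl IntoPipeline for MyPipeline {
///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
///         let mut pipeline = Pipeline::new();
///         // ...declare jobs, artifacts, and parameters...
///         Ok(pipeline)
///     }
/// }
/// ```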
pub trait IntoPipeline {
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}

fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
    match kind {
        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
        ParameterKind::Stable => name.as_ref().into(),
    }
}

/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    pub struct PipelineJobMetadata {
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        pub patches: PatchResolver,
        pub label: String,
        pub platform: FlowPlatform,
        pub arch: FlowArch,
        pub cond_param_idx: Option<usize>,
        // backend specific
        pub ado_pool: Option<AdoPool>,
        pub ado_variables: BTreeMap<String, String>,
        pub gh_override_if: Option<String>,
        pub gh_pool: Option<GhRunner>,
        pub gh_global_env: BTreeMap<String, String>,
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    // TODO: support a more structured format for demands
    // See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
    #[derive(Debug, Clone)]
    pub struct AdoPool {
        pub name: String,
        pub demands: Vec<String>,
    }

    #[derive(Debug)]
    pub struct ArtifactMeta {
        pub name: String,
        pub published_by_job: Option<usize>,
        pub used_by_jobs: BTreeSet<usize>,
    }

    #[derive(Debug)]
    pub struct ParameterMeta {
        pub parameter: Parameter,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all fields marked `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}
1467}