flowey_core/pipeline.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Core types and traits used to create and work with flowey pipelines.

mod artifact;

pub use artifact::Artifact;

use self::internal::*;
use crate::node::FlowArch;
use crate::node::FlowNodeBase;
use crate::node::FlowPlatform;
use crate::node::FlowPlatformLinuxDistro;
use crate::node::GhUserSecretVar;
use crate::node::IntoRequest;
use crate::node::NodeHandle;
use crate::node::ReadVar;
use crate::node::WriteVar;
use crate::node::steps::ado::AdoResourcesRepositoryId;
use crate::node::user_facing::AdoRuntimeVar;
use crate::node::user_facing::GhPermission;
use crate::node::user_facing::GhPermissionValue;
use crate::patch::PatchResolver;
use crate::patch::ResolvedPatches;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::PathBuf;

/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}

fn linux_distro() -> FlowPlatformLinuxDistro {
    // Check for nix environment first - takes precedence over distro detection
    if std::env::var("IN_NIX_SHELL").is_ok() {
        return FlowPlatformLinuxDistro::Nix;
    }

    // A `nix develop` shell doesn't set `IN_NIX_SHELL`, but the PATH should include a nix store path
    if std::env::var("PATH").is_ok_and(|path| path.contains("/nix/store")) {
        return FlowPlatformLinuxDistro::Nix;
    }

    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
        if etc_os_release.contains("ID=ubuntu") {
            FlowPlatformLinuxDistro::Ubuntu
        } else if etc_os_release.contains("ID=fedora") {
            FlowPlatformLinuxDistro::Fedora
        } else if etc_os_release.contains("ID=arch") {
            FlowPlatformLinuxDistro::Arch
        } else {
            FlowPlatformLinuxDistro::Unknown
        }
    } else {
        FlowPlatformLinuxDistro::Unknown
    }
}

pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
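    ///
    /// # Example
    ///
    /// A minimal sketch of local-only host detection:
    ///
    /// ```rust,ignore
    /// let platform = FlowPlatform::host(PipelineBackendHint::Local);
    /// let arch = FlowArch::host(PipelineBackendHint::Local);
    /// ```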
    fn host(backend_hint: PipelineBackendHint) -> Self;
}

impl HostExt for FlowPlatform {
    /// Return the platform of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
        }

        if cfg!(target_os = "windows") {
            Self::Windows
        } else if cfg!(target_os = "linux") {
            Self::Linux(linux_distro())
        } else if cfg!(target_os = "macos") {
            Self::MacOs
        } else {
            panic!("no valid host-os")
        }
    }
}

impl HostExt for FlowArch {
    /// Return the arch of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
        }

        // xtask-fmt allow-target-arch oneoff-flowey
        if cfg!(target_arch = "x86_64") {
            Self::X86_64
        // xtask-fmt allow-target-arch oneoff-flowey
        } else if cfg!(target_arch = "aarch64") {
            Self::Aarch64
        } else {
            panic!("no valid host-arch")
        }
    }
}

/// Trigger ADO pipelines on a schedule
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline on a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger ADO pipelines per PR
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
}

/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}

impl Default for AdoPrTriggers {
    fn default() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            run_on_draft: false,
            auto_cancel: true,
        }
    }
}

/// ADO repository resource.
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}

/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// Github repository
    GitHub,
}

/// ADO repository ref
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}

/// Trigger Github Actions pipelines on a schedule
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline on a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger Github Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
}

/// Trigger Github Actions pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
}

impl GhPrTriggers {
    /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
    pub fn new_draftable() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            types: vec![
                "opened".into(),
                "synchronize".into(),
                "reopened".into(),
                "ready_for_review".into(),
            ],
            auto_cancel: true,
        }
    }
}

/// OS label used to select a GitHub-hosted runner.
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    UbuntuLatest,
    Ubuntu2404,
    Ubuntu2204,
    WindowsLatest,
    Windows2025,
    Windows2022,
    Ubuntu2404Arm,
    Ubuntu2204Arm,
    Windows11Arm,
    Custom(String),
}

/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self-hosted runners are selected by matching runner labels to `labels`.
    /// 'self-hosted' is a common label for self-hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// This uses a runner belonging to `group` that matches all labels in `labels`.
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}

impl GhRunner {
    /// Whether this is a self-hosted runner with the provided label
    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
    }
}

/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}

/// Opaque handle to a pipeline-level parameter of type `T`.
#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    idx: usize,
    _kind: std::marker::PhantomData<T>,
}

/// Opaque handle to an artifact which must be published by a single job.
#[must_use]
pub struct PublishArtifact {
    idx: usize,
}

/// Opaque handle to an artifact which can be used by one or more jobs.
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    idx: usize,
}

/// Opaque handle to an artifact of type `T` which must be published by a single job.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);

/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);

impl<T> Clone for UseTypedArtifact<T> {
    fn clone(&self) -> Self {
        UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
    }
}

/// Builder used to incrementally define a flowey pipeline.
#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}

impl Pipeline {
    pub fn new() -> Pipeline {
        Pipeline::default()
    }

    /// Inject all pipeline jobs with some common logic (e.g: to resolve common
    /// configuration requirements shared by all jobs).
    ///
    /// Can only be invoked once per pipeline.
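    ///
    /// # Example
    ///
    /// A minimal sketch; the `common_cfg::Request` node is hypothetical:
    ///
    /// ```rust,ignore
    /// pipeline.inject_all_jobs_with(|job| {
    ///     // e.g: ensure every job resolves some shared configuration node
    ///     job.dep_on(|_ctx| common_cfg::Request::SetVerbose(true))
    /// });
    /// ```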
    #[track_caller]
    pub fn inject_all_jobs_with(
        &mut self,
        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
    ) -> &mut Self {
        if self.inject_all_jobs_with.is_some() {
            panic!("can only call inject_all_jobs_with once!")
        }
        self.inject_all_jobs_with = Some(Box::new(cb));
        self
    }

    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
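    ///
    /// # Example
    ///
    /// A sketch of wiring up a template. The YAML below is illustrative only,
    /// not a known-good bootstrap script:
    ///
    /// ```rust,ignore
    /// pipeline.ado_set_flowey_bootstrap_template(
    ///     r#"
    /// steps:
    ///   - script: |
    ///       cargo build -p {{FLOWEY_CRATE}} --target {{FLOWEY_TARGET}}
    ///       cp target/{{FLOWEY_TARGET}}/debug/{{FLOWEY_CRATE}}{{FLOWEY_BIN_EXTENSION}} {{FLOWEY_OUTDIR}}/flowey{{FLOWEY_BIN_EXTENSION}}
    ///       # ...also copy pipeline.yaml and pipeline.json into {{FLOWEY_OUTDIR}}...
    ///     displayName: bootstrap flowey
    /// "#
    ///     .into(),
    /// );
    /// ```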
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }

    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
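    ///
    /// # Example
    ///
    /// A minimal sketch (a no-op post-processing pass):
    ///
    /// ```rust,ignore
    /// pipeline.ado_post_process_yaml(|yaml| {
    ///     // e.g: splice `yaml` into an org-required wrapper template here
    ///     yaml
    /// });
    /// ```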
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }

    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }

    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }

    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }

    /// (ADO only) Declare a new repository resource, returning a type-safe
    /// handle which downstream ADO steps are able to consume via
    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
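    ///
    /// # Example
    ///
    /// A minimal sketch; the repository name, ref, and endpoint are
    /// illustrative:
    ///
    /// ```rust,ignore
    /// let repo_id = pipeline.ado_add_resources_repository(AdoResourcesRepository {
    ///     repo_type: AdoResourcesRepositoryType::GitHub,
    ///     name: "contoso/some-repo".into(),
    ///     git_ref: AdoResourcesRepositoryRef::Fixed("refs/heads/main".into()),
    ///     endpoint: Some("contoso-github-connection".into()),
    /// });
    /// ```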
    pub fn ado_add_resources_repository(
        &mut self,
        repo: AdoResourcesRepository,
    ) -> AdoResourcesRepositoryId {
        let AdoResourcesRepository {
            repo_type,
            name,
            git_ref,
            endpoint,
        } = repo;

        let repo_id = format!("repo{}", self.ado_resources_repository.len());

        self.ado_resources_repository
            .push(InternalAdoResourcesRepository {
                repo_id: repo_id.clone(),
                repo_type,
                name,
                git_ref: match git_ref {
                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
                    AdoResourcesRepositoryRef::Parameter(p) => {
                        AdoResourcesRepositoryRef::Parameter(p.idx)
                    }
                },
                endpoint,
            });
        AdoResourcesRepositoryId { repo_id }
    }

    /// (GitHub Actions only) Set the pipeline-level name.
    ///
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
        self.gh_name = Some(name.as_ref().into());
        self
    }

    /// (GitHub Actions only) Provide a YAML template used to bootstrap flowey
    /// at the start of a GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }

    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }

    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
    ///
    /// For more information on defining secrets for use in GitHub Actions, see
    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
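    ///
    /// # Example
    ///
    /// A minimal sketch; the secret name is illustrative:
    ///
    /// ```rust,ignore
    /// let my_token = pipeline.gh_use_secret("MY_API_TOKEN");
    /// // `my_token` can then be passed as part of a request to nodes that
    /// // know how to consume a `GhUserSecretVar`.
    /// ```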
    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
        GhUserSecretVar(secret_name.as_ref().to_string())
    }

    /// Add a new job to the pipeline, which will run on the given platform and
    /// architecture, identified by the given label.
    pub fn new_job(
        &mut self,
        platform: FlowPlatform,
        arch: FlowArch,
        label: impl AsRef<str>,
    ) -> PipelineJob<'_> {
        let idx = self.jobs.len();
        self.jobs.push(PipelineJobMetadata {
            root_nodes: BTreeMap::new(),
            patches: ResolvedPatches::build(),
            label: label.as_ref().into(),
            platform,
            arch,
            cond_param_idx: None,
            timeout_minutes: None,
            ado_pool: None,
            ado_variables: BTreeMap::new(),
            gh_override_if: None,
            gh_global_env: BTreeMap::new(),
            gh_pool: None,
            gh_permissions: BTreeMap::new(),
        });

        PipelineJob {
            pipeline: self,
            job_idx: idx,
        }
    }

    /// Declare a dependency between two jobs that is not the result of an
    /// artifact flowing between them.
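    ///
    /// # Example
    ///
    /// A minimal sketch, assuming two previously-defined job handles:
    ///
    /// ```rust,ignore
    /// // ensure `job_b` only runs after `job_a` has completed
    /// pipeline.non_artifact_dep(&job_b, &job_a);
    /// ```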
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }

    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline.
    #[track_caller]
    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
        let name = name.as_ref();
        let owned_name = name.to_string();

        let not_exists = self.artifact_names.insert(owned_name.clone());
        if !not_exists {
            panic!("duplicate artifact name: {}", name)
        }

        let idx = self.artifacts.len();
        self.artifacts.push(ArtifactMeta {
            name: owned_name,
            published_by_job: None,
            used_by_jobs: BTreeSet::new(),
        });

        (PublishArtifact { idx }, UseArtifact { idx })
    }

    /// Returns a pair of opaque handles to a new typed artifact for use across
    /// jobs in the pipeline.
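    ///
    /// # Example
    ///
    /// A minimal sketch; `BuildOutput` is a hypothetical type implementing
    /// [`Artifact`]:
    ///
    /// ```rust,ignore
    /// let (publish_build, use_build) =
    ///     pipeline.new_typed_artifact::<BuildOutput>("build-output");
    /// // hand `publish_build` to the producing job, and `use_build` to any
    /// // number of consuming jobs.
    /// ```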
    #[track_caller]
    pub fn new_typed_artifact<T: Artifact>(
        &mut self,
        name: impl AsRef<str>,
    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
        let (publish, use_artifact) = self.new_artifact(name);
        (
            PublishTypedArtifact(publish, std::marker::PhantomData),
            UseTypedArtifact(use_artifact, std::marker::PhantomData),
        )
    }

    /// (ADO only) Set the pipeline-level name.
    ///
    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
        self.ado_name = Some(name);
        self
    }

    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
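    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```rust,ignore
    /// let var: AdoRuntimeVar = pipeline.ado_new_named_variable("MY_VAR", "some value");
    /// // if needed at runtime, convert it inside a node via
    /// // `ctx.get_ado_variable(var)`.
    /// ```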
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }

    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
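    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```rust,ignore
    /// let verbose = pipeline.new_parameter_bool(
    ///     "verbose",
    ///     "emit verbose logs",
    ///     ParameterKind::Unstable,
    ///     Some(false), // defaults to non-verbose
    /// );
    /// ```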
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `String`.
    ///
    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` allows restricting inputs to a set of possible values.
    /// Depending on the backend, these options may be presented as a set of
    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
    /// then any string is allowed.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<impl AsRef<str>>,
        possible_values: Option<Vec<String>>,
    ) -> UseParameter<String> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::String {
                name,
                description: description.as_ref().into(),
                kind,
                default: default.map(|x| x.as_ref().into()),
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
}

/// Context object used when defining a [`PipelineJob`], providing access to
/// job-scoped artifacts and parameters.
pub struct PipelineJobCtx<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJobCtx<'_> {
    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
        self.pipeline.dummy_done_idx += 1;
        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
    }

    /// Claim that this job will use this artifact, obtaining a path to a folder
    /// with the artifact's contents.
    pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
        self.pipeline.artifacts[artifact.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            true,
        ))
    }

    /// Claim that this job will publish this artifact, obtaining a path to a
    /// fresh, empty folder which will be published as the specific artifact at
    /// the end of the job.
    pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
        let existing = self.pipeline.artifacts[artifact.idx]
            .published_by_job
            .replace(self.job_idx);
        assert!(existing.is_none()); // PublishArtifact isn't cloneable

        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
            &self.pipeline.artifacts[artifact.idx].name,
            false,
        ))
    }

    fn helper_request<R: IntoRequest>(&mut self, req: R)
    where
        R::Node: 'static,
    {
        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
    }

    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
        let artifact_map_idx = self.pipeline.artifact_map_idx;
        self.pipeline.artifact_map_idx += 1;

        let backing_var = format!("artifact_map{}", artifact_map_idx);
        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
        (read_var, write_var)
    }

    /// Claim that this job will use this artifact, obtaining the resolved
    /// contents of the artifact.
    pub fn use_typed_artifact<T: Artifact>(
        &mut self,
        artifact: &UseTypedArtifact<T>,
    ) -> ReadVar<T> {
        let artifact_path = self.use_artifact(&artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
        read
    }

    /// Claim that this job will publish this artifact, obtaining a variable to
    /// write the artifact's contents to. The artifact will be published at
    /// the end of the job.
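    ///
    /// # Example
    ///
    /// A minimal sketch; `BuildOutput` is a hypothetical [`Artifact`] type:
    ///
    /// ```rust,ignore
    /// // in the producing job:
    /// let write_build: WriteVar<BuildOutput> = ctx.publish_typed_artifact(publish_build);
    /// // in a consuming job:
    /// let read_build: ReadVar<BuildOutput> = ctx.use_typed_artifact(&use_build);
    /// ```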
    pub fn publish_typed_artifact<T: Artifact>(
        &mut self,
        artifact: PublishTypedArtifact<T>,
    ) -> WriteVar<T> {
        let artifact_path = self.publish_artifact(artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        let done = self.new_done_handle();
        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
        write
    }

    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
    /// specified at runtime.
    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
    where
        T: Serialize + DeserializeOwned,
    {
        self.pipeline.parameters[param.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(
            self.pipeline.parameters[param.idx]
                .parameter
                .name()
                .to_string(),
        )
    }

    /// Shortcut which allows defining a bool pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> ReadVar<bool> {
        let param = self
            .pipeline
            .new_parameter_bool(name, description, kind, default);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a number pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> ReadVar<i64> {
        let param =
            self.pipeline
                .new_parameter_num(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a string pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<String>,
        possible_values: Option<Vec<String>>,
    ) -> ReadVar<String> {
        let param =
            self.pipeline
                .new_parameter_string(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }
}

/// A single job within a [`Pipeline`], under construction.
#[must_use]
pub struct PipelineJob<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJob<'_> {
    /// (ADO only) specify which agent pool this job will be run on.
    pub fn ado_set_pool(self, pool: impl AsRef<str>) -> Self {
        self.ado_set_pool_with_demands(pool, Vec::new())
    }

    /// (ADO only) specify which agent pool this job will be run on, with
    /// additional special runner demands.
    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
            name: pool.as_ref().into(),
            demands,
        });
        self
    }

    /// (ADO only) Declare a job-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings, which may include ADO
    /// template expressions.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .ado_variables
            .insert(name.into(), value.into());
        self
    }

    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
    /// once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.pipeline.jobs[self.job_idx].ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// (ADO only) Override the ID of the job.
    ///
    /// Flowey typically generates a reasonable job ID, but some use cases that
    /// depend on the ID may find it useful to override it with something
    /// custom.
    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
        self.pipeline
            .ado_job_id_overrides
            .insert(self.job_idx, name.as_ref().into());
        self
    }

    /// (GitHub Actions only) specify which Github runner this job will be run on.
    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
        self
    }

    /// (GitHub Actions only) Manually override the `if:` condition for this
    /// particular job.
    ///
    /// **This is dangerous**, as an improperly set `if` condition may break
    /// downstream flowey jobs which assume flowey is in control of the job's
    /// scheduling logic.
    ///
    /// See
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
    /// for more info.
    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
        self
    }

    /// (GitHub Actions only) Declare a global job-level environment variable,
    /// visible to all downstream steps.
    ///
    /// `name` and `value` are both arbitrary strings, which may include GitHub
    /// Actions template expressions.
    ///
    /// **This is dangerous**, as it is easy to misuse this API in order to
    /// write a node which takes an implicit dependency on there being a global
    /// variable set on its behalf by the top-level pipeline code, making it
    /// difficult to "locally reason" about the behavior of a node simply by
    /// reading its code.
    ///
    /// Whenever possible, nodes should "late bind" environment variables:
    /// accepting a compile-time / runtime flowey parameter, and then setting it
    /// prior to executing a child command that requires it.
    ///
    /// Only use this API in exceptional cases, such as obtaining an environment
    /// variable whose value is determined by a job-level GitHub Actions
    /// expression evaluation.
    pub fn gh_dangerous_global_env_var(
        self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .gh_global_env
            .insert(name.into(), value.into());
        self
    }

    /// (GitHub Actions only) Grant permissions required by nodes in the job.
    ///
    /// For a given node handle, grant the specified permissions.
    /// The list provided must match the permissions specified within the node
    /// using `requires_permission`.
    ///
    /// NOTE: While this method is called at a node-level for auditability, the emitted
    /// yaml grants permissions at the job-level.
    ///
    /// This can lead to surprising situations: a node may omit a permission
    /// that GitHub Actions requires, yet the pipeline still executes because
    /// another node in the same job granted that permission at the job level.
    /// Removing that other node would then break the pipeline.
    ///
    /// For available permission scopes and their descriptions, see
    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
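    ///
    /// # Example
    ///
    /// A minimal sketch; the node path is hypothetical, and the permissions
    /// listed must match what the node itself declares via
    /// `requires_permission`:
    ///
    /// ```rust,ignore
    /// job.gh_grant_permissions::<some_flowey_node::Node>([
    ///     (GhPermission::Contents, GhPermissionValue::Read),
    /// ])
    /// ```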
    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
        self,
        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
    ) -> Self {
        let node_handle = NodeHandle::from_type::<N>();
        for (permission, value) in permissions {
            self.pipeline.jobs[self.job_idx]
                .gh_permissions
                .entry(node_handle)
                .or_default()
                .insert(permission, value);
        }
        self
    }

    /// Apply a flowey patch function to this job's nodes.
    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
        self.pipeline.jobs[self.job_idx]
            .patches
            .apply_patchfn(patchfn);
        self
    }

    /// Set a timeout for the job, in minutes.
    ///
    /// Not calling this will result in the platform's default timeout being used,
    /// which is typically 60 minutes, but may vary.
    pub fn with_timeout_in_minutes(self, timeout: u32) -> Self {
        self.pipeline.jobs[self.job_idx].timeout_minutes = Some(timeout);
        self
    }

    /// (ADO+Local Only) Only run the job if the specified condition is true.
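    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `platform` and `arch` are in scope:
    ///
    /// ```rust,ignore
    /// let run_tests = pipeline.new_parameter_bool(
    ///     "run_tests",
    ///     "whether to run the test job",
    ///     ParameterKind::Stable,
    ///     Some(true),
    /// );
    /// let test_job = pipeline
    ///     .new_job(platform, arch, "test")
    ///     .with_condition(run_tests)
    ///     .finish();
    /// ```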
    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
        self.pipeline.parameters[cond.idx]
            .used_by_jobs
            .insert(self.job_idx);
        self
    }

    /// Add a flow node which will be run as part of the job.
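    ///
    /// # Example
    ///
    /// A minimal sketch; the request type and artifact handle are
    /// hypothetical:
    ///
    /// ```rust,ignore
    /// let job = pipeline
    ///     .new_job(platform, arch, "build")
    ///     .dep_on(|ctx| example_node::Request {
    ///         output_dir: ctx.publish_artifact(publish_build),
    ///     })
    ///     .finish();
    /// ```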
    pub fn dep_on<R: IntoRequest + 'static>(
        self,
        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
    ) -> Self {
        // PipelineJobCtx will ensure artifact deps are taken care of
        let req = f(&mut PipelineJobCtx {
            pipeline: self.pipeline,
            job_idx: self.job_idx,
        });

        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());

        self
    }

    /// Finish describing the pipeline job.
    pub fn finish(self) -> PipelineJobHandle {
        PipelineJobHandle {
            job_idx: self.job_idx,
        }
    }

    /// Return the job's platform.
    pub fn get_platform(&self) -> FlowPlatform {
        self.pipeline.jobs[self.job_idx].platform
    }

    /// Return the job's architecture.
    pub fn get_arch(&self) -> FlowArch {
        self.pipeline.jobs[self.job_idx].arch
    }
}

/// Opaque handle to a job defined in a [`Pipeline`].
#[derive(Clone)]
pub struct PipelineJobHandle {
    job_idx: usize,
}

impl PipelineJobHandle {
    /// Whether this handle corresponds to the given job.
    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
        self.job_idx == job.job_idx
    }
}

/// Hint as to which backend the pipeline is being defined for.
#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}

/// Trait for types that can be converted into a [`Pipeline`].
///
/// This is the primary entry point for defining flowey pipelines. Implement this trait
/// to create a pipeline definition that can be executed locally or converted to CI YAML.
///
/// # Example
///
/// ```rust,no_run
/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint};
/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
///
/// struct MyPipeline;
///
/// impl IntoPipeline for MyPipeline {
///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
///         let mut pipeline = Pipeline::new();
///
///         // Define a job that runs on Linux x86_64
///         let _job = pipeline
///             .new_job(
///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
///                 FlowArch::X86_64,
///                 "build"
///             )
///             .finish();
///
///         Ok(pipeline)
///     }
/// }
/// ```
///
/// # Complex Example with Parameters and Artifacts
///
/// ```rust,ignore
/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint, ParameterKind};
/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
///
/// struct BuildPipeline;
///
/// impl IntoPipeline for BuildPipeline {
///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
///         let mut pipeline = Pipeline::new();
///
///         // Define a runtime parameter
///         let enable_tests = pipeline.new_parameter_bool(
///             "enable_tests",
///             "Whether to run tests",
///             ParameterKind::Stable,
///             Some(true) // default value
///         );
///
///         // Create an artifact for passing data between jobs
///         let (publish_build, use_build) = pipeline.new_artifact("build-output");
///
///         // Job 1: Build
///         let build_job = pipeline
///             .new_job(
///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
///                 FlowArch::X86_64,
///                 "build"
///             )
///             .with_timeout_in_minutes(30)
///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node::Request {
///                 output_dir: ctx.publish_artifact(publish_build),
///             })
///             .finish();
///
///         // Job 2: Test (conditionally run based on parameter)
///         let _test_job = pipeline
///             .new_job(
///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
///                 FlowArch::X86_64,
///                 "test"
///             )
///             .with_condition(enable_tests)
///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node2::Request {
///                 input_dir: ctx.use_artifact(&use_build),
///             })
///             .finish();
///
///         Ok(pipeline)
///     }
/// }
/// ```
pub trait IntoPipeline {
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}

fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
    match kind {
        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
        ParameterKind::Stable => name.as_ref().into(),
    }
}

/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    pub struct PipelineJobMetadata {
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        pub patches: PatchResolver,
        pub label: String,
        pub platform: FlowPlatform,
        pub arch: FlowArch,
        pub cond_param_idx: Option<usize>,
        pub timeout_minutes: Option<u32>,
        // backend specific
        pub ado_pool: Option<AdoPool>,
        pub ado_variables: BTreeMap<String, String>,
        pub gh_override_if: Option<String>,
        pub gh_pool: Option<GhRunner>,
        pub gh_global_env: BTreeMap<String, String>,
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    // TODO: support a more structured format for demands
    // See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
    #[derive(Debug, Clone)]
    pub struct AdoPool {
        pub name: String,
        pub demands: Vec<String>,
    }

    #[derive(Debug)]
    pub struct ArtifactMeta {
        pub name: String,
        pub published_by_job: Option<usize>,
        pub used_by_jobs: BTreeSet<usize>,
    }

    #[derive(Debug)]
    pub struct ParameterMeta {
        pub parameter: Parameter,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all fields marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}