// flowey_core/pipeline.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Core types and traits used to create and work with flowey pipelines.
5
6mod artifact;
7
8pub use artifact::Artifact;
9
10use self::internal::*;
11use crate::node::FlowArch;
12use crate::node::FlowNodeBase;
13use crate::node::FlowPlatform;
14use crate::node::FlowPlatformLinuxDistro;
15use crate::node::GhUserSecretVar;
16use crate::node::IntoRequest;
17use crate::node::NodeHandle;
18use crate::node::ReadVar;
19use crate::node::WriteVar;
20use crate::node::steps::ado::AdoResourcesRepositoryId;
21use crate::node::user_facing::AdoRuntimeVar;
22use crate::node::user_facing::GhPermission;
23use crate::node::user_facing::GhPermissionValue;
24use crate::patch::PatchResolver;
25use crate::patch::ResolvedPatches;
26use serde::Serialize;
27use serde::de::DeserializeOwned;
28use std::collections::BTreeMap;
29use std::collections::BTreeSet;
30use std::path::PathBuf;
31
/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
///
/// This module is purely re-exports: the pipeline-builder API surface defined
/// in this file, plus the platform/arch enums from [`crate::node`].
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}
62
63fn linux_distro() -> FlowPlatformLinuxDistro {
64    // Check for nix environment first - takes precedence over distro detection
65    if std::env::var("IN_NIX_SHELL").is_ok() {
66        return FlowPlatformLinuxDistro::Nix;
67    }
68
69    // A `nix develop` shell doesn't set `IN_NIX_SHELL`, but the PATH should include a nix store path
70    if std::env::var("PATH").is_ok_and(|path| path.contains("/nix/store")) {
71        return FlowPlatformLinuxDistro::Nix;
72    }
73
74    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
75        if etc_os_release.contains("ID=ubuntu") {
76            FlowPlatformLinuxDistro::Ubuntu
77        } else if etc_os_release.contains("ID=fedora") {
78            FlowPlatformLinuxDistro::Fedora
79        } else if etc_os_release.contains("ID=azurelinux") || etc_os_release.contains("ID=mariner")
80        {
81            FlowPlatformLinuxDistro::AzureLinux
82        } else if etc_os_release.contains("ID=arch") {
83            FlowPlatformLinuxDistro::Arch
84        } else {
85            FlowPlatformLinuxDistro::Unknown
86        }
87    } else {
88        FlowPlatformLinuxDistro::Unknown
89    }
90}
91
/// Extension trait for resolving a flowey type to the value matching the
/// machine flowey itself is currently running on.
pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self;
}
98
99impl HostExt for FlowPlatform {
100    /// Return the platform of the current host machine.
101    ///
102    /// Will panic on non-local backends.
103    fn host(backend_hint: PipelineBackendHint) -> Self {
104        if !matches!(backend_hint, PipelineBackendHint::Local) {
105            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
106        }
107
108        if cfg!(target_os = "windows") {
109            Self::Windows
110        } else if cfg!(target_os = "linux") {
111            Self::Linux(linux_distro())
112        } else if cfg!(target_os = "macos") {
113            Self::MacOs
114        } else {
115            panic!("no valid host-os")
116        }
117    }
118}
119
impl HostExt for FlowArch {
    /// Return the arch of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
        }

        // NOTE(review): the `xtask-fmt allow-target-arch` markers appear to
        // suppress a repo-local lint on `target_arch` checks - keep each one
        // directly above its `cfg!` line.
        // xtask-fmt allow-target-arch oneoff-flowey
        if cfg!(target_arch = "x86_64") {
            Self::X86_64
        // xtask-fmt allow-target-arch oneoff-flowey
        } else if cfg!(target_arch = "aarch64") {
            Self::Aarch64
        } else {
            panic!("no valid host-arch")
        }
    }
}
140
/// Trigger ADO pipelines on a recurring schedule.
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
155
/// Trigger ADO pipelines per PR
///
/// The [`Default`] impl yields no branch filters, skips draft PRs, and
/// auto-cancels on new commits.
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
}
171
/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}
190
191impl Default for AdoPrTriggers {
192    fn default() -> Self {
193        Self {
194            branches: Vec::new(),
195            exclude_branches: Vec::new(),
196            run_on_draft: false,
197            auto_cancel: true,
198        }
199    }
200}
201
/// ADO repository resource.
///
/// Registered on a pipeline via [`Pipeline::ado_add_resources_repository`].
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}
214
/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// GitHub repository
    GitHub,
}
223
/// ADO repository ref
///
/// Generic over `P` so the user-facing form can hold a
/// [`UseParameter<String>`] handle, while flowey's internal form stores the
/// raw parameter index instead.
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}
232
/// Trigger Github Actions pipelines on a recurring schedule.
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
242
/// Trigger Github Actions pipelines per PR
///
/// See [`GhPrTriggers::new_draftable`] for a preset that also fires when a
/// draft PR is marked ready for review.
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
}
258
/// Trigger Github Actions pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
}
275
276impl GhPrTriggers {
277    /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
278    pub fn new_draftable() -> Self {
279        Self {
280            branches: Vec::new(),
281            exclude_branches: Vec::new(),
282            types: vec![
283                "opened".into(),
284                "synchronize".into(),
285                "reopened".into(),
286                "ready_for_review".into(),
287            ],
288            auto_cancel: true,
289        }
290    }
291}
292
/// OS label used to select a GitHub-hosted runner (see
/// [`GhRunner::GhHosted`]).
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    UbuntuLatest,
    Ubuntu2404,
    Ubuntu2204,
    WindowsLatest,
    Windows2025,
    Windows2022,
    Ubuntu2404Arm,
    Ubuntu2204Arm,
    Windows11Arm,
    /// An arbitrary label not covered by the variants above.
    Custom(String),
}
306
/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// A GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self hosted runners are selected by matching runner labels to `labels`.
    /// 'self-hosted' is a common label for self hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// This uses a runner belonging to `group` that matches all labels in `labels`.
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}
323
324impl GhRunner {
325    /// Whether this is a self-hosted runner with the provided label
326    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
327        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
328    }
329}
330
/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}
341
/// Opaque handle to a pipeline-level parameter, returned by the
/// `Pipeline::new_parameter_*` family of methods.
#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    // Index of the parameter within `Pipeline::parameters`.
    idx: usize,
    // Marks the parameter's value type without storing one.
    _kind: std::marker::PhantomData<T>,
}
348
/// Opaque handle to an artifact which must be published by a single job.
///
/// Deliberately not `Clone`: only one job may publish a given artifact.
#[must_use]
pub struct PublishArtifact {
    // Index of the artifact within `Pipeline::artifacts`.
    idx: usize,
}
354
/// Opaque handle to an artifact which can be used by one or more jobs.
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    // Index of the artifact within `Pipeline::artifacts`.
    idx: usize,
}
361
/// Opaque handle to an artifact of type `T` which must be published by a single job.
// `PhantomData<fn() -> T>` records the payload type without owning a `T`.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);
365
/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
// `PhantomData<fn(T)>` records the payload type without owning a `T`.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);
370
371impl<T> Clone for UseTypedArtifact<T> {
372    fn clone(&self) -> Self {
373        UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
374    }
375}
376
/// Builder used to declare a flowey pipeline: accumulates jobs, artifacts,
/// pipeline-level parameters, extra inter-job dependencies, and
/// backend-specific (ADO / GitHub Actions) configuration.
#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    // Extra (depends_on, dependent) job-index pairs declared via
    // `non_artifact_dep`.
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}
405
406impl Pipeline {
407    pub fn new() -> Pipeline {
408        Pipeline::default()
409    }
410
411    /// Inject all pipeline jobs with some common logic. (e.g: to resolve common
412    /// configuration requirements shared by all jobs).
413    ///
414    /// Can only be invoked once per pipeline.
415    #[track_caller]
416    pub fn inject_all_jobs_with(
417        &mut self,
418        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
419    ) -> &mut Self {
420        if self.inject_all_jobs_with.is_some() {
421            panic!("can only call inject_all_jobs_with once!")
422        }
423        self.inject_all_jobs_with = Some(Box::new(cb));
424        self
425    }
426
    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }
460
    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
    ///
    /// Calling this more than once replaces the previously-set callback.
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }
475
    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }
482
    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }
489
    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }
496
497    /// (ADO only) Declare a new repository resource, returning a type-safe
498    /// handle which downstream ADO steps are able to consume via
499    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
500    pub fn ado_add_resources_repository(
501        &mut self,
502        repo: AdoResourcesRepository,
503    ) -> AdoResourcesRepositoryId {
504        let AdoResourcesRepository {
505            repo_type,
506            name,
507            git_ref,
508            endpoint,
509        } = repo;
510
511        let repo_id = format!("repo{}", self.ado_resources_repository.len());
512
513        self.ado_resources_repository
514            .push(InternalAdoResourcesRepository {
515                repo_id: repo_id.clone(),
516                repo_type,
517                name,
518                git_ref: match git_ref {
519                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
520                    AdoResourcesRepositoryRef::Parameter(p) => {
521                        AdoResourcesRepositoryRef::Parameter(p.idx)
522                    }
523                },
524                endpoint,
525            });
526        AdoResourcesRepositoryId { repo_id }
527    }
528
    /// (GitHub Actions only) Set the pipeline-level name.
    ///
    /// Calling this more than once replaces the previously-set name.
    ///
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
        self.gh_name = Some(name.as_ref().into());
        self
    }
536
    /// Provide a YAML template used to bootstrap flowey at the start of a
    /// GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }
570
    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }
577
    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }
584
    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }
591
592    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
593    ///
594    /// For more information on defining secrets for use in GitHub Actions, see
595    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
596    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
597        GhUserSecretVar(secret_name.as_ref().to_string())
598    }
599
600    pub fn new_job(
601        &mut self,
602        platform: FlowPlatform,
603        arch: FlowArch,
604        label: impl AsRef<str>,
605    ) -> PipelineJob<'_> {
606        let idx = self.jobs.len();
607        self.jobs.push(PipelineJobMetadata {
608            root_nodes: BTreeMap::new(),
609            patches: ResolvedPatches::build(),
610            label: label.as_ref().into(),
611            platform,
612            arch,
613            cond_param_idx: None,
614            timeout_minutes: None,
615            command_wrapper: None,
616            ado_pool: None,
617            ado_variables: BTreeMap::new(),
618            gh_override_if: None,
619            gh_global_env: BTreeMap::new(),
620            gh_pool: None,
621            gh_permissions: BTreeMap::new(),
622        });
623
624        PipelineJob {
625            pipeline: self,
626            job_idx: idx,
627        }
628    }
629
    /// Declare a dependency between two jobs that is not the result of an
    /// artifact.
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        // stored as (depends_on, dependent) index pairs
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }
641
642    #[track_caller]
643    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
644        let name = name.as_ref();
645        let owned_name = name.to_string();
646
647        let not_exists = self.artifact_names.insert(owned_name.clone());
648        if !not_exists {
649            panic!("duplicate artifact name: {}", name)
650        }
651
652        let idx = self.artifacts.len();
653        self.artifacts.push(ArtifactMeta {
654            name: owned_name,
655            published_by_job: None,
656            used_by_jobs: BTreeSet::new(),
657        });
658
659        (PublishArtifact { idx }, UseArtifact { idx })
660    }
661
662    /// Returns a pair of opaque handles to a new artifact for use across jobs
663    /// in the pipeline.
664    #[track_caller]
665    pub fn new_typed_artifact<T: Artifact>(
666        &mut self,
667        name: impl AsRef<str>,
668    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
669        let (publish, use_artifact) = self.new_artifact(name);
670        (
671            PublishTypedArtifact(publish, std::marker::PhantomData),
672            UseTypedArtifact(use_artifact, std::marker::PhantomData),
673        )
674    }
675
    /// (ADO only) Set the pipeline-level name.
    ///
    /// Calling this more than once replaces the previously-set name.
    ///
    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
        self.ado_name = Some(name);
        self
    }
683
    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }
710
    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }
742
    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
784
    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
828
829    /// Declare a pipeline-level runtime parameter with type `String`.
830    ///
831    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
832    /// [`PipelineJobCtx::use_parameter`] method.
833    ///
834    /// `name` is the name of the parameter.
835    ///
836    /// `description` is an arbitrary string, which will be be shown to users.
837    ///
838    /// `kind` is the type of parameter and if it should be treated as a stable
839    /// external API to callers of the pipeline.
840    ///
841    /// `default` is the default value for the parameter. If none is provided,
842    /// the parameter _must_ be specified in order for the pipeline to run.
843    ///
844    /// `possible_values` allows restricting inputs to a set of possible values.
845    /// Depending on the backend, these options may be presented as a set of
846    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
847    /// then any string is allowed.
848    pub fn new_parameter_string(
849        &mut self,
850        name: impl AsRef<str>,
851        description: impl AsRef<str>,
852        kind: ParameterKind,
853        default: Option<impl AsRef<str>>,
854        possible_values: Option<Vec<String>>,
855    ) -> UseParameter<String> {
856        let idx = self.parameters.len();
857        let name = new_parameter_name(name, kind.clone());
858        self.parameters.push(ParameterMeta {
859            parameter: Parameter::String {
860                name,
861                description: description.as_ref().into(),
862                kind,
863                default: default.map(|x| x.as_ref().into()),
864                possible_values,
865            },
866            used_by_jobs: BTreeSet::new(),
867        });
868
869        UseParameter {
870            idx,
871            _kind: std::marker::PhantomData,
872        }
873    }
874}
875
/// Context handed to [`PipelineJob::dep_on`] closures, used to wire artifacts
/// and runtime parameters into the nodes that make up a job.
pub struct PipelineJobCtx<'a> {
    // Pipeline this job belongs to; mutated to record artifact/parameter usage.
    pipeline: &'a mut Pipeline,
    // Index of the job within `pipeline.jobs`.
    job_idx: usize,
}
880
881impl PipelineJobCtx<'_> {
882    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
883    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
884        self.pipeline.dummy_done_idx += 1;
885        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
886    }
887
888    /// Claim that this job will use this artifact, obtaining a path to a folder
889    /// with the artifact's contents.
890    pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
891        self.pipeline.artifacts[artifact.idx]
892            .used_by_jobs
893            .insert(self.job_idx);
894
895        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
896            &self.pipeline.artifacts[artifact.idx].name,
897            true,
898        ))
899    }
900
901    /// Claim that this job will publish this artifact, obtaining a path to a
902    /// fresh, empty folder which will be published as the specific artifact at
903    /// the end of the job.
904    pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
905        let existing = self.pipeline.artifacts[artifact.idx]
906            .published_by_job
907            .replace(self.job_idx);
908        assert!(existing.is_none()); // PublishArtifact isn't cloneable
909
910        crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
911            &self.pipeline.artifacts[artifact.idx].name,
912            false,
913        ))
914    }
915
916    fn helper_request<R: IntoRequest>(&mut self, req: R)
917    where
918        R::Node: 'static,
919    {
920        self.pipeline.jobs[self.job_idx]
921            .root_nodes
922            .entry(NodeHandle::from_type::<R::Node>())
923            .or_default()
924            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
925    }
926
927    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
928        let artifact_map_idx = self.pipeline.artifact_map_idx;
929        self.pipeline.artifact_map_idx += 1;
930
931        let backing_var = format!("artifact_map{}", artifact_map_idx);
932        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
933        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
934        (read_var, write_var)
935    }
936
937    /// Claim that this job will use this artifact, obtaining the resolved
938    /// contents of the artifact.
939    pub fn use_typed_artifact<T: Artifact>(
940        &mut self,
941        artifact: &UseTypedArtifact<T>,
942    ) -> ReadVar<T> {
943        let artifact_path = self.use_artifact(&artifact.0);
944        let (read, write) = self.new_artifact_map_vars::<T>();
945        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
946        read
947    }
948
949    /// Claim that this job will publish this artifact, obtaining a variable to
950    /// write the artifact's contents to. The artifact will be published at
951    /// the end of the job.
952    pub fn publish_typed_artifact<T: Artifact>(
953        &mut self,
954        artifact: PublishTypedArtifact<T>,
955    ) -> WriteVar<T> {
956        let artifact_path = self.publish_artifact(artifact.0);
957        let (read, write) = self.new_artifact_map_vars::<T>();
958        let done = self.new_done_handle();
959        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
960        write
961    }
962
963    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
964    /// specified at runtime.
965    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
966    where
967        T: Serialize + DeserializeOwned,
968    {
969        self.pipeline.parameters[param.idx]
970            .used_by_jobs
971            .insert(self.job_idx);
972
973        crate::node::thin_air_read_runtime_var(
974            self.pipeline.parameters[param.idx]
975                .parameter
976                .name()
977                .to_string(),
978        )
979    }
980
981    /// Shortcut which allows defining a bool pipeline parameter within a Job.
982    ///
983    /// To share a single parameter between multiple jobs, don't use this method
984    /// - use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`] instead.
985    pub fn new_parameter_bool(
986        &mut self,
987        name: impl AsRef<str>,
988        description: impl AsRef<str>,
989        kind: ParameterKind,
990        default: Option<bool>,
991    ) -> ReadVar<bool> {
992        let param = self
993            .pipeline
994            .new_parameter_bool(name, description, kind, default);
995        self.use_parameter(param)
996    }
997
998    /// Shortcut which allows defining a number pipeline parameter within a Job.
999    ///
1000    /// To share a single parameter between multiple jobs, don't use this method
1001    /// - use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`] instead.
1002    pub fn new_parameter_num(
1003        &mut self,
1004        name: impl AsRef<str>,
1005        description: impl AsRef<str>,
1006        kind: ParameterKind,
1007        default: Option<i64>,
1008        possible_values: Option<Vec<i64>>,
1009    ) -> ReadVar<i64> {
1010        let param =
1011            self.pipeline
1012                .new_parameter_num(name, description, kind, default, possible_values);
1013        self.use_parameter(param)
1014    }
1015
1016    /// Shortcut which allows defining a string pipeline parameter within a Job.
1017    ///
1018    /// To share a single parameter between multiple jobs, don't use this method
1019    /// - use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`] instead.
1020    pub fn new_parameter_string(
1021        &mut self,
1022        name: impl AsRef<str>,
1023        description: impl AsRef<str>,
1024        kind: ParameterKind,
1025        default: Option<String>,
1026        possible_values: Option<Vec<String>>,
1027    ) -> ReadVar<String> {
1028        let param =
1029            self.pipeline
1030                .new_parameter_string(name, description, kind, default, possible_values);
1031        self.use_parameter(param)
1032    }
1033}
1034
/// Builder used to configure a single job within a [`Pipeline`].
///
/// Once the job is fully described, call [`PipelineJob::finish`] to obtain a
/// [`PipelineJobHandle`].
#[must_use]
pub struct PipelineJob<'a> {
    // Pipeline this job belongs to; mutated by the builder methods.
    pipeline: &'a mut Pipeline,
    // Index of the job within `pipeline.jobs`.
    job_idx: usize,
}
1040
impl PipelineJob<'_> {
    /// (ADO only) specify which agent pool this job will be run on.
    pub fn ado_set_pool(self, pool: impl AsRef<str>) -> Self {
        // Delegates to the demand-aware variant with no demands.
        self.ado_set_pool_with_demands(pool, Vec::new())
    }

    /// (ADO only) specify which agent pool this job will be run on, with
    /// additional special runner demands.
    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
            name: pool.as_ref().into(),
            demands,
        });
        self
    }

    /// (ADO only) Declare a job-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings, which may include ADO
    /// template expressions.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        // Later inserts with the same name overwrite earlier ones.
        self.pipeline.jobs[self.job_idx]
            .ado_variables
            .insert(name.into(), value.into());
        self
    }

    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
    /// once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.pipeline.jobs[self.job_idx].ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// (ADO only) Overrides the id of the job.
    ///
    /// Flowey typically generates a reasonable job ID but some use cases that depend
    /// on the ID may find it useful to override it to something custom.
    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
        self.pipeline
            .ado_job_id_overrides
            .insert(self.job_idx, name.as_ref().into());
        self
    }

    /// (GitHub Actions only) specify which Github runner this job will be run on.
    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
        self
    }

    /// (GitHub Actions only) Manually override the `if:` condition for this
    /// particular job.
    ///
    /// **This is dangerous**, as an improperly set `if` condition may break
    /// downstream flowey jobs which assume flowey is in control of the job's
    /// scheduling logic.
    ///
    /// See
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
    /// for more info.
    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
        self
    }

    /// (GitHub Actions only) Declare a global job-level environment variable,
    /// visible to all downstream steps.
    ///
    /// `name` and `value` are both arbitrary strings, which may include GitHub
    /// Actions template expressions.
    ///
    /// **This is dangerous**, as it is easy to misuse this API in order to
    /// write a node which takes an implicit dependency on there being a global
    /// variable set on its behalf by the top-level pipeline code, making it
    /// difficult to "locally reason" about the behavior of a node simply by
    /// reading its code.
    ///
    /// Whenever possible, nodes should "late bind" environment variables:
    /// accepting a compile-time / runtime flowey parameter, and then setting it
    /// prior to executing a child command that requires it.
    ///
    /// Only use this API in exceptional cases, such as obtaining an environment
    /// variable whose value is determined by a job-level GitHub Actions
    /// expression evaluation.
    pub fn gh_dangerous_global_env_var(
        self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        // Later inserts with the same name overwrite earlier ones.
        self.pipeline.jobs[self.job_idx]
            .gh_global_env
            .insert(name.into(), value.into());
        self
    }

    /// (GitHub Actions only) Grant permissions required by nodes in the job.
    ///
    /// For a given node handle, grant the specified permissions.
    /// The list provided must match the permissions specified within the node
    /// using `requires_permission`.
    ///
    /// NOTE: While this method is called at a node-level for auditability, the emitted
    /// yaml grants permissions at the job-level.
    ///
    /// This can lead to weird situations where node 1 might not specify a permission
    /// required according to Github Actions, but due to job-level granting of the permission
    /// by another node 2, the pipeline executes even though it wouldn't if node 2 was removed.
    ///
    /// For available permission scopes and their descriptions, see
    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
        self,
        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
    ) -> Self {
        let node_handle = NodeHandle::from_type::<N>();
        for (permission, value) in permissions {
            // Grants accumulate per-node; repeated permissions overwrite.
            self.pipeline.jobs[self.job_idx]
                .gh_permissions
                .entry(node_handle)
                .or_default()
                .insert(permission, value);
        }
        self
    }

    /// Apply a patch function to this job's flow, registering it with the
    /// job's patch resolver.
    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
        self.pipeline.jobs[self.job_idx]
            .patches
            .apply_patchfn(patchfn);
        self
    }

    /// Set a timeout for the job, in minutes.
    ///
    /// Not calling this will result in the platform's default timeout being used,
    /// which is typically 60 minutes, but may vary.
    pub fn with_timeout_in_minutes(self, timeout: u32) -> Self {
        self.pipeline.jobs[self.job_idx].timeout_minutes = Some(timeout);
        self
    }

    /// (ADO+Local Only) Only run the job if the specified condition is true.
    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
        // The condition parameter counts as "used by" this job.
        self.pipeline.parameters[cond.idx]
            .used_by_jobs
            .insert(self.job_idx);
        self
    }

    /// Set a [`CommandWrapperKind`] that will be applied to all shell
    /// commands executed in this job's steps.
    ///
    /// The wrapper is applied both when running locally (via direct run)
    /// and when running in CI (the kind is serialized into
    /// `pipeline.json` and reconstructed at runtime).
    ///
    /// [`CommandWrapperKind`]: crate::shell::CommandWrapperKind
    pub fn set_command_wrapper(self, wrapper: crate::shell::CommandWrapperKind) -> Self {
        self.pipeline.jobs[self.job_idx].command_wrapper = Some(wrapper);
        self
    }

    /// Add a flow node which will be run as part of the job.
    ///
    /// The closure receives a [`PipelineJobCtx`] for wiring artifacts and
    /// parameters into the node's request.
    pub fn dep_on<R: IntoRequest + 'static>(
        self,
        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
    ) -> Self {
        // JobToNodeCtx will ensure artifact deps are taken care of
        let req = f(&mut PipelineJobCtx {
            pipeline: self.pipeline,
            job_idx: self.job_idx,
        });

        // Record the request (serialized as JSON) as a root node of this job.
        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());

        self
    }

    /// Finish describing the pipeline job.
    pub fn finish(self) -> PipelineJobHandle {
        PipelineJobHandle {
            job_idx: self.job_idx,
        }
    }

    /// Return the job's platform.
    pub fn get_platform(&self) -> FlowPlatform {
        self.pipeline.jobs[self.job_idx].platform
    }

    /// Return the job's architecture.
    pub fn get_arch(&self) -> FlowArch {
        self.pipeline.jobs[self.job_idx].arch
    }
}
1280
/// A cloneable handle to a fully-described pipeline job, obtained via
/// [`PipelineJob::finish`].
#[derive(Clone)]
pub struct PipelineJobHandle {
    // Index of the job within the pipeline's job list.
    job_idx: usize,
}
1285
1286impl PipelineJobHandle {
1287    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
1288        self.job_idx == job.job_idx
1289    }
1290}
1291
/// Hint passed to [`IntoPipeline::into_pipeline`] indicating which backend the
/// pipeline is being built for.
#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}
1301
1302/// Trait for types that can be converted into a [`Pipeline`].
1303///
1304/// This is the primary entry point for defining flowey pipelines. Implement this trait
1305/// to create a pipeline definition that can be executed locally or converted to CI YAML.
1306///
1307/// # Example
1308///
1309/// ```rust,no_run
1310/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint};
1311/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1312///
1313/// struct MyPipeline;
1314///
1315/// impl IntoPipeline for MyPipeline {
1316///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1317///         let mut pipeline = Pipeline::new();
1318///
1319///         // Define a job that runs on Linux x86_64
1320///         let _job = pipeline
1321///             .new_job(
1322///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1323///                 FlowArch::X86_64,
1324///                 "build"
1325///             )
1326///             .finish();
1327///
1328///         Ok(pipeline)
1329///     }
1330/// }
1331/// ```
1332///
1333/// # Complex Example with Parameters and Artifacts
1334///
1335/// ```rust,ignore
1336/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint, ParameterKind};
1337/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1338///
1339/// struct BuildPipeline;
1340///
1341/// impl IntoPipeline for BuildPipeline {
1342///     fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1343///         let mut pipeline = Pipeline::new();
1344///
1345///         // Define a runtime parameter
1346///         let enable_tests = pipeline.new_parameter_bool(
1347///             "enable_tests",
1348///             "Whether to run tests",
1349///             ParameterKind::Stable,
1350///             Some(true) // default value
1351///         );
1352///
1353///         // Create an artifact for passing data between jobs
1354///         let (publish_build, use_build) = pipeline.new_artifact("build-output");
1355///
1356///         // Job 1: Build
1357///         let build_job = pipeline
1358///             .new_job(
1359///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1360///                 FlowArch::X86_64,
1361///                 "build"
1362///             )
1363///             .with_timeout_in_minutes(30)
1364///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node::Request {
1365///                 output_dir: ctx.publish_artifact(publish_build),
1366///             })
1367///             .finish();
1368///
1369///         // Job 2: Test (conditionally run based on parameter)
1370///         let _test_job = pipeline
1371///             .new_job(
1372///                 FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1373///                 FlowArch::X86_64,
1374///                 "test"
1375///             )
1376///             .with_condition(enable_tests)
1377///             .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node2::Request {
1378///                 input_dir: ctx.use_artifact(&use_build),
1379///             })
1380///             .finish();
1381///
1382///         Ok(pipeline)
1383///     }
1384/// }
1385/// ```
pub trait IntoPipeline {
    /// Consume `self`, producing the fully-described [`Pipeline`].
    ///
    /// `backend_hint` indicates which backend the pipeline is being built for,
    /// allowing definitions to tailor themselves accordingly.
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}
1389
1390fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
1391    match kind {
1392        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
1393        ParameterKind::Stable => name.as_ref().into(),
1394    }
1395}
1396
/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    /// Returns the well-known runtime variable name used to communicate an
    /// artifact's local folder path, with `is_use` selecting between the
    /// consuming (`use_from`) and publishing (`publish_from`) side.
    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    /// An ADO repository resource, as resolved during pipeline construction.
    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    /// Metadata describing a single job within a pipeline.
    pub struct PipelineJobMetadata {
        /// Root-node requests for the job, keyed by node, with each request
        /// serialized as JSON bytes.
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        /// Patch functions registered against this job's flow.
        pub patches: PatchResolver,
        /// Human-friendly label for the job.
        pub label: String,
        /// Platform the job runs on.
        pub platform: FlowPlatform,
        /// Architecture the job runs on.
        pub arch: FlowArch,
        /// Index of the bool parameter gating whether the job runs, if any.
        pub cond_param_idx: Option<usize>,
        /// Job timeout in minutes, if overridden via
        /// `PipelineJob::with_timeout_in_minutes`.
        pub timeout_minutes: Option<u32>,
        /// Wrapper applied to all shell commands executed by the job's steps.
        pub command_wrapper: Option<crate::shell::CommandWrapperKind>,
        // backend specific
        /// (ADO) agent pool (and demands) the job runs on.
        pub ado_pool: Option<AdoPool>,
        /// (ADO) job-level named, read-only variables.
        pub ado_variables: BTreeMap<String, String>,
        /// (GitHub Actions) manual override of the job's `if:` condition.
        pub gh_override_if: Option<String>,
        /// (GitHub Actions) runner the job runs on.
        pub gh_pool: Option<GhRunner>,
        /// (GitHub Actions) job-level global environment variables.
        pub gh_global_env: BTreeMap<String, String>,
        /// (GitHub Actions) per-node permission grants (emitted at job level).
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    // TODO: support a more structured format for demands
    // See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
    /// An ADO agent pool, plus any special runner demands.
    #[derive(Debug, Clone)]
    pub struct AdoPool {
        pub name: String,
        pub demands: Vec<String>,
    }

    /// Bookkeeping for a single pipeline artifact: which job publishes it, and
    /// which jobs consume it.
    #[derive(Debug)]
    pub struct ArtifactMeta {
        pub name: String,
        pub published_by_job: Option<usize>,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Bookkeeping for a single pipeline parameter and the jobs that use it.
    #[derive(Debug)]
    pub struct ParameterMeta {
        pub parameter: Parameter,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all field marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        /// Tear down a fully-described [`Pipeline`] into its finalized,
        /// all-public mirror, applying any pending job-injection callback and
        /// global patch functions along the way.
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            // Run the job-injection callback (if any) over every job before
            // destructuring the pipeline.
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            // Exhaustive destructuring: adding a field to `Pipeline` forces
            // this code to acknowledge it.
            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            // Pipeline-wide patch functions apply to every job.
            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    /// A pipeline-level runtime parameter, in one of the three supported
    /// primitive types.
    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        /// Returns the parameter's backend-facing name, regardless of variant.
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}