flowey_core/pipeline.rs
1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Core types and traits used to create and work with flowey pipelines.
5
6mod artifact;
7
8pub use artifact::Artifact;
9
10use self::internal::*;
11use crate::node::FlowArch;
12use crate::node::FlowNodeBase;
13use crate::node::FlowPlatform;
14use crate::node::FlowPlatformLinuxDistro;
15use crate::node::GhUserSecretVar;
16use crate::node::IntoConfig;
17use crate::node::IntoRequest;
18use crate::node::NodeHandle;
19use crate::node::ReadVar;
20use crate::node::WriteVar;
21use crate::node::steps::ado::AdoResourcesRepositoryId;
22use crate::node::user_facing::AdoRuntimeVar;
23use crate::node::user_facing::GhPermission;
24use crate::node::user_facing::GhPermissionValue;
25use crate::patch::PatchResolver;
26use crate::patch::ResolvedPatches;
27use serde::Serialize;
28use serde::de::DeserializeOwned;
29use std::collections::BTreeMap;
30use std::collections::BTreeSet;
31use std::path::PathBuf;
32
/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
///
/// These are the types pipeline authors interact with directly when defining
/// jobs, triggers, artifacts, and parameters.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPool;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}
64
65fn linux_distro() -> FlowPlatformLinuxDistro {
66 // Check for nix environment first - takes precedence over distro detection
67 if std::env::var("IN_NIX_SHELL").is_ok() {
68 return FlowPlatformLinuxDistro::Nix;
69 }
70
71 // A `nix develop` shell doesn't set `IN_NIX_SHELL`, but the PATH should include a nix store path
72 if std::env::var("PATH").is_ok_and(|path| path.contains("/nix/store")) {
73 return FlowPlatformLinuxDistro::Nix;
74 }
75
76 if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
77 if etc_os_release.contains("ID=ubuntu") {
78 FlowPlatformLinuxDistro::Ubuntu
79 } else if etc_os_release.contains("ID=fedora") {
80 FlowPlatformLinuxDistro::Fedora
81 } else if etc_os_release.contains("ID=azurelinux") || etc_os_release.contains("ID=mariner")
82 {
83 FlowPlatformLinuxDistro::AzureLinux
84 } else if etc_os_release.contains("ID=arch") {
85 FlowPlatformLinuxDistro::Arch
86 } else {
87 FlowPlatformLinuxDistro::Unknown
88 }
89 } else {
90 FlowPlatformLinuxDistro::Unknown
91 }
92}
93
/// Extension trait for deriving a flow value (e.g: platform, arch) from the
/// machine flowey is currently running on.
pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self;
}
100
101impl HostExt for FlowPlatform {
102 /// Return the platform of the current host machine.
103 ///
104 /// Will panic on non-local backends.
105 fn host(backend_hint: PipelineBackendHint) -> Self {
106 if !matches!(backend_hint, PipelineBackendHint::Local) {
107 panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
108 }
109
110 if cfg!(target_os = "windows") {
111 Self::Windows
112 } else if cfg!(target_os = "linux") {
113 Self::Linux(linux_distro())
114 } else if cfg!(target_os = "macos") {
115 Self::MacOs
116 } else {
117 panic!("no valid host-os")
118 }
119 }
120}
121
122impl HostExt for FlowArch {
123 /// Return the arch of the current host machine.
124 ///
125 /// Will panic on non-local backends.
126 fn host(backend_hint: PipelineBackendHint) -> Self {
127 if !matches!(backend_hint, PipelineBackendHint::Local) {
128 panic!("can only use `FlowArch::host` when defining a local-only pipeline");
129 }
130
131 // xtask-fmt allow-target-arch oneoff-flowey
132 if cfg!(target_arch = "x86_64") {
133 Self::X86_64
134 // xtask-fmt allow-target-arch oneoff-flowey
135 } else if cfg!(target_arch = "aarch64") {
136 Self::Aarch64
137 } else {
138 panic!("no valid host-arch")
139 }
140 }
141}
142
/// Trigger ADO pipelines on a schedule (cron).
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
157
/// Trigger ADO pipelines per PR.
///
/// See [`AdoPrTriggers::default`] for the default trigger configuration.
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Only run the pipeline when files matching these paths are changed
    /// (supports glob syntax)
    pub paths: Vec<String>,
    /// Specify any paths which should be filtered out from the list of
    /// `paths` (supports glob syntax)
    pub exclude_paths: Vec<String>,
}
179
/// Trigger ADO pipelines per CI (i.e: on pushes to matching branches / tags).
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
    /// Only run the pipeline when files matching these paths are changed
    /// (supports glob syntax)
    pub paths: Vec<String>,
    /// Specify any paths which should be filtered out from the list of
    /// `paths` (supports glob syntax)
    pub exclude_paths: Vec<String>,
}
204
205impl Default for AdoPrTriggers {
206 fn default() -> Self {
207 Self {
208 branches: Vec::new(),
209 exclude_branches: Vec::new(),
210 run_on_draft: false,
211 auto_cancel: true,
212 paths: Vec::new(),
213 exclude_paths: Vec::new(),
214 }
215 }
216}
217
/// ADO repository resource.
///
/// Declared via [`Pipeline::ado_add_resources_repository`].
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}
230
/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// Github repository
    GitHub,
}
239
/// ADO repository ref
///
/// Generic over `P` so the same type can carry a user-facing
/// [`UseParameter<String>`] handle, or (internally) a raw parameter index.
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}
248
/// Trigger Github Actions pipelines on a schedule (cron).
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}
258
/// Trigger Github Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
    /// Only run the pipeline when files matching these paths are changed
    /// (supports glob syntax)
    pub paths: Vec<String>,
    /// Specify any paths which should be filtered out from the list of
    /// `paths` (supports glob syntax)
    pub paths_ignore: Vec<String>,
}
280
/// Trigger Github Actions pipelines per CI (i.e: on pushes to matching
/// branches / tags).
#[derive(Debug, Default)]
pub struct GhCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Only run the pipeline when files matching these paths are changed
    /// (supports glob syntax)
    pub paths: Vec<String>,
    /// Specify any paths which should be filtered out from the list of
    /// `paths` (supports glob syntax)
    pub paths_ignore: Vec<String>,
}
303
304impl GhPrTriggers {
305 /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
306 pub fn new_draftable() -> Self {
307 Self {
308 branches: Vec::new(),
309 exclude_branches: Vec::new(),
310 types: vec![
311 "opened".into(),
312 "synchronize".into(),
313 "reopened".into(),
314 "ready_for_review".into(),
315 ],
316 auto_cancel: true,
317 paths: Vec::new(),
318 paths_ignore: Vec::new(),
319 }
320 }
321}
322
/// OS label used to select a GitHub-hosted runner image.
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    /// Latest Ubuntu image
    UbuntuLatest,
    /// Ubuntu 24.04
    Ubuntu2404,
    /// Ubuntu 22.04
    Ubuntu2204,
    /// Latest Windows image
    WindowsLatest,
    /// Windows Server 2025
    Windows2025,
    /// Windows Server 2022
    Windows2022,
    /// Ubuntu 24.04 (ARM)
    Ubuntu2404Arm,
    /// Ubuntu 22.04 (ARM)
    Ubuntu2204Arm,
    /// Windows 11 (ARM)
    Windows11Arm,
    /// Arbitrary user-specified label
    Custom(String),
}
336
/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self hosted runners are selected by matching runner labels to `labels`.
    ///
    /// 'self-hosted' is a common label for self hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// This uses a runner belonging to `group` that matches all labels in `labels`.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}
353
354impl GhRunner {
355 /// Whether this is a self-hosted runner with the provided label
356 pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
357 matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
358 }
359}
360
// TODO: support a more structured format for demands
// See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
/// ADO agent pool a job runs on.
#[derive(Debug, Clone)]
pub struct AdoPool {
    /// Name of the agent pool
    pub name: String,
    /// Demand strings used to select an agent from the pool (see TODO above)
    pub demands: Vec<String>,
}
368
/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}
379
/// Opaque handle to a pipeline-level parameter, consumed by jobs via
/// [`PipelineJobCtx::use_parameter`].
#[derive(Clone, Debug)]
#[must_use]
pub struct UseParameter<T> {
    // index into the pipeline's `parameters` list
    idx: usize,
    _kind: std::marker::PhantomData<T>,
}
386
/// Opaque handle to an artifact which must be published by a single job.
///
/// Deliberately not `Clone`: consuming it (via
/// [`PipelineJobCtx::publish_artifact`]) enforces the single-publisher rule.
#[must_use]
pub struct PublishArtifact {
    // index into the pipeline's `artifacts` list
    idx: usize,
}
392
/// Opaque handle to an artifact which can be used by one or more jobs.
#[derive(Clone)]
#[must_use]
pub struct UseArtifact {
    // index into the pipeline's `artifacts` list
    idx: usize,
}
399
/// Opaque handle to an artifact of type `T` which must be published by a single job.
#[must_use]
pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);
403
/// Opaque handle to an artifact of type `T` which can be used by one or more
/// jobs.
#[must_use]
pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);
408
409impl<T> Clone for UseTypedArtifact<T> {
410 fn clone(&self) -> Self {
411 UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
412 }
413}
414
/// Builder for a flowey pipeline: the set of jobs, the artifacts and
/// parameters they share, and any backend-specific (ADO / GitHub Actions)
/// configuration.
#[derive(Default)]
pub struct Pipeline {
    jobs: Vec<PipelineJobMetadata>,
    artifacts: Vec<ArtifactMeta>,
    parameters: Vec<ParameterMeta>,
    // extra non-artifact ordering edges, as (depends_on_idx, job_idx) pairs
    extra_deps: BTreeSet<(usize, usize)>,
    // builder internal
    artifact_names: BTreeSet<String>,
    dummy_done_idx: usize,
    artifact_map_idx: usize,
    global_patchfns: Vec<crate::patch::PatchFn>,
    inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
    // backend specific
    ado_name: Option<String>,
    ado_job_id_overrides: BTreeMap<usize, String>,
    ado_schedule_triggers: Vec<AdoScheduleTriggers>,
    ado_ci_triggers: Option<AdoCiTriggers>,
    ado_pr_triggers: Option<AdoPrTriggers>,
    ado_resources_repository: Vec<InternalAdoResourcesRepository>,
    ado_bootstrap_template: String,
    ado_variables: BTreeMap<String, String>,
    ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
    gh_name: Option<String>,
    gh_schedule_triggers: Vec<GhScheduleTriggers>,
    gh_ci_triggers: Option<GhCiTriggers>,
    gh_pr_triggers: Option<GhPrTriggers>,
    gh_bootstrap_template: String,
}
443
444impl Pipeline {
445 pub fn new() -> Pipeline {
446 Pipeline::default()
447 }
448
449 /// Inject all pipeline jobs with some common logic. (e.g: to resolve common
450 /// configuration requirements shared by all jobs).
451 ///
452 /// Can only be invoked once per pipeline.
453 #[track_caller]
454 pub fn inject_all_jobs_with(
455 &mut self,
456 cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
457 ) -> &mut Self {
458 if self.inject_all_jobs_with.is_some() {
459 panic!("can only call inject_all_jobs_with once!")
460 }
461 self.inject_all_jobs_with = Some(Box::new(cb));
462 self
463 }
464
    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }
498
    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
    ///
    /// Calling this method multiple times will overwrite any previously set
    /// callback.
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }
513
    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }
520
    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }
527
    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }
534
535 /// (ADO only) Declare a new repository resource, returning a type-safe
536 /// handle which downstream ADO steps are able to consume via
537 /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
538 pub fn ado_add_resources_repository(
539 &mut self,
540 repo: AdoResourcesRepository,
541 ) -> AdoResourcesRepositoryId {
542 let AdoResourcesRepository {
543 repo_type,
544 name,
545 git_ref,
546 endpoint,
547 } = repo;
548
549 let repo_id = format!("repo{}", self.ado_resources_repository.len());
550
551 self.ado_resources_repository
552 .push(InternalAdoResourcesRepository {
553 repo_id: repo_id.clone(),
554 repo_type,
555 name,
556 git_ref: match git_ref {
557 AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
558 AdoResourcesRepositoryRef::Parameter(p) => {
559 AdoResourcesRepositoryRef::Parameter(p.idx)
560 }
561 },
562 endpoint,
563 });
564 AdoResourcesRepositoryId { repo_id }
565 }
566
567 /// (GitHub Actions only) Set the pipeline-level name.
568 ///
569 /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
570 pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
571 self.gh_name = Some(name.as_ref().into());
572 self
573 }
574
    /// Provide a YAML template used to bootstrap flowey at the start of a
    /// GitHub pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///     - Directory to copy artifacts into.
    ///     - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///     - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///     - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///     - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///     - Repo-root relative path to the pipeline (as provided when
    ///       generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }
608
    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }
615
    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }
622
    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }
629
630 /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
631 ///
632 /// For more information on defining secrets for use in GitHub Actions, see
633 /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
634 pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
635 GhUserSecretVar(secret_name.as_ref().to_string())
636 }
637
    /// Add a new job to the pipeline, which will run on the given `platform` /
    /// `arch`, labeled `label`. Returns a builder handle used to further
    /// configure the job.
    pub fn new_job(
        &mut self,
        platform: FlowPlatform,
        arch: FlowArch,
        label: impl AsRef<str>,
    ) -> PipelineJob<'_> {
        // job handles are simply indices into the pipeline's job list
        let idx = self.jobs.len();
        self.jobs.push(PipelineJobMetadata {
            root_nodes: BTreeMap::new(),
            root_configs: BTreeMap::new(),
            patches: ResolvedPatches::build(),
            label: label.as_ref().into(),
            platform,
            arch,
            cond_param_idx: None,
            timeout_minutes: None,
            command_wrapper: None,
            ado_pool: None,
            ado_variables: BTreeMap::new(),
            gh_override_if: None,
            gh_global_env: BTreeMap::new(),
            gh_pool: None,
            gh_permissions: BTreeMap::new(),
        });

        PipelineJob {
            pipeline: self,
            job_idx: idx,
        }
    }
668
    /// Declare a dependency between two jobs that is not the result of an
    /// artifact.
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        // edges are stored as (depends_on_idx, job_idx) pairs
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }
680
681 #[track_caller]
682 pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
683 let name = name.as_ref();
684 let owned_name = name.to_string();
685
686 let not_exists = self.artifact_names.insert(owned_name.clone());
687 if !not_exists {
688 panic!("duplicate artifact name: {}", name)
689 }
690
691 let idx = self.artifacts.len();
692 self.artifacts.push(ArtifactMeta {
693 name: owned_name,
694 published_by_job: None,
695 used_by_jobs: BTreeSet::new(),
696 });
697
698 (PublishArtifact { idx }, UseArtifact { idx })
699 }
700
    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline.
    ///
    /// Thin typed wrapper around [`Self::new_artifact`]: the `T: Artifact`
    /// bound carries the artifact's type through to publish/use sites.
    #[track_caller]
    pub fn new_typed_artifact<T: Artifact>(
        &mut self,
        name: impl AsRef<str>,
    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
        let (publish, use_artifact) = self.new_artifact(name);
        (
            PublishTypedArtifact(publish, std::marker::PhantomData),
            UseTypedArtifact(use_artifact, std::marker::PhantomData),
        )
    }
714
715 /// (ADO only) Set the pipeline-level name.
716 ///
717 /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
718 pub fn ado_add_name(&mut self, name: String) -> &mut Self {
719 self.ado_name = Some(name);
720 self
721 }
722
    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }
749
    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }
781
    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        // parameter handles are indices into the pipeline's parameter list
        let idx = self.parameters.len();
        // derive the final parameter name from the requested name + kind
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
823
    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        // parameter handles are indices into the pipeline's parameter list
        let idx = self.parameters.len();
        // derive the final parameter name from the requested name + kind
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
867
    /// Declare a pipeline-level runtime parameter with type `String`.
    ///
    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and if it should be treated as a stable
    /// external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` allows restricting inputs to a set of possible values.
    /// Depending on the backend, these options may be presented as a set of
    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
    /// then any string is allowed.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<impl AsRef<str>>,
        possible_values: Option<Vec<String>>,
    ) -> UseParameter<String> {
        // parameter handles are indices into the pipeline's parameter list
        let idx = self.parameters.len();
        // derive the final parameter name from the requested name + kind
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::String {
                name,
                description: description.as_ref().into(),
                kind,
                default: default.map(|x| x.as_ref().into()),
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
913}
914
/// Context object passed when building a single pipeline job, used to wire
/// artifacts / parameters / requests into that job.
pub struct PipelineJobCtx<'a> {
    pipeline: &'a mut Pipeline,
    // index of this job within `pipeline.jobs`
    job_idx: usize,
}
919
920impl PipelineJobCtx<'_> {
    /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
    pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
        // each handle gets a unique backing var name, derived from a
        // monotonically increasing pipeline-wide counter
        self.pipeline.dummy_done_idx += 1;
        crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
    }
926
927 /// Claim that this job will use this artifact, obtaining a path to a folder
928 /// with the artifact's contents.
929 pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
930 self.pipeline.artifacts[artifact.idx]
931 .used_by_jobs
932 .insert(self.job_idx);
933
934 crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
935 &self.pipeline.artifacts[artifact.idx].name,
936 true,
937 ))
938 }
939
940 /// Claim that this job will publish this artifact, obtaining a path to a
941 /// fresh, empty folder which will be published as the specific artifact at
942 /// the end of the job.
943 pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
944 let existing = self.pipeline.artifacts[artifact.idx]
945 .published_by_job
946 .replace(self.job_idx);
947 assert!(existing.is_none()); // PublishArtifact isn't cloneable
948
949 crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
950 &self.pipeline.artifacts[artifact.idx].name,
951 false,
952 ))
953 }
954
955 fn helper_request<R: IntoRequest>(&mut self, req: R)
956 where
957 R::Node: 'static,
958 {
959 self.pipeline.jobs[self.job_idx]
960 .root_nodes
961 .entry(NodeHandle::from_type::<R::Node>())
962 .or_default()
963 .push(serde_json::to_vec(&req.into_request()).unwrap().into());
964 }
965
966 fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
967 let artifact_map_idx = self.pipeline.artifact_map_idx;
968 self.pipeline.artifact_map_idx += 1;
969
970 let backing_var = format!("artifact_map{}", artifact_map_idx);
971 let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
972 let write_var = crate::node::thin_air_write_runtime_var(backing_var);
973 (read_var, write_var)
974 }
975
976 /// Claim that this job will use this artifact, obtaining the resolved
977 /// contents of the artifact.
978 pub fn use_typed_artifact<T: Artifact>(
979 &mut self,
980 artifact: &UseTypedArtifact<T>,
981 ) -> ReadVar<T> {
982 let artifact_path = self.use_artifact(&artifact.0);
983 let (read, write) = self.new_artifact_map_vars::<T>();
984 self.helper_request(artifact::resolve::Request::new(artifact_path, write));
985 read
986 }
987
988 /// Claim that this job will publish this artifact, obtaining a variable to
989 /// write the artifact's contents to. The artifact will be published at
990 /// the end of the job.
991 pub fn publish_typed_artifact<T: Artifact>(
992 &mut self,
993 artifact: PublishTypedArtifact<T>,
994 ) -> WriteVar<T> {
995 let artifact_path = self.publish_artifact(artifact.0);
996 let (read, write) = self.new_artifact_map_vars::<T>();
997 let done = self.new_done_handle();
998 self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
999 write
1000 }
1001
1002 /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
1003 /// specified at runtime.
1004 pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
1005 where
1006 T: Serialize + DeserializeOwned,
1007 {
1008 self.pipeline.parameters[param.idx]
1009 .used_by_jobs
1010 .insert(self.job_idx);
1011
1012 crate::node::thin_air_read_runtime_var(
1013 self.pipeline.parameters[param.idx]
1014 .parameter
1015 .name()
1016 .to_string(),
1017 )
1018 }
1019
1020 /// Shortcut which allows defining a bool pipeline parameter within a Job.
1021 ///
1022 /// To share a single parameter between multiple jobs, don't use this method
1023 /// - use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`] instead.
1024 pub fn new_parameter_bool(
1025 &mut self,
1026 name: impl AsRef<str>,
1027 description: impl AsRef<str>,
1028 kind: ParameterKind,
1029 default: Option<bool>,
1030 ) -> ReadVar<bool> {
1031 let param = self
1032 .pipeline
1033 .new_parameter_bool(name, description, kind, default);
1034 self.use_parameter(param)
1035 }
1036
1037 /// Shortcut which allows defining a number pipeline parameter within a Job.
1038 ///
1039 /// To share a single parameter between multiple jobs, don't use this method
1040 /// - use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`] instead.
1041 pub fn new_parameter_num(
1042 &mut self,
1043 name: impl AsRef<str>,
1044 description: impl AsRef<str>,
1045 kind: ParameterKind,
1046 default: Option<i64>,
1047 possible_values: Option<Vec<i64>>,
1048 ) -> ReadVar<i64> {
1049 let param =
1050 self.pipeline
1051 .new_parameter_num(name, description, kind, default, possible_values);
1052 self.use_parameter(param)
1053 }
1054
1055 /// Shortcut which allows defining a string pipeline parameter within a Job.
1056 ///
1057 /// To share a single parameter between multiple jobs, don't use this method
1058 /// - use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`] instead.
1059 pub fn new_parameter_string(
1060 &mut self,
1061 name: impl AsRef<str>,
1062 description: impl AsRef<str>,
1063 kind: ParameterKind,
1064 default: Option<String>,
1065 possible_values: Option<Vec<String>>,
1066 ) -> ReadVar<String> {
1067 let param =
1068 self.pipeline
1069 .new_parameter_string(name, description, kind, default, possible_values);
1070 self.use_parameter(param)
1071 }
1072}
1073
/// Builder for configuring a single job within a [`Pipeline`].
///
/// Marked `#[must_use]` so a job builder is not accidentally constructed and
/// then silently dropped without being configured / finished.
#[must_use]
pub struct PipelineJob<'a> {
    // The pipeline this job belongs to.
    pipeline: &'a mut Pipeline,
    // Index of the job within `pipeline.jobs`.
    job_idx: usize,
}
1079
1080impl PipelineJob<'_> {
1081 /// (ADO only) specify which agent pool this job will be run on.
1082 pub fn ado_set_pool(self, pool: AdoPool) -> Self {
1083 self.pipeline.jobs[self.job_idx].ado_pool = Some(pool);
1084 self
1085 }
1086
1087 /// (ADO only) specify which agent pool this job will be run on, with
1088 /// additional special runner demands.
1089 pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
1090 self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
1091 name: pool.as_ref().into(),
1092 demands,
1093 });
1094 self
1095 }
1096
1097 /// (ADO only) Declare a job-level, named, read-only ADO variable.
1098 ///
1099 /// `name` and `value` are both arbitrary strings, which may include ADO
1100 /// template expressions.
1101 ///
1102 /// NOTE: Unless required by some particular third-party task, it's strongly
1103 /// recommended to _avoid_ using this method, and to simply use
1104 /// [`ReadVar::from_static`] to get a obtain a static variable.
1105 ///
1106 /// DEVNOTE: In the future, this API may be updated to return a handle that
1107 /// will allow resolving the resulting `AdoRuntimeVar`, but for
1108 /// implementation expediency, this API does not currently do this. If you
1109 /// need to read the value of this variable at runtime, you may need to
1110 /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
1111 ///
1112 /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
1113 pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
1114 let name = name.as_ref();
1115 let value = value.as_ref();
1116 self.pipeline.jobs[self.job_idx]
1117 .ado_variables
1118 .insert(name.into(), value.into());
1119 self
1120 }
1121
1122 /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
1123 /// once.
1124 ///
1125 /// This is a convenience method to streamline invoking
1126 /// [`Self::ado_new_named_variable`] multiple times.
1127 ///
1128 /// NOTE: Unless required by some particular third-party task, it's strongly
1129 /// recommended to _avoid_ using this method, and to simply use
1130 /// [`ReadVar::from_static`] to get a obtain a static variable.
1131 ///
1132 /// DEVNOTE: In the future, this API may be updated to return a handle that
1133 /// will allow resolving the resulting `AdoRuntimeVar`, but for
1134 /// implementation expediency, this API does not currently do this. If you
1135 /// need to read the value of this variable at runtime, you may need to
1136 /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
1137 ///
1138 /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
1139 pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
1140 where
1141 K: AsRef<str>,
1142 V: AsRef<str>,
1143 {
1144 self.pipeline.jobs[self.job_idx].ado_variables.extend(
1145 vars.into_iter()
1146 .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
1147 );
1148 self
1149 }
1150
1151 /// Overrides the id of the job.
1152 ///
1153 /// Flowey typically generates a reasonable job ID but some use cases that depend
1154 /// on the ID may find it useful to override it to something custom.
1155 pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
1156 self.pipeline
1157 .ado_job_id_overrides
1158 .insert(self.job_idx, name.as_ref().into());
1159 self
1160 }
1161
1162 /// (GitHub Actions only) specify which Github runner this job will be run on.
1163 pub fn gh_set_pool(self, pool: GhRunner) -> Self {
1164 self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
1165 self
1166 }
1167
1168 /// (GitHub Actions only) Manually override the `if:` condition for this
1169 /// particular job.
1170 ///
1171 /// **This is dangerous**, as an improperly set `if` condition may break
1172 /// downstream flowey jobs which assume flowey is in control of the job's
1173 /// scheduling logic.
1174 ///
1175 /// See
1176 /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
1177 /// for more info.
1178 pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
1179 self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
1180 self
1181 }
1182
1183 /// (GitHub Actions only) Declare a global job-level environment variable,
1184 /// visible to all downstream steps.
1185 ///
1186 /// `name` and `value` are both arbitrary strings, which may include GitHub
1187 /// Actions template expressions.
1188 ///
1189 /// **This is dangerous**, as it is easy to misuse this API in order to
1190 /// write a node which takes an implicit dependency on there being a global
1191 /// variable set on its behalf by the top-level pipeline code, making it
1192 /// difficult to "locally reason" about the behavior of a node simply by
1193 /// reading its code.
1194 ///
1195 /// Whenever possible, nodes should "late bind" environment variables:
1196 /// accepting a compile-time / runtime flowey parameter, and then setting it
1197 /// prior to executing a child command that requires it.
1198 ///
1199 /// Only use this API in exceptional cases, such as obtaining an environment
1200 /// variable whose value is determined by a job-level GitHub Actions
1201 /// expression evaluation.
1202 pub fn gh_dangerous_global_env_var(
1203 self,
1204 name: impl AsRef<str>,
1205 value: impl AsRef<str>,
1206 ) -> Self {
1207 let name = name.as_ref();
1208 let value = value.as_ref();
1209 self.pipeline.jobs[self.job_idx]
1210 .gh_global_env
1211 .insert(name.into(), value.into());
1212 self
1213 }
1214
1215 /// (GitHub Actions only) Grant permissions required by nodes in the job.
1216 ///
1217 /// For a given node handle, grant the specified permissions.
1218 /// The list provided must match the permissions specified within the node
1219 /// using `requires_permission`.
1220 ///
1221 /// NOTE: While this method is called at a node-level for auditability, the emitted
1222 /// yaml grants permissions at the job-level.
1223 ///
1224 /// This can lead to weird situations where node 1 might not specify a permission
1225 /// required according to Github Actions, but due to job-level granting of the permission
1226 /// by another node 2, the pipeline executes even though it wouldn't if node 2 was removed.
1227 ///
1228 /// For available permission scopes and their descriptions, see
1229 /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
1230 pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
1231 self,
1232 permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
1233 ) -> Self {
1234 let node_handle = NodeHandle::from_type::<N>();
1235 for (permission, value) in permissions {
1236 self.pipeline.jobs[self.job_idx]
1237 .gh_permissions
1238 .entry(node_handle)
1239 .or_default()
1240 .insert(permission, value);
1241 }
1242 self
1243 }
1244
1245 pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
1246 self.pipeline.jobs[self.job_idx]
1247 .patches
1248 .apply_patchfn(patchfn);
1249 self
1250 }
1251
1252 /// Set a timeout for the job, in minutes.
1253 ///
1254 /// Not calling this will result in the platform's default timeout being used,
1255 /// which is typically 60 minutes, but may vary.
1256 pub fn with_timeout_in_minutes(self, timeout: u32) -> Self {
1257 self.pipeline.jobs[self.job_idx].timeout_minutes = Some(timeout);
1258 self
1259 }
1260
1261 /// (ADO+Local Only) Only run the job if the specified condition is true.
1262 pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
1263 self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
1264 self.pipeline.parameters[cond.idx]
1265 .used_by_jobs
1266 .insert(self.job_idx);
1267 self
1268 }
1269
1270 /// Set a [`CommandWrapperKind`] that will be applied to all shell
1271 /// commands executed in this job's steps.
1272 ///
1273 /// The wrapper is applied both when running locally (via direct run)
1274 /// and when running in CI (the kind is serialized into
1275 /// `pipeline.json` and reconstructed at runtime).
1276 ///
1277 /// [`CommandWrapperKind`]: crate::shell::CommandWrapperKind
1278 pub fn set_command_wrapper(self, wrapper: crate::shell::CommandWrapperKind) -> Self {
1279 self.pipeline.jobs[self.job_idx].command_wrapper = Some(wrapper);
1280 self
1281 }
1282
1283 /// Add a flow node which will be run as part of the job.
1284 pub fn dep_on<R: IntoRequest + 'static>(
1285 self,
1286 f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
1287 ) -> Self {
1288 // JobToNodeCtx will ensure artifact deps are taken care of
1289 let req = f(&mut PipelineJobCtx {
1290 pipeline: self.pipeline,
1291 job_idx: self.job_idx,
1292 });
1293
1294 self.pipeline.jobs[self.job_idx]
1295 .root_nodes
1296 .entry(NodeHandle::from_type::<R::Node>())
1297 .or_default()
1298 .push(serde_json::to_vec(&req.into_request()).unwrap().into());
1299
1300 self
1301 }
1302
1303 /// Set config on a node for this job.
1304 ///
1305 /// This is the pipeline-level equivalent of [`NodeCtx::config`]. Config
1306 /// set here is merged with any config set by nodes within the job.
1307 ///
1308 /// [`NodeCtx::config`]: crate::node::NodeCtx::config
1309 pub fn config<C: IntoConfig + 'static>(self, config: C) -> Self {
1310 self.pipeline.jobs[self.job_idx]
1311 .root_configs
1312 .entry(NodeHandle::from_type::<C::Node>())
1313 .or_default()
1314 .push(serde_json::to_vec(&config).unwrap().into());
1315
1316 self
1317 }
1318
1319 /// Finish describing the pipeline job.
1320 pub fn finish(self) -> PipelineJobHandle {
1321 PipelineJobHandle {
1322 job_idx: self.job_idx,
1323 }
1324 }
1325
1326 /// Return the job's platform.
1327 pub fn get_platform(&self) -> FlowPlatform {
1328 self.pipeline.jobs[self.job_idx].platform
1329 }
1330
1331 /// Return the job's architecture.
1332 pub fn get_arch(&self) -> FlowArch {
1333 self.pipeline.jobs[self.job_idx].arch
1334 }
1335}
1336
/// Opaque handle to a job described in a [`Pipeline`], returned by
/// [`PipelineJob::finish`].
#[derive(Clone)]
pub struct PipelineJobHandle {
    // Index of the job within `Pipeline::jobs`.
    job_idx: usize,
}
1341
1342impl PipelineJobHandle {
1343 pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
1344 self.job_idx == job.job_idx
1345 }
1346}
1347
/// Hint passed to [`IntoPipeline::into_pipeline`] indicating which backend the
/// pipeline is being constructed for.
#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}
1357
1358/// Trait for types that can be converted into a [`Pipeline`].
1359///
1360/// This is the primary entry point for defining flowey pipelines. Implement this trait
1361/// to create a pipeline definition that can be executed locally or converted to CI YAML.
1362///
1363/// # Example
1364///
1365/// ```rust,no_run
1366/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint};
1367/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1368///
1369/// struct MyPipeline;
1370///
1371/// impl IntoPipeline for MyPipeline {
1372/// fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1373/// let mut pipeline = Pipeline::new();
1374///
1375/// // Define a job that runs on Linux x86_64
1376/// let _job = pipeline
1377/// .new_job(
1378/// FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1379/// FlowArch::X86_64,
1380/// "build"
1381/// )
1382/// .finish();
1383///
1384/// Ok(pipeline)
1385/// }
1386/// }
1387/// ```
1388///
1389/// # Complex Example with Parameters and Artifacts
1390///
1391/// ```rust,ignore
1392/// use flowey_core::pipeline::{IntoPipeline, Pipeline, PipelineBackendHint, ParameterKind};
1393/// use flowey_core::node::{FlowPlatform, FlowPlatformLinuxDistro, FlowArch};
1394///
1395/// struct BuildPipeline;
1396///
1397/// impl IntoPipeline for BuildPipeline {
1398/// fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline> {
1399/// let mut pipeline = Pipeline::new();
1400///
1401/// // Define a runtime parameter
1402/// let enable_tests = pipeline.new_parameter_bool(
1403/// "enable_tests",
1404/// "Whether to run tests",
1405/// ParameterKind::Stable,
1406/// Some(true) // default value
1407/// );
1408///
1409/// // Create an artifact for passing data between jobs
1410/// let (publish_build, use_build) = pipeline.new_artifact("build-output");
1411///
1412/// // Job 1: Build
1413/// let build_job = pipeline
1414/// .new_job(
1415/// FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1416/// FlowArch::X86_64,
1417/// "build"
1418/// )
1419/// .with_timeout_in_minutes(30)
1420/// .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node::Request {
1421/// output_dir: ctx.publish_artifact(publish_build),
1422/// })
1423/// .finish();
1424///
1425/// // Job 2: Test (conditionally run based on parameter)
1426/// let _test_job = pipeline
1427/// .new_job(
1428/// FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
1429/// FlowArch::X86_64,
1430/// "test"
1431/// )
1432/// .with_condition(enable_tests)
1433/// .dep_on(|ctx| flowey_lib_hvlite::_jobs::example_node2::Request {
1434/// input_dir: ctx.use_artifact(&use_build),
1435/// })
1436/// .finish();
1437///
1438/// Ok(pipeline)
1439/// }
1440/// }
1441/// ```
pub trait IntoPipeline {
    /// Convert `self` into a [`Pipeline`].
    ///
    /// `backend_hint` indicates which backend (local, ADO, or GitHub Actions)
    /// the pipeline is being built for.
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}
1445
1446fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
1447 match kind {
1448 ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
1449 ParameterKind::Stable => name.as_ref().into(),
1450 }
1451}
1452
1453/// Structs which should only be used by top-level flowey emitters. If you're a
1454/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    /// Compute the canonical runtime variable name used to communicate an
    /// artifact's path, so that the publishing side and the consuming side
    /// agree on the same name.
    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    /// A repository declared under ADO pipeline `resources`.
    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    /// Metadata describing a single job within a pipeline.
    pub struct PipelineJobMetadata {
        /// Root node requests for this job, keyed by node, as serialized JSON.
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        /// Root node configs for this job, keyed by node, as serialized JSON.
        pub root_configs: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        /// Patch functions registered for this job.
        pub patches: PatchResolver,
        /// Human-readable job label.
        pub label: String,
        /// Platform the job runs on.
        pub platform: FlowPlatform,
        /// Architecture the job runs on.
        pub arch: FlowArch,
        /// Index of the bool parameter gating this job's execution, if any.
        pub cond_param_idx: Option<usize>,
        /// Job timeout in minutes, if overridden.
        pub timeout_minutes: Option<u32>,
        /// Wrapper applied to shell commands run in this job's steps, if any.
        pub command_wrapper: Option<crate::shell::CommandWrapperKind>,
        // backend specific
        /// (ADO) agent pool the job runs on.
        pub ado_pool: Option<AdoPool>,
        /// (ADO) job-level named variables.
        pub ado_variables: BTreeMap<String, String>,
        /// (GitHub) manual `if:` condition override.
        pub gh_override_if: Option<String>,
        /// (GitHub) runner the job runs on.
        pub gh_pool: Option<GhRunner>,
        /// (GitHub) job-level environment variables.
        pub gh_global_env: BTreeMap<String, String>,
        /// (GitHub) per-node permission grants (emitted at the job level).
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    /// Metadata tracking which jobs publish and consume an artifact.
    #[derive(Debug)]
    pub struct ArtifactMeta {
        /// Artifact name.
        pub name: String,
        /// Index of the job that publishes the artifact (at most one).
        pub published_by_job: Option<usize>,
        /// Indices of jobs that consume the artifact.
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Metadata tracking which jobs consume a pipeline parameter.
    #[derive(Debug)]
    pub struct ParameterMeta {
        /// The parameter's full description.
        pub parameter: Parameter,
        /// Indices of jobs that consume the parameter.
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all fields marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        /// Consume a [`Pipeline`], applying any deferred all-jobs injection
        /// callback and global patch functions, and expose its fields to
        /// emitter code.
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            // Run the "inject into all jobs" callback (if any) before
            // destructuring, since it may mutate arbitrary jobs.
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            // Global patch functions apply to every job in the pipeline.
            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    /// A pipeline runtime parameter, in one of the supported types.
    #[derive(Debug, Clone)]
    pub enum Parameter {
        /// Boolean parameter.
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        /// String parameter, optionally restricted to a set of values.
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        /// Numeric (i64) parameter, optionally restricted to a set of values.
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        /// Return the parameter's name (as stored, i.e. after any
        /// stability-based name mangling).
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}