flowey_core/pipeline.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Core types and traits used to create and work with flowey pipelines.

mod artifact;

pub use artifact::Artifact;

use self::internal::*;
use crate::node::FlowArch;
use crate::node::FlowNodeBase;
use crate::node::FlowPlatform;
use crate::node::FlowPlatformLinuxDistro;
use crate::node::GhUserSecretVar;
use crate::node::IntoRequest;
use crate::node::NodeHandle;
use crate::node::ReadVar;
use crate::node::WriteVar;
use crate::node::steps::ado::AdoResourcesRepositoryId;
use crate::node::user_facing::AdoRuntimeVar;
use crate::node::user_facing::GhPermission;
use crate::node::user_facing::GhPermissionValue;
use crate::patch::PatchResolver;
use crate::patch::ResolvedPatches;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::path::PathBuf;

/// Pipeline types which are considered "user facing", and included in the
/// `flowey` prelude.
pub mod user_facing {
    pub use super::AdoCiTriggers;
    pub use super::AdoPrTriggers;
    pub use super::AdoResourcesRepository;
    pub use super::AdoResourcesRepositoryRef;
    pub use super::AdoResourcesRepositoryType;
    pub use super::AdoScheduleTriggers;
    pub use super::GhCiTriggers;
    pub use super::GhPrTriggers;
    pub use super::GhRunner;
    pub use super::GhRunnerOsLabel;
    pub use super::GhScheduleTriggers;
    pub use super::HostExt;
    pub use super::IntoPipeline;
    pub use super::ParameterKind;
    pub use super::Pipeline;
    pub use super::PipelineBackendHint;
    pub use super::PipelineJob;
    pub use super::PipelineJobCtx;
    pub use super::PipelineJobHandle;
    pub use super::PublishArtifact;
    pub use super::PublishTypedArtifact;
    pub use super::UseArtifact;
    pub use super::UseParameter;
    pub use super::UseTypedArtifact;
    pub use crate::node::FlowArch;
    pub use crate::node::FlowPlatform;
}

/// Best-effort detection of the host's Linux distribution, based on scanning
/// `/etc/os-release`.
fn linux_distro() -> FlowPlatformLinuxDistro {
    if let Ok(etc_os_release) = fs_err::read_to_string("/etc/os-release") {
        if etc_os_release.contains("ID=ubuntu") {
            FlowPlatformLinuxDistro::Ubuntu
        } else if etc_os_release.contains("ID=fedora") {
            FlowPlatformLinuxDistro::Fedora
        } else if etc_os_release.contains("ID=arch") {
            FlowPlatformLinuxDistro::Arch
        } else {
            FlowPlatformLinuxDistro::Unknown
        }
    } else {
        FlowPlatformLinuxDistro::Unknown
    }
}

/// Extension trait to obtain the value corresponding to the current host
/// machine.
pub trait HostExt: Sized {
    /// Return the value for the current host machine.
    ///
    /// Will panic on non-local backends.
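    ///
    /// A minimal sketch of local-only usage:
    ///
    /// ```ignore
    /// let platform = FlowPlatform::host(PipelineBackendHint::Local);
    /// let arch = FlowArch::host(PipelineBackendHint::Local);
    /// ```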
    fn host(backend_hint: PipelineBackendHint) -> Self;
}

impl HostExt for FlowPlatform {
    /// Return the platform of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowPlatform::host` when defining a local-only pipeline");
        }

        if cfg!(target_os = "windows") {
            Self::Windows
        } else if cfg!(target_os = "linux") {
            Self::Linux(linux_distro())
        } else if cfg!(target_os = "macos") {
            Self::MacOs
        } else {
            panic!("no valid host-os")
        }
    }
}

impl HostExt for FlowArch {
    /// Return the arch of the current host machine.
    ///
    /// Will panic on non-local backends.
    fn host(backend_hint: PipelineBackendHint) -> Self {
        if !matches!(backend_hint, PipelineBackendHint::Local) {
            panic!("can only use `FlowArch::host` when defining a local-only pipeline");
        }

        // xtask-fmt allow-target-arch oneoff-flowey
        if cfg!(target_arch = "x86_64") {
            Self::X86_64
        // xtask-fmt allow-target-arch oneoff-flowey
        } else if cfg!(target_arch = "aarch64") {
            Self::Aarch64
        } else {
            panic!("no valid host-arch")
        }
    }
}

/// Trigger ADO pipelines on a schedule
#[derive(Default, Debug)]
pub struct AdoScheduleTriggers {
    /// Friendly name for the scheduled run
    pub display_name: String,
    /// Run the pipeline whenever there is a commit on these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger ADO pipelines per PR
#[derive(Debug)]
pub struct AdoPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline even if the PR is a draft PR. Defaults to `false`.
    pub run_on_draft: bool,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
}

/// Trigger ADO pipelines via Continuous Integration
#[derive(Debug, Default)]
pub struct AdoCiTriggers {
    /// Run the pipeline whenever there is a change to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Run the pipeline whenever a matching tag is created (supports glob
    /// syntax)
    pub tags: Vec<String>,
    /// Specify any tags which should be filtered out from the list of `tags`
    /// (supports glob syntax)
    pub exclude_tags: Vec<String>,
    /// Whether to batch changes per branch.
    pub batch: bool,
}

impl Default for AdoPrTriggers {
    fn default() -> Self {
        Self {
            branches: Vec::new(),
            exclude_branches: Vec::new(),
            run_on_draft: false,
            auto_cancel: true,
        }
    }
}

/// ADO repository resource.
#[derive(Debug)]
pub struct AdoResourcesRepository {
    /// Type of repo that is being connected to.
    pub repo_type: AdoResourcesRepositoryType,
    /// Repository name. Format depends on `repo_type`.
    pub name: String,
    /// git ref to checkout.
    pub git_ref: AdoResourcesRepositoryRef,
    /// (optional) ID of the service endpoint connecting to this repository.
    pub endpoint: Option<String>,
}

/// ADO repository resource type
#[derive(Debug)]
pub enum AdoResourcesRepositoryType {
    /// Azure Repos Git repository
    AzureReposGit,
    /// Github repository
    GitHub,
}

/// ADO repository ref
#[derive(Debug)]
pub enum AdoResourcesRepositoryRef<P = UseParameter<String>> {
    /// Hard-coded ref (e.g: refs/heads/main)
    Fixed(String),
    /// Connected to pipeline-level parameter
    Parameter(P),
}

/// Trigger Github Actions pipelines on a schedule
///
/// NOTE: Github Actions doesn't support specifying the branch when triggered by `schedule`.
/// To run on a specific branch, modify the branch checked out in the pipeline.
#[derive(Default, Debug)]
pub struct GhScheduleTriggers {
    /// Run the pipeline in a schedule, as specified by a cron string
    pub cron: String,
}

/// Trigger Github Actions pipelines per PR
#[derive(Debug)]
pub struct GhPrTriggers {
    /// Run the pipeline whenever there is a PR to these specified branches
    /// (supports glob syntax)
    pub branches: Vec<String>,
    /// Specify any branches which should be filtered out from the list of
    /// `branches` (supports glob syntax)
    pub exclude_branches: Vec<String>,
    /// Automatically cancel the pipeline run if a new commit lands in the
    /// branch. Defaults to `true`.
    pub auto_cancel: bool,
    /// Run the pipeline whenever the PR trigger matches the specified types
    pub types: Vec<String>,
}

247#[derive(Debug, Default)]
248pub struct GhCiTriggers {
249 /// Run the pipeline whenever there is a change to these specified branches
250 /// (supports glob syntax)
251 pub branches: Vec<String>,
252 /// Specify any branches which should be filtered out from the list of
253 /// `branches` (supports glob syntax)
254 pub exclude_branches: Vec<String>,
255 /// Run the pipeline whenever a matching tag is created (supports glob
256 /// syntax)
257 pub tags: Vec<String>,
258 /// Specify any tags which should be filtered out from the list of `tags`
259 /// (supports glob syntax)
260 pub exclude_tags: Vec<String>,
261}
262
263impl GhPrTriggers {
264 /// Triggers the pipeline on the default PR events plus when a draft is marked as ready for review.
265 pub fn new_draftable() -> Self {
266 Self {
267 branches: Vec::new(),
268 exclude_branches: Vec::new(),
269 types: vec![
270 "opened".into(),
271 "synchronize".into(),
272 "reopened".into(),
273 "ready_for_review".into(),
274 ],
275 auto_cancel: true,
276 }
277 }
278}
279
/// Well-known GitHub runner OS labels.
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunnerOsLabel {
    UbuntuLatest,
    Ubuntu2404,
    Ubuntu2204,
    WindowsLatest,
    Windows2025,
    Windows2022,
    Ubuntu2404Arm,
    Ubuntu2204Arm,
    Windows11Arm,
    Custom(String),
}

/// GitHub runner type
#[derive(Debug, Clone, PartialEq)]
pub enum GhRunner {
    /// GitHub-hosted runner, selected by OS label.
    ///
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    GhHosted(GhRunnerOsLabel),
    /// Self-hosted runners are selected by matching runner labels to `labels`.
    /// 'self-hosted' is a common label for self-hosted runners, but is not required.
    /// Labels are case-insensitive and can take the form of arbitrary strings.
    /// See <https://docs.github.com/en/actions/hosting-your-own-runners> for more details.
    SelfHosted(Vec<String>),
    /// Uses a runner belonging to `group` that matches all labels in `labels`.
    /// See <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners>
    /// for more details.
    RunnerGroup { group: String, labels: Vec<String> },
}

impl GhRunner {
    /// Whether this is a self-hosted runner with the provided label
    pub fn is_self_hosted_with_label(&self, label: &str) -> bool {
        matches!(self, GhRunner::SelfHosted(labels) if labels.iter().any(|s| s.as_str() == label))
    }
}

/// Parameter type (unstable / stable).
#[derive(Debug, Clone)]
pub enum ParameterKind {
    /// The parameter is considered an unstable API, and should not be
    /// taken as a dependency.
    Unstable,
    /// The parameter is considered a stable API, and can be used by
    /// external pipelines to control behavior of the pipeline.
    Stable,
}

330#[must_use]
331pub struct UseParameter<T> {
332 idx: usize,
333 _kind: std::marker::PhantomData<T>,
334}
335
336/// Opaque handle to an artifact which must be published by a single job.
337#[must_use]
338pub struct PublishArtifact {
339 idx: usize,
340}
341
342/// Opaque handle to an artifact which can be used by one or more jobs.
343#[derive(Clone)]
344#[must_use]
345pub struct UseArtifact {
346 idx: usize,
347}
348
349/// Opaque handle to an artifact of type `T` which must be published by a single job.
350#[must_use]
351pub struct PublishTypedArtifact<T>(PublishArtifact, std::marker::PhantomData<fn() -> T>);
352
353/// Opaque handle to an artifact of type `T` which can be used by one or more
354/// jobs.
355#[must_use]
356pub struct UseTypedArtifact<T>(UseArtifact, std::marker::PhantomData<fn(T)>);
357
358impl<T> Clone for UseTypedArtifact<T> {
359 fn clone(&self) -> Self {
360 UseTypedArtifact(self.0.clone(), std::marker::PhantomData)
361 }
362}
363
364#[derive(Default)]
365pub struct Pipeline {
366 jobs: Vec<PipelineJobMetadata>,
367 artifacts: Vec<ArtifactMeta>,
368 parameters: Vec<ParameterMeta>,
369 extra_deps: BTreeSet<(usize, usize)>,
370 // builder internal
371 artifact_names: BTreeSet<String>,
372 dummy_done_idx: usize,
373 artifact_map_idx: usize,
374 global_patchfns: Vec<crate::patch::PatchFn>,
375 inject_all_jobs_with: Option<Box<dyn for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a>>>,
376 // backend specific
377 ado_name: Option<String>,
378 ado_job_id_overrides: BTreeMap<usize, String>,
379 ado_schedule_triggers: Vec<AdoScheduleTriggers>,
380 ado_ci_triggers: Option<AdoCiTriggers>,
381 ado_pr_triggers: Option<AdoPrTriggers>,
382 ado_resources_repository: Vec<InternalAdoResourcesRepository>,
383 ado_bootstrap_template: String,
384 ado_variables: BTreeMap<String, String>,
385 ado_post_process_yaml_cb: Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
386 gh_name: Option<String>,
387 gh_schedule_triggers: Vec<GhScheduleTriggers>,
388 gh_ci_triggers: Option<GhCiTriggers>,
389 gh_pr_triggers: Option<GhPrTriggers>,
390 gh_bootstrap_template: String,
391}
392
impl Pipeline {
    pub fn new() -> Pipeline {
        Pipeline::default()
    }

    /// Inject all pipeline jobs with some common logic (e.g: to resolve common
    /// configuration requirements shared by all jobs).
    ///
    /// Can only be invoked once per pipeline.
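    ///
    /// A minimal sketch (`my_common_setup::Request` is a hypothetical request
    /// type, not part of this crate):
    ///
    /// ```ignore
    /// pipeline.inject_all_jobs_with(|job| {
    ///     // e.g: ensure every job resolves some shared setup node
    ///     job.dep_on(|ctx| my_common_setup::Request {
    ///         done: ctx.new_done_handle(),
    ///     })
    /// });
    /// ```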
    #[track_caller]
    pub fn inject_all_jobs_with(
        &mut self,
        cb: impl for<'a> Fn(PipelineJob<'a>) -> PipelineJob<'a> + 'static,
    ) -> &mut Self {
        if self.inject_all_jobs_with.is_some() {
            panic!("can only call inject_all_jobs_with once!")
        }
        self.inject_all_jobs_with = Some(Box::new(cb));
        self
    }

    /// (ADO only) Provide a YAML template used to bootstrap flowey at the start
    /// of an ADO pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///   - Directory to copy artifacts into.
    ///   - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///   - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///   - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///   - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///   - Repo-root relative path to the pipeline (as provided when
    ///     generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
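    ///
    /// An illustrative sketch (the exact build and copy steps depend on your
    /// repo layout; treat this as a shape, not a working template):
    ///
    /// ```ignore
    /// pipeline.ado_set_flowey_bootstrap_template(
    ///     r#"
    /// - script: |
    ///     cargo build -p {{FLOWEY_CRATE}} --target {{FLOWEY_TARGET}}
    ///     cp <built flowey bin> "{{FLOWEY_OUTDIR}}/flowey{{FLOWEY_BIN_EXTENSION}}"
    ///     cp <pipeline yaml and json> "{{FLOWEY_OUTDIR}}"
    ///   displayName: bootstrap flowey
    /// "#
    ///     .into(),
    /// );
    /// ```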
    pub fn ado_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.ado_bootstrap_template = template;
        self
    }

    /// (ADO only) Provide a callback function which will be used to
    /// post-process any YAML flowey generates for the pipeline.
    ///
    /// Until flowey defines a stable API for maintaining out-of-tree backends,
    /// this method can be used to integrate the output from the generic ADO
    /// backend with any organization-specific templates that one may be
    /// required to use (e.g: for compliance reasons).
    pub fn ado_post_process_yaml(
        &mut self,
        cb: impl FnOnce(serde_yaml::Value) -> serde_yaml::Value + 'static,
    ) -> &mut Self {
        self.ado_post_process_yaml_cb = Some(Box::new(cb));
        self
    }

    /// (ADO only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn ado_add_schedule_trigger(&mut self, triggers: AdoScheduleTriggers) -> &mut Self {
        self.ado_schedule_triggers.push(triggers);
        self
    }

    /// (ADO only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_pr_triggers(&mut self, triggers: AdoPrTriggers) -> &mut Self {
        self.ado_pr_triggers = Some(triggers);
        self
    }

    /// (ADO only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn ado_set_ci_triggers(&mut self, triggers: AdoCiTriggers) -> &mut Self {
        self.ado_ci_triggers = Some(triggers);
        self
    }

    /// (ADO only) Declare a new repository resource, returning a type-safe
    /// handle which downstream ADO steps are able to consume via
    /// [`AdoStepServices::resolve_repository_id`](crate::node::user_facing::AdoStepServices::resolve_repository_id).
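    ///
    /// A minimal sketch (repository name, ref, and endpoint are illustrative):
    ///
    /// ```ignore
    /// let repo_id = pipeline.ado_add_resources_repository(AdoResourcesRepository {
    ///     repo_type: AdoResourcesRepositoryType::GitHub,
    ///     name: "contoso/some-repo".into(),
    ///     git_ref: AdoResourcesRepositoryRef::Fixed("refs/heads/main".into()),
    ///     endpoint: Some("contoso-github-connection".into()),
    /// });
    /// ```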
    pub fn ado_add_resources_repository(
        &mut self,
        repo: AdoResourcesRepository,
    ) -> AdoResourcesRepositoryId {
        let AdoResourcesRepository {
            repo_type,
            name,
            git_ref,
            endpoint,
        } = repo;

        let repo_id = format!("repo{}", self.ado_resources_repository.len());

        self.ado_resources_repository
            .push(InternalAdoResourcesRepository {
                repo_id: repo_id.clone(),
                repo_type,
                name,
                git_ref: match git_ref {
                    AdoResourcesRepositoryRef::Fixed(s) => AdoResourcesRepositoryRef::Fixed(s),
                    AdoResourcesRepositoryRef::Parameter(p) => {
                        AdoResourcesRepositoryRef::Parameter(p.idx)
                    }
                },
                endpoint,
            });
        AdoResourcesRepositoryId { repo_id }
    }

    /// (GitHub Actions only) Set the pipeline-level name.
    ///
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#name>
    pub fn gh_set_name(&mut self, name: impl AsRef<str>) -> &mut Self {
        self.gh_name = Some(name.as_ref().into());
        self
    }

    /// (GitHub Actions only) Provide a YAML template used to bootstrap flowey
    /// at the start of a GitHub Actions pipeline.
    ///
    /// The template has access to the following vars, which will be statically
    /// interpolated into the template's text:
    ///
    /// - `{{FLOWEY_OUTDIR}}`
    ///   - Directory to copy artifacts into.
    ///   - NOTE: this var will include `\` on Windows, and `/` on linux!
    /// - `{{FLOWEY_BIN_EXTENSION}}`
    ///   - Extension of the expected flowey bin (either "", or ".exe")
    /// - `{{FLOWEY_CRATE}}`
    ///   - Name of the project-specific flowey crate to be built
    /// - `{{FLOWEY_TARGET}}`
    ///   - The target-triple flowey is being built for
    /// - `{{FLOWEY_PIPELINE_PATH}}`
    ///   - Repo-root relative path to the pipeline (as provided when
    ///     generating the pipeline via the flowey CLI)
    ///
    /// The template's sole responsibility is to copy 3 files into the
    /// `{{FLOWEY_OUTDIR}}`:
    ///
    /// 1. The bootstrapped flowey binary, with the file name
    ///    `flowey{{FLOWEY_BIN_EXTENSION}}`
    /// 2. Two files called `pipeline.yaml` and `pipeline.json`, which are
    ///    copies of the pipeline YAML and pipeline JSON currently being run.
    ///    `{{FLOWEY_PIPELINE_PATH}}` is provided as a way to disambiguate in
    ///    cases where the same template is being used for multiple pipelines
    ///    (e.g: a debug vs. release pipeline).
    pub fn gh_set_flowey_bootstrap_template(&mut self, template: String) -> &mut Self {
        self.gh_bootstrap_template = template;
        self
    }

    /// (GitHub Actions only) Add a new scheduled CI trigger. Can be called multiple times
    /// to set up multiple scheduled runs.
    pub fn gh_add_schedule_trigger(&mut self, triggers: GhScheduleTriggers) -> &mut Self {
        self.gh_schedule_triggers.push(triggers);
        self
    }

    /// (GitHub Actions only) Set a PR trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_pr_triggers(&mut self, triggers: GhPrTriggers) -> &mut Self {
        self.gh_pr_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Set a CI trigger. Calling this method multiple times will
    /// overwrite any previously set triggers.
    pub fn gh_set_ci_triggers(&mut self, triggers: GhCiTriggers) -> &mut Self {
        self.gh_ci_triggers = Some(triggers);
        self
    }

    /// (GitHub Actions only) Use a pre-defined GitHub Actions secret variable.
    ///
    /// For more information on defining secrets for use in GitHub Actions, see
    /// <https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions>
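    ///
    /// A minimal sketch (the secret name is illustrative):
    ///
    /// ```ignore
    /// let token = pipeline.gh_use_secret("MY_API_TOKEN");
    /// // `token` can then be passed into nodes which accept a `GhUserSecretVar`.
    /// ```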
    pub fn gh_use_secret(&mut self, secret_name: impl AsRef<str>) -> GhUserSecretVar {
        GhUserSecretVar(secret_name.as_ref().to_string())
    }

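    /// Declare a new job in the pipeline, which will run on a machine of the
    /// given platform and architecture, with the given user-facing label.
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let job = pipeline
    ///     .new_job(
    ///         FlowPlatform::Linux(FlowPlatformLinuxDistro::Ubuntu),
    ///         FlowArch::X86_64,
    ///         "example job",
    ///     )
    ///     .finish();
    /// ```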
    pub fn new_job(
        &mut self,
        platform: FlowPlatform,
        arch: FlowArch,
        label: impl AsRef<str>,
    ) -> PipelineJob<'_> {
        let idx = self.jobs.len();
        self.jobs.push(PipelineJobMetadata {
            root_nodes: BTreeMap::new(),
            patches: ResolvedPatches::build(),
            label: label.as_ref().into(),
            platform,
            arch,
            cond_param_idx: None,
            timeout_minutes: None,
            ado_pool: None,
            ado_variables: BTreeMap::new(),
            gh_override_if: None,
            gh_global_env: BTreeMap::new(),
            gh_pool: None,
            gh_permissions: BTreeMap::new(),
        });

        PipelineJob {
            pipeline: self,
            job_idx: idx,
        }
    }

    /// Declare a dependency between two jobs that is not the result of an
    /// artifact.
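    ///
    /// A minimal sketch, given two already-finished job handles:
    ///
    /// ```ignore
    /// // ensure `job_b` only runs after `job_a` has completed
    /// pipeline.non_artifact_dep(&job_b, &job_a);
    /// ```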
    pub fn non_artifact_dep(
        &mut self,
        job: &PipelineJobHandle,
        depends_on_job: &PipelineJobHandle,
    ) -> &mut Self {
        self.extra_deps
            .insert((depends_on_job.job_idx, job.job_idx));
        self
    }

    #[track_caller]
    pub fn new_artifact(&mut self, name: impl AsRef<str>) -> (PublishArtifact, UseArtifact) {
        let name = name.as_ref();
        let owned_name = name.to_string();

        let not_exists = self.artifact_names.insert(owned_name.clone());
        if !not_exists {
            panic!("duplicate artifact name: {}", name)
        }

        let idx = self.artifacts.len();
        self.artifacts.push(ArtifactMeta {
            name: owned_name,
            published_by_job: None,
            used_by_jobs: BTreeSet::new(),
        });

        (PublishArtifact { idx }, UseArtifact { idx })
    }

    /// Returns a pair of opaque handles to a new artifact for use across jobs
    /// in the pipeline.
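    ///
    /// A minimal sketch (`MyArtifact` is a hypothetical type implementing
    /// [`Artifact`]):
    ///
    /// ```ignore
    /// let (publish, use_artifact) = pipeline.new_typed_artifact::<MyArtifact>("my-artifact");
    /// // in the publishing job's `dep_on` closure:
    /// //     let write: WriteVar<MyArtifact> = ctx.publish_typed_artifact(publish);
    /// // in the consuming job's `dep_on` closure:
    /// //     let read: ReadVar<MyArtifact> = ctx.use_typed_artifact(&use_artifact);
    /// ```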
    #[track_caller]
    pub fn new_typed_artifact<T: Artifact>(
        &mut self,
        name: impl AsRef<str>,
    ) -> (PublishTypedArtifact<T>, UseTypedArtifact<T>) {
        let (publish, use_artifact) = self.new_artifact(name);
        (
            PublishTypedArtifact(publish, std::marker::PhantomData),
            UseTypedArtifact(use_artifact, std::marker::PhantomData),
        )
    }

    /// (ADO only) Set the pipeline-level name.
    ///
    /// <https://learn.microsoft.com/en-us/azure/devops/pipelines/process/run-number?view=azure-devops&tabs=yaml>
    pub fn ado_add_name(&mut self, name: String) -> &mut Self {
        self.ado_name = Some(name);
        self
    }

    /// (ADO only) Declare a pipeline-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings.
    ///
    /// Returns an instance of [`AdoRuntimeVar`], which, if need be, can be
    /// converted into a [`ReadVar<String>`] using
    /// [`NodeCtx::get_ado_variable`].
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(
        &mut self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> AdoRuntimeVar {
        let name = name.as_ref();
        let value = value.as_ref();

        self.ado_variables.insert(name.into(), value.into());

        // safe, since we'll ensure that the global exists in the ADO backend
        AdoRuntimeVar::dangerous_from_global(name, false)
    }

    /// (ADO only) Declare multiple pipeline-level, named, read-only ADO
    /// variables at once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(
        &mut self,
        vars: impl IntoIterator<Item = (K, V)>,
    ) -> &mut Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// Declare a pipeline-level runtime parameter with type `bool`.
    ///
    /// To obtain a [`ReadVar<bool>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and whether it should be treated as a
    /// stable external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
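    ///
    /// A minimal sketch (parameter name and description are illustrative):
    ///
    /// ```ignore
    /// let verbose = pipeline.new_parameter_bool(
    ///     "verbose",
    ///     "emit verbose output",
    ///     ParameterKind::Unstable,
    ///     Some(false),
    /// );
    /// // within a job: let verbose: ReadVar<bool> = ctx.use_parameter(verbose);
    /// ```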
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> UseParameter<bool> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Bool {
                name,
                description: description.as_ref().into(),
                kind,
                default,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `i64`.
    ///
    /// To obtain a [`ReadVar<i64>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and whether it should be treated as a
    /// stable external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` can be used to limit the set of valid values the
    /// parameter accepts.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> UseParameter<i64> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::Num {
                name,
                description: description.as_ref().into(),
                kind,
                default,
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }

    /// Declare a pipeline-level runtime parameter with type `String`.
    ///
    /// To obtain a [`ReadVar<String>`] that can be used within a node, use the
    /// [`PipelineJobCtx::use_parameter`] method.
    ///
    /// `name` is the name of the parameter.
    ///
    /// `description` is an arbitrary string, which will be shown to users.
    ///
    /// `kind` is the type of parameter and whether it should be treated as a
    /// stable external API to callers of the pipeline.
    ///
    /// `default` is the default value for the parameter. If none is provided,
    /// the parameter _must_ be specified in order for the pipeline to run.
    ///
    /// `possible_values` allows restricting inputs to a set of possible values.
    /// Depending on the backend, these options may be presented as a set of
    /// radio buttons, a dropdown menu, or something in that vein. If `None`,
    /// then any string is allowed.
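    ///
    /// A minimal sketch restricting input to a fixed set of values (names are
    /// illustrative):
    ///
    /// ```ignore
    /// let profile = pipeline.new_parameter_string(
    ///     "profile",
    ///     "build profile to use",
    ///     ParameterKind::Stable,
    ///     Some("release"),
    ///     Some(vec!["debug".into(), "release".into()]),
    /// );
    /// ```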
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<impl AsRef<str>>,
        possible_values: Option<Vec<String>>,
    ) -> UseParameter<String> {
        let idx = self.parameters.len();
        let name = new_parameter_name(name, kind.clone());
        self.parameters.push(ParameterMeta {
            parameter: Parameter::String {
                name,
                description: description.as_ref().into(),
                kind,
                default: default.map(|x| x.as_ref().into()),
                possible_values,
            },
            used_by_jobs: BTreeSet::new(),
        });

        UseParameter {
            idx,
            _kind: std::marker::PhantomData,
        }
    }
}

/// Context object passed to [`PipelineJob::dep_on`] closures, used to declare
/// a job's artifacts and parameters.
pub struct PipelineJobCtx<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

866
867impl PipelineJobCtx<'_> {
868 /// Create a new `WriteVar<SideEffect>` anchored to the pipeline job.
869 pub fn new_done_handle(&mut self) -> WriteVar<crate::node::SideEffect> {
870 self.pipeline.dummy_done_idx += 1;
871 crate::node::thin_air_write_runtime_var(format!("start{}", self.pipeline.dummy_done_idx))
872 }
873
874 /// Claim that this job will use this artifact, obtaining a path to a folder
875 /// with the artifact's contents.
876 pub fn use_artifact(&mut self, artifact: &UseArtifact) -> ReadVar<PathBuf> {
877 self.pipeline.artifacts[artifact.idx]
878 .used_by_jobs
879 .insert(self.job_idx);
880
881 crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
882 &self.pipeline.artifacts[artifact.idx].name,
883 true,
884 ))
885 }
886
887 /// Claim that this job will publish this artifact, obtaining a path to a
888 /// fresh, empty folder which will be published as the specific artifact at
889 /// the end of the job.
890 pub fn publish_artifact(&mut self, artifact: PublishArtifact) -> ReadVar<PathBuf> {
891 let existing = self.pipeline.artifacts[artifact.idx]
892 .published_by_job
893 .replace(self.job_idx);
894 assert!(existing.is_none()); // PublishArtifact isn't cloneable
895
896 crate::node::thin_air_read_runtime_var(consistent_artifact_runtime_var_name(
897 &self.pipeline.artifacts[artifact.idx].name,
898 false,
899 ))
900 }
901
    fn helper_request<R: IntoRequest>(&mut self, req: R)
    where
        R::Node: 'static,
    {
        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());
    }

    fn new_artifact_map_vars<T: Artifact>(&mut self) -> (ReadVar<T>, WriteVar<T>) {
        let artifact_map_idx = self.pipeline.artifact_map_idx;
        self.pipeline.artifact_map_idx += 1;

        let backing_var = format!("artifact_map{}", artifact_map_idx);
        let read_var = crate::node::thin_air_read_runtime_var(backing_var.clone());
        let write_var = crate::node::thin_air_write_runtime_var(backing_var);
        (read_var, write_var)
    }

    /// Claim that this job will use this artifact, obtaining the resolved
    /// contents of the artifact.
    pub fn use_typed_artifact<T: Artifact>(
        &mut self,
        artifact: &UseTypedArtifact<T>,
    ) -> ReadVar<T> {
        let artifact_path = self.use_artifact(&artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        self.helper_request(artifact::resolve::Request::new(artifact_path, write));
        read
    }

    /// Claim that this job will publish this artifact, obtaining a variable to
    /// write the artifact's contents to. The artifact will be published at
    /// the end of the job.
    pub fn publish_typed_artifact<T: Artifact>(
        &mut self,
        artifact: PublishTypedArtifact<T>,
    ) -> WriteVar<T> {
        let artifact_path = self.publish_artifact(artifact.0);
        let (read, write) = self.new_artifact_map_vars::<T>();
        let done = self.new_done_handle();
        self.helper_request(artifact::publish::Request::new(read, artifact_path, done));
        write
    }

    /// Obtain a `ReadVar<T>` corresponding to a pipeline parameter which is
    /// specified at runtime.
    pub fn use_parameter<T>(&mut self, param: UseParameter<T>) -> ReadVar<T>
    where
        T: Serialize + DeserializeOwned,
    {
        self.pipeline.parameters[param.idx]
            .used_by_jobs
            .insert(self.job_idx);

        crate::node::thin_air_read_runtime_var(
            self.pipeline.parameters[param.idx]
                .parameter
                .name()
                .to_string(),
        )
    }

    /// Shortcut which allows defining a bool pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_bool`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_bool(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<bool>,
    ) -> ReadVar<bool> {
        let param = self
            .pipeline
            .new_parameter_bool(name, description, kind, default);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a number pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_num`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_num(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<i64>,
        possible_values: Option<Vec<i64>>,
    ) -> ReadVar<i64> {
        let param =
            self.pipeline
                .new_parameter_num(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }

    /// Shortcut which allows defining a string pipeline parameter within a Job.
    ///
    /// To share a single parameter between multiple jobs, don't use this
    /// method; use [`Pipeline::new_parameter_string`] + [`Self::use_parameter`]
    /// instead.
    pub fn new_parameter_string(
        &mut self,
        name: impl AsRef<str>,
        description: impl AsRef<str>,
        kind: ParameterKind,
        default: Option<String>,
        possible_values: Option<Vec<String>>,
    ) -> ReadVar<String> {
        let param =
            self.pipeline
                .new_parameter_string(name, description, kind, default, possible_values);
        self.use_parameter(param)
    }
}

#[must_use]
pub struct PipelineJob<'a> {
    pipeline: &'a mut Pipeline,
    job_idx: usize,
}

impl PipelineJob<'_> {
    /// (ADO only) specify which agent pool this job will be run on.
    pub fn ado_set_pool(self, pool: impl AsRef<str>) -> Self {
        self.ado_set_pool_with_demands(pool, Vec::new())
    }

    /// (ADO only) specify which agent pool this job will be run on, with
    /// additional special runner demands.
    pub fn ado_set_pool_with_demands(self, pool: impl AsRef<str>, demands: Vec<String>) -> Self {
        self.pipeline.jobs[self.job_idx].ado_pool = Some(AdoPool {
            name: pool.as_ref().into(),
            demands,
        });
        self
    }

    /// (ADO only) Declare a job-level, named, read-only ADO variable.
    ///
    /// `name` and `value` are both arbitrary strings, which may include ADO
    /// template expressions.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variable(self, name: impl AsRef<str>, value: impl AsRef<str>) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .ado_variables
            .insert(name.into(), value.into());
        self
    }

    /// (ADO only) Declare multiple job-level, named, read-only ADO variables at
    /// once.
    ///
    /// This is a convenience method to streamline invoking
    /// [`Self::ado_new_named_variable`] multiple times.
    ///
    /// NOTE: Unless required by some particular third-party task, it's strongly
    /// recommended to _avoid_ using this method, and to simply use
    /// [`ReadVar::from_static`] to obtain a static variable.
    ///
    /// DEVNOTE: In the future, this API may be updated to return a handle that
    /// will allow resolving the resulting `AdoRuntimeVar`, but for
    /// implementation expediency, this API does not currently do this. If you
    /// need to read the value of this variable at runtime, you may need to
    /// invoke [`AdoRuntimeVar::dangerous_from_global`] manually.
    ///
    /// [`NodeCtx::get_ado_variable`]: crate::node::NodeCtx::get_ado_variable
    pub fn ado_new_named_variables<K, V>(self, vars: impl IntoIterator<Item = (K, V)>) -> Self
    where
        K: AsRef<str>,
        V: AsRef<str>,
    {
        self.pipeline.jobs[self.job_idx].ado_variables.extend(
            vars.into_iter()
                .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())),
        );
        self
    }

    /// Overrides the id of the job.
    ///
    /// Flowey typically generates a reasonable job ID but some use cases that depend
    /// on the ID may find it useful to override it to something custom.
    pub fn ado_override_job_id(self, name: impl AsRef<str>) -> Self {
        self.pipeline
            .ado_job_id_overrides
            .insert(self.job_idx, name.as_ref().into());
        self
    }

    /// (GitHub Actions only) specify which Github runner this job will be run on.
    pub fn gh_set_pool(self, pool: GhRunner) -> Self {
        self.pipeline.jobs[self.job_idx].gh_pool = Some(pool);
        self
    }

    /// (GitHub Actions only) Manually override the `if:` condition for this
    /// particular job.
    ///
    /// **This is dangerous**, as an improperly set `if` condition may break
    /// downstream flowey jobs which assume flowey is in control of the job's
    /// scheduling logic.
    ///
    /// See
    /// <https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idif>
    /// for more info.
    pub fn gh_dangerous_override_if(self, condition: impl AsRef<str>) -> Self {
        self.pipeline.jobs[self.job_idx].gh_override_if = Some(condition.as_ref().into());
        self
    }

    /// (GitHub Actions only) Declare a global job-level environment variable,
    /// visible to all downstream steps.
    ///
    /// `name` and `value` are both arbitrary strings, which may include GitHub
    /// Actions template expressions.
    ///
    /// **This is dangerous**, as it is easy to misuse this API in order to
    /// write a node which takes an implicit dependency on there being a global
    /// variable set on its behalf by the top-level pipeline code, making it
    /// difficult to "locally reason" about the behavior of a node simply by
    /// reading its code.
    ///
    /// Whenever possible, nodes should "late bind" environment variables:
    /// accepting a compile-time / runtime flowey parameter, and then setting it
    /// prior to executing a child command that requires it.
    ///
    /// Only use this API in exceptional cases, such as obtaining an environment
    /// variable whose value is determined by a job-level GitHub Actions
    /// expression evaluation.
    pub fn gh_dangerous_global_env_var(
        self,
        name: impl AsRef<str>,
        value: impl AsRef<str>,
    ) -> Self {
        let name = name.as_ref();
        let value = value.as_ref();
        self.pipeline.jobs[self.job_idx]
            .gh_global_env
            .insert(name.into(), value.into());
        self
    }

    /// (GitHub Actions only) Grant permissions required by nodes in the job.
    ///
    /// For a given node handle, grant the specified permissions.
    /// The list provided must match the permissions specified within the node
    /// using `requires_permission`.
    ///
    /// NOTE: While this method is called at a node-level for auditability, the emitted
    /// yaml grants permissions at the job-level.
    ///
    /// This can lead to surprising situations: e.g., if node 1 fails to specify
    /// a permission required by GitHub Actions, but another node 2 in the same
    /// job grants it, the pipeline will execute, even though it would not if
    /// node 2 were removed.
    ///
    /// For available permission scopes and their descriptions, see
    /// <https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions>.
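    ///
    /// A minimal sketch (`some_node::Node` is a hypothetical node which
    /// declared a `GhPermission::Contents` requirement):
    ///
    /// ```ignore
    /// job.gh_grant_permissions::<some_node::Node>([(
    ///     GhPermission::Contents,
    ///     GhPermissionValue::Read,
    /// )])
    /// ```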
    pub fn gh_grant_permissions<N: FlowNodeBase + 'static>(
        self,
        permissions: impl IntoIterator<Item = (GhPermission, GhPermissionValue)>,
    ) -> Self {
        let node_handle = NodeHandle::from_type::<N>();
        for (permission, value) in permissions {
            self.pipeline.jobs[self.job_idx]
                .gh_permissions
                .entry(node_handle)
                .or_default()
                .insert(permission, value);
        }
        self
    }

    pub fn apply_patchfn(self, patchfn: crate::patch::PatchFn) -> Self {
        self.pipeline.jobs[self.job_idx]
            .patches
            .apply_patchfn(patchfn);
        self
    }

    /// Set a timeout for the job, in minutes.
    ///
    /// Not calling this will result in the platform's default timeout being used,
    /// which is typically 60 minutes, but may vary.
    pub fn with_timeout_in_minutes(self, timeout: u32) -> Self {
        self.pipeline.jobs[self.job_idx].timeout_minutes = Some(timeout);
        self
    }

    /// (ADO+Local Only) Only run the job if the specified condition is true.
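    ///
    /// A minimal sketch, gating a job on a bool pipeline parameter:
    ///
    /// ```ignore
    /// let run_extra = pipeline.new_parameter_bool(
    ///     "run_extra",
    ///     "run the extra validation job",
    ///     ParameterKind::Unstable,
    ///     Some(false),
    /// );
    /// let job = pipeline
    ///     .new_job(platform, arch, "extra validation")
    ///     .with_condition(run_extra)
    ///     .finish();
    /// ```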
    pub fn with_condition(self, cond: UseParameter<bool>) -> Self {
        self.pipeline.jobs[self.job_idx].cond_param_idx = Some(cond.idx);
        self.pipeline.parameters[cond.idx]
            .used_by_jobs
            .insert(self.job_idx);
        self
    }

    /// Add a flow node which will be run as part of the job.
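    ///
    /// A minimal sketch (`my_node` is a hypothetical flowey node whose request
    /// type implements [`IntoRequest`]):
    ///
    /// ```ignore
    /// let job = pipeline
    ///     .new_job(platform, arch, "example")
    ///     .dep_on(|ctx| my_node::Request {
    ///         some_input: ctx.use_parameter(some_param),
    ///         done: ctx.new_done_handle(),
    ///     })
    ///     .finish();
    /// ```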
    pub fn dep_on<R: IntoRequest + 'static>(
        self,
        f: impl FnOnce(&mut PipelineJobCtx<'_>) -> R,
    ) -> Self {
        // `PipelineJobCtx` will ensure artifact deps are taken care of
        let req = f(&mut PipelineJobCtx {
            pipeline: self.pipeline,
            job_idx: self.job_idx,
        });

        self.pipeline.jobs[self.job_idx]
            .root_nodes
            .entry(NodeHandle::from_type::<R::Node>())
            .or_default()
            .push(serde_json::to_vec(&req.into_request()).unwrap().into());

        self
    }

    /// Finish describing the pipeline job.
    pub fn finish(self) -> PipelineJobHandle {
        PipelineJobHandle {
            job_idx: self.job_idx,
        }
    }

    /// Return the job's platform.
    pub fn get_platform(&self) -> FlowPlatform {
        self.pipeline.jobs[self.job_idx].platform
    }

    /// Return the job's architecture.
    pub fn get_arch(&self) -> FlowArch {
        self.pipeline.jobs[self.job_idx].arch
    }
}

#[derive(Clone)]
pub struct PipelineJobHandle {
    job_idx: usize,
}

impl PipelineJobHandle {
    pub fn is_handle_for(&self, job: &PipelineJob<'_>) -> bool {
        self.job_idx == job.job_idx
    }
}

#[derive(Clone, Copy)]
pub enum PipelineBackendHint {
    /// Pipeline is being run on the user's dev machine (via bash / direct run)
    Local,
    /// Pipeline is run on ADO
    Ado,
    /// Pipeline is run on GitHub Actions
    Github,
}

pub trait IntoPipeline {
    fn into_pipeline(self, backend_hint: PipelineBackendHint) -> anyhow::Result<Pipeline>;
}

fn new_parameter_name(name: impl AsRef<str>, kind: ParameterKind) -> String {
    match kind {
        ParameterKind::Unstable => format!("__unstable_{}", name.as_ref()),
        ParameterKind::Stable => name.as_ref().into(),
    }
}

/// Structs which should only be used by top-level flowey emitters. If you're a
/// pipeline author, these are not types you need to care about!
pub mod internal {
    use super::*;
    use std::collections::BTreeMap;

    pub fn consistent_artifact_runtime_var_name(artifact: impl AsRef<str>, is_use: bool) -> String {
        format!(
            "artifact_{}_{}",
            if is_use { "use_from" } else { "publish_from" },
            artifact.as_ref()
        )
    }

    #[derive(Debug)]
    pub struct InternalAdoResourcesRepository {
        /// flowey-generated unique repo identifier
        pub repo_id: String,
        /// Type of repo that is being connected to.
        pub repo_type: AdoResourcesRepositoryType,
        /// Repository name. Format depends on `repo_type`.
        pub name: String,
        /// git ref to checkout.
        pub git_ref: AdoResourcesRepositoryRef<usize>,
        /// (optional) ID of the service endpoint connecting to this repository.
        pub endpoint: Option<String>,
    }

    pub struct PipelineJobMetadata {
        pub root_nodes: BTreeMap<NodeHandle, Vec<Box<[u8]>>>,
        pub patches: PatchResolver,
        pub label: String,
        pub platform: FlowPlatform,
        pub arch: FlowArch,
        pub cond_param_idx: Option<usize>,
        pub timeout_minutes: Option<u32>,
        // backend specific
        pub ado_pool: Option<AdoPool>,
        pub ado_variables: BTreeMap<String, String>,
        pub gh_override_if: Option<String>,
        pub gh_pool: Option<GhRunner>,
        pub gh_global_env: BTreeMap<String, String>,
        pub gh_permissions: BTreeMap<NodeHandle, BTreeMap<GhPermission, GhPermissionValue>>,
    }

    // TODO: support a more structured format for demands
    // See https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/pool-demands
    #[derive(Debug, Clone)]
    pub struct AdoPool {
        pub name: String,
        pub demands: Vec<String>,
    }

    #[derive(Debug)]
    pub struct ArtifactMeta {
        pub name: String,
        pub published_by_job: Option<usize>,
        pub used_by_jobs: BTreeSet<usize>,
    }

    #[derive(Debug)]
    pub struct ParameterMeta {
        pub parameter: Parameter,
        pub used_by_jobs: BTreeSet<usize>,
    }

    /// Mirror of [`Pipeline`], except with all fields marked as `pub`.
    pub struct PipelineFinalized {
        pub jobs: Vec<PipelineJobMetadata>,
        pub artifacts: Vec<ArtifactMeta>,
        pub parameters: Vec<ParameterMeta>,
        pub extra_deps: BTreeSet<(usize, usize)>,
        // backend specific
        pub ado_name: Option<String>,
        pub ado_schedule_triggers: Vec<AdoScheduleTriggers>,
        pub ado_ci_triggers: Option<AdoCiTriggers>,
        pub ado_pr_triggers: Option<AdoPrTriggers>,
        pub ado_bootstrap_template: String,
        pub ado_resources_repository: Vec<InternalAdoResourcesRepository>,
        pub ado_post_process_yaml_cb:
            Option<Box<dyn FnOnce(serde_yaml::Value) -> serde_yaml::Value>>,
        pub ado_variables: BTreeMap<String, String>,
        pub ado_job_id_overrides: BTreeMap<usize, String>,
        pub gh_name: Option<String>,
        pub gh_schedule_triggers: Vec<GhScheduleTriggers>,
        pub gh_ci_triggers: Option<GhCiTriggers>,
        pub gh_pr_triggers: Option<GhPrTriggers>,
        pub gh_bootstrap_template: String,
    }

    impl PipelineFinalized {
        pub fn from_pipeline(mut pipeline: Pipeline) -> Self {
            if let Some(cb) = pipeline.inject_all_jobs_with.take() {
                for job_idx in 0..pipeline.jobs.len() {
                    let _ = cb(PipelineJob {
                        pipeline: &mut pipeline,
                        job_idx,
                    });
                }
            }

            let Pipeline {
                mut jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_bootstrap_template,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
                // not relevant to consumer code
                dummy_done_idx: _,
                artifact_map_idx: _,
                artifact_names: _,
                global_patchfns,
                inject_all_jobs_with: _, // processed above
            } = pipeline;

            for patchfn in global_patchfns {
                for job in &mut jobs {
                    job.patches.apply_patchfn(patchfn)
                }
            }

            Self {
                jobs,
                artifacts,
                parameters,
                extra_deps,
                ado_name,
                ado_schedule_triggers,
                ado_ci_triggers,
                ado_pr_triggers,
                ado_bootstrap_template,
                ado_resources_repository,
                ado_post_process_yaml_cb,
                ado_variables,
                ado_job_id_overrides,
                gh_name,
                gh_schedule_triggers,
                gh_ci_triggers,
                gh_pr_triggers,
                gh_bootstrap_template,
            }
        }
    }

    #[derive(Debug, Clone)]
    pub enum Parameter {
        Bool {
            name: String,
            description: String,
            kind: ParameterKind,
            default: Option<bool>,
        },
        String {
            name: String,
            description: String,
            default: Option<String>,
            kind: ParameterKind,
            possible_values: Option<Vec<String>>,
        },
        Num {
            name: String,
            description: String,
            default: Option<i64>,
            kind: ParameterKind,
            possible_values: Option<Vec<i64>>,
        },
    }

    impl Parameter {
        pub fn name(&self) -> &str {
            match self {
                Parameter::Bool { name, .. } => name,
                Parameter::String { name, .. } => name,
                Parameter::Num { name, .. } => name,
            }
        }
    }
}