openhcl_boot/memory.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Address space allocator for VTL2 memory used by the bootshim.

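//! A minimal usage sketch (illustrative only; the names below are
//! placeholders, and real callers derive these ranges from host-provided
//! parameters):
//!
//! ```ignore
//! let mut manager = AddressSpaceManager::new_const();
//! AddressSpaceManagerBuilder::new(
//!     &mut manager,
//!     vtl2_ram,               // sorted &[MemoryEntry]
//!     bootshim_used,          // MemoryRange used by the bootshim
//!     persisted_state_region, // header page plus protobuf payload
//!     vtl2_config_ranges,     // iterator of config MemoryRanges
//! )
//! .init()?;
//!
//! // Carve a 64K VTL2 GPA pool out of high memory.
//! let pool = manager
//!     .allocate(None, 0x10000, AllocationType::GpaPool, AllocationPolicy::HighMemory)
//!     .expect("pool allocation");
//! ```
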
use crate::host_params::MAX_VTL2_RAM_RANGES;
use arrayvec::ArrayVec;
use host_fdt_parser::MemoryEntry;
#[cfg(test)]
use igvm_defs::MemoryMapEntryType;
use loader_defs::shim::MemoryVtlType;
use memory_range::MemoryRange;
use memory_range::RangeWalkResult;
use memory_range::walk_ranges;
use thiserror::Error;

const PAGE_SIZE_4K: u64 = 4096;

/// The maximum number of reserved memory ranges that we might use.
/// See the [`ReservedMemoryType`] definition for details.
pub const MAX_RESERVED_MEM_RANGES: usize = 6 + sidecar_defs::MAX_NODES;

const MAX_MEMORY_RANGES: usize = MAX_VTL2_RAM_RANGES + MAX_RESERVED_MEM_RANGES;

/// Maximum number of ranges in the address space manager. For simplicity,
/// this is twice the combined number of memory and reserved ranges.
const MAX_ADDRESS_RANGES: usize = MAX_MEMORY_RANGES * 2;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ReservedMemoryType {
    /// VTL2 parameter regions (could be up to 2).
    Vtl2Config,
    /// Reserved memory that should not be used by the kernel or usermode. There
    /// should only be one.
    Vtl2Reserved,
    /// Sidecar image. There should only be one.
    SidecarImage,
    /// A reserved range per sidecar node.
    SidecarNode,
    /// Persistent VTL2 memory used for page allocations in usermode. This
    /// memory is persisted, both location and contents, across servicing.
    /// Today, we only support a single range.
    Vtl2GpaPool,
    /// Page tables that are used for AP startup, on TDX.
    TdxPageTables,
    /// In-memory bootshim log buffer.
    BootshimLogBuffer,
    /// Persisted state header.
    PersistedStateHeader,
    /// Persisted state payload.
    PersistedStatePayload,
}

impl From<ReservedMemoryType> for MemoryVtlType {
    fn from(r: ReservedMemoryType) -> Self {
        match r {
            ReservedMemoryType::Vtl2Config => MemoryVtlType::VTL2_CONFIG,
            ReservedMemoryType::SidecarImage => MemoryVtlType::VTL2_SIDECAR_IMAGE,
            ReservedMemoryType::SidecarNode => MemoryVtlType::VTL2_SIDECAR_NODE,
            ReservedMemoryType::Vtl2Reserved => MemoryVtlType::VTL2_RESERVED,
            ReservedMemoryType::Vtl2GpaPool => MemoryVtlType::VTL2_GPA_POOL,
            ReservedMemoryType::TdxPageTables => MemoryVtlType::VTL2_TDX_PAGE_TABLES,
            ReservedMemoryType::BootshimLogBuffer => MemoryVtlType::VTL2_BOOTSHIM_LOG_BUFFER,
            ReservedMemoryType::PersistedStateHeader => MemoryVtlType::VTL2_PERSISTED_STATE_HEADER,
            ReservedMemoryType::PersistedStatePayload => {
                MemoryVtlType::VTL2_PERSISTED_STATE_PROTOBUF
            }
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum AddressUsage {
    /// Free for allocation.
    Free,
    /// Used by the bootshim (usually at build time), but free for kernel use.
    Used,
    /// Reserved and should not be reported to the kernel as usable RAM.
    Reserved(ReservedMemoryType),
}

/// A single contiguous range of address space tracked by the manager, along
/// with its current usage.
#[derive(Debug)]
struct AddressRange {
    range: MemoryRange,
    vnode: u32,
    usage: AddressUsage,
}

impl From<AddressUsage> for MemoryVtlType {
    fn from(usage: AddressUsage) -> Self {
        match usage {
            AddressUsage::Free => MemoryVtlType::VTL2_RAM,
            AddressUsage::Used => MemoryVtlType::VTL2_RAM,
            AddressUsage::Reserved(r) => r.into(),
        }
    }
}

/// A range allocated from the address space, along with the vnode it was
/// allocated from.
#[derive(Debug, Clone, Copy)]
pub struct AllocatedRange {
    pub range: MemoryRange,
    pub vnode: u32,
}

/// Errors returned when initializing the address space manager.
#[derive(Debug, Error)]
pub enum Error {
    #[error("ram len {len} greater than maximum {max}")]
    RamLen { len: u64, max: u64 },
    #[error("already initialized")]
    AlreadyInitialized,
    #[error(
        "reserved range {reserved:#x?}, type {typ:?} outside of bootshim used {bootshim_used:#x?}"
    )]
    ReservedRangeOutsideBootshimUsed {
        reserved: MemoryRange,
        typ: ReservedMemoryType,
        bootshim_used: MemoryRange,
    },
}

/// Allocator for the VTL2 address space used by the bootshim.
#[derive(Debug)]
pub struct AddressSpaceManager {
    /// Track the whole address space; this must remain sorted.
    address_space: ArrayVec<AddressRange, MAX_ADDRESS_RANGES>,

    /// Set if the VTL2 GPA pool has at least one allocation.
    vtl2_pool: bool,
}

/// A builder used to initialize an [`AddressSpaceManager`].
pub struct AddressSpaceManagerBuilder<'a, I: Iterator<Item = MemoryRange>> {
    manager: &'a mut AddressSpaceManager,
    vtl2_ram: &'a [MemoryEntry],
    bootshim_used: MemoryRange,
    persisted_state_region: MemoryRange,
    vtl2_config: I,
    reserved_range: Option<MemoryRange>,
    sidecar_image: Option<MemoryRange>,
    page_tables: Option<MemoryRange>,
    log_buffer: Option<MemoryRange>,
    pool_range: Option<MemoryRange>,
}

impl<'a, I: Iterator<Item = MemoryRange>> AddressSpaceManagerBuilder<'a, I> {
    /// Create a new builder to initialize an [`AddressSpaceManager`].
    ///
    /// `vtl2_ram` is the list of ram ranges for VTL2, which must be sorted.
    ///
    /// `bootshim_used` is the range used by the bootshim, which may be
    /// reclaimed as ram by the kernel.
    ///
    /// `persisted_state_region` is the range used to store the persisted state
    /// header described by [`loader_defs::shim::PersistedStateHeader`] and the
    /// corresponding protobuf payload.
    ///
    /// Ranges supplied via the other builder methods must lie within
    /// `bootshim_used`.
    pub fn new(
        manager: &'a mut AddressSpaceManager,
        vtl2_ram: &'a [MemoryEntry],
        bootshim_used: MemoryRange,
        persisted_state_region: MemoryRange,
        vtl2_config: I,
    ) -> AddressSpaceManagerBuilder<'a, I> {
        AddressSpaceManagerBuilder {
            manager,
            vtl2_ram,
            bootshim_used,
            persisted_state_region,
            vtl2_config,
            reserved_range: None,
            sidecar_image: None,
            page_tables: None,
            log_buffer: None,
            pool_range: None,
        }
    }

    /// A reserved range reported as type [`MemoryVtlType::VTL2_RESERVED`].
    pub fn with_reserved_range(mut self, reserved_range: MemoryRange) -> Self {
        self.reserved_range = Some(reserved_range);
        self
    }

    /// The sidecar image, reported as type [`MemoryVtlType::VTL2_SIDECAR_IMAGE`].
    pub fn with_sidecar_image(mut self, sidecar_image: MemoryRange) -> Self {
        self.sidecar_image = Some(sidecar_image);
        self
    }

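    /// Page tables for AP startup on TDX, reported as type
    /// [`MemoryVtlType::VTL2_TDX_PAGE_TABLES`]. (This setter is assumed: the
    /// builder tracks `page_tables`, but the original listing had no way to
    /// set it.)
    pub fn with_page_tables(mut self, page_tables: MemoryRange) -> Self {
        self.page_tables = Some(page_tables);
        self
    }
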
    /// Log buffer that is reported as type [`MemoryVtlType::VTL2_BOOTSHIM_LOG_BUFFER`].
    pub fn with_log_buffer(mut self, log_buffer: MemoryRange) -> Self {
        self.log_buffer = Some(log_buffer);
        self
    }

    /// An existing VTL2 GPA pool range, reported as type [`MemoryVtlType::VTL2_GPA_POOL`].
    pub fn with_pool_range(mut self, pool_range: MemoryRange) -> Self {
        self.pool_range = Some(pool_range);
        self
    }

    /// Consume the builder and initialize the address space manager.
    pub fn init(self) -> Result<&'a mut AddressSpaceManager, Error> {
        let Self {
            manager,
            vtl2_ram,
            bootshim_used,
            persisted_state_region,
            vtl2_config,
            reserved_range,
            sidecar_image,
            page_tables,
            log_buffer,
            pool_range,
        } = self;

        if vtl2_ram.len() > MAX_VTL2_RAM_RANGES {
            return Err(Error::RamLen {
                len: vtl2_ram.len() as u64,
                max: MAX_VTL2_RAM_RANGES as u64,
            });
        }

        if !manager.address_space.is_empty() {
            return Err(Error::AlreadyInitialized);
        }

        // Split the persisted state region into two: the header which is the
        // first 4K page, and the remainder which is the protobuf payload. Both
        // are reserved ranges.
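        // (For example, a region 0x0..0xE000 yields a header at 0x0..0x1000
        // and a payload at 0x1000..0xE000, as exercised by
        // `test_persisted_range` below.)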
        let (persisted_header, persisted_payload) =
            persisted_state_region.split_at_offset(PAGE_SIZE_4K);

        // The remaining ranges are reserved, and must lie within the bootshim
        // used range.
        let mut reserved: ArrayVec<(MemoryRange, ReservedMemoryType), 20> = ArrayVec::new();
        reserved.push((persisted_header, ReservedMemoryType::PersistedStateHeader));
        reserved.push((persisted_payload, ReservedMemoryType::PersistedStatePayload));
        reserved.extend(vtl2_config.map(|r| (r, ReservedMemoryType::Vtl2Config)));
        reserved.extend(
            reserved_range
                .into_iter()
                .map(|r| (r, ReservedMemoryType::Vtl2Reserved)),
        );
        reserved.extend(
            sidecar_image
                .into_iter()
                .map(|r| (r, ReservedMemoryType::SidecarImage)),
        );
        reserved.extend(
            page_tables
                .into_iter()
                .map(|r| (r, ReservedMemoryType::TdxPageTables)),
        );
        reserved.extend(
            log_buffer
                .into_iter()
                .map(|r| (r, ReservedMemoryType::BootshimLogBuffer)),
        );
        reserved.sort_unstable_by_key(|(r, _)| r.start());

        let mut used_ranges: ArrayVec<(MemoryRange, AddressUsage), 13> = ArrayVec::new();

        // Construct initial used ranges by walking both the bootshim_used range
        // and all reserved ranges that overlap.
        for (entry, r) in walk_ranges(
            core::iter::once((bootshim_used, AddressUsage::Used)),
            reserved.iter().cloned(),
        ) {
            match r {
                RangeWalkResult::Left(_) => {
                    used_ranges.push((entry, AddressUsage::Used));
                }
                RangeWalkResult::Both(_, reserved_type) => {
                    used_ranges.push((entry, AddressUsage::Reserved(reserved_type)));
                }
                RangeWalkResult::Right(typ) => {
                    return Err(Error::ReservedRangeOutsideBootshimUsed {
                        reserved: entry,
                        typ,
                        bootshim_used,
                    });
                }
                RangeWalkResult::Neither => {}
            }
        }

        // Add any existing pool range as reserved.
        if let Some(range) = pool_range {
            used_ranges.push((
                range,
                AddressUsage::Reserved(ReservedMemoryType::Vtl2GpaPool),
            ));
            manager.vtl2_pool = true;
        }
        used_ranges.sort_unstable_by_key(|(r, _)| r.start());

        // Construct the initial state of the VTL2 address space by walking ram
        // and the used ranges computed above.
        assert!(manager.address_space.is_empty());
        for (entry, r) in walk_ranges(
            vtl2_ram.iter().map(|e| (e.range, e.vnode)),
            used_ranges.iter().map(|(r, usage)| (*r, usage)),
        ) {
            match r {
                RangeWalkResult::Left(vnode) => {
                    // VTL2 normal ram, unused by anything.
                    manager.address_space.push(AddressRange {
                        range: entry,
                        vnode,
                        usage: AddressUsage::Free,
                    });
                }
                RangeWalkResult::Both(vnode, usage) => {
                    // VTL2 ram, currently in use.
                    manager.address_space.push(AddressRange {
                        range: entry,
                        vnode,
                        usage: *usage,
                    });
                }
                RangeWalkResult::Right(usage) => {
                    panic!("vtl2 range {entry:#x?} used by {usage:?} not contained in vtl2 ram");
                }
                RangeWalkResult::Neither => {}
            }
        }

        Ok(manager)
    }
}

impl AddressSpaceManager {
    /// Create a new, empty manager, usable in const contexts. It must be
    /// initialized via [`AddressSpaceManagerBuilder`] before use.
    pub const fn new_const() -> Self {
        Self {
            address_space: ArrayVec::new_const(),
            vtl2_pool: false,
        }
    }

    /// Split a free range in two, with the allocation policy deciding whether
    /// the low or high part is allocated.
    ///
    /// The caller must provide a memory range with room for an aligned
    /// subrange of length `len`.
    fn allocate_range(
        &mut self,
        index: usize,
        len: u64,
        usage: AddressUsage,
        allocation_policy: AllocationPolicy,
        alignment: Option<u64>,
    ) -> AllocatedRange {
        assert!(usage != AddressUsage::Free);
        let range = self.address_space.get_mut(index).expect("valid index");
        assert_eq!(range.usage, AddressUsage::Free);

        let subrange = if let Some(alignment) = alignment {
            range.range.aligned_subrange(alignment)
        } else {
            range.range
        };

        assert!(subrange.len() >= len);
        assert_ne!(subrange, MemoryRange::EMPTY);

        let used = match allocation_policy {
            AllocationPolicy::LowMemory => {
                // Allocate from the beginning (low addresses).
                let (used, _) = subrange.split_at_offset(len);
                used
            }
            AllocationPolicy::HighMemory => {
                // Allocate from the end (high addresses).
                let offset = subrange.len() - len;
                let (_, used) = subrange.split_at_offset(offset);
                used
            }
        };

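        // Whatever remains of the original range on either side of the
        // allocated subrange (including any alignment gap) stays free.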
        let left = MemoryRange::new(range.range.start()..used.start());
        let right = MemoryRange::new(used.end()..range.range.end());

        let to_address_range = |r: MemoryRange| -> Option<AddressRange> {
            if !r.is_empty() {
                Some(AddressRange {
                    range: r,
                    vnode: range.vnode,
                    usage: AddressUsage::Free,
                })
            } else {
                None
            }
        };

        let left = to_address_range(left);
        let right = to_address_range(right);

        // Update this range to mark it as used.
        range.usage = usage;
        range.range = used;
        let allocated = AllocatedRange {
            range: used,
            vnode: range.vnode,
        };

        if let Some(right) = right {
            self.address_space.insert(index + 1, right);
        }

        if let Some(left) = left {
            self.address_space.insert(index, left);
        }

        allocated
    }

    fn allocate_inner(
        &mut self,
        required_vnode: Option<u32>,
        len: u64,
        allocation_type: AllocationType,
        allocation_policy: AllocationPolicy,
        alignment: Option<u64>,
    ) -> Option<AllocatedRange> {
        if len == 0 {
            return None;
        }

        // Round up to the next 4k page size, if the caller did not specify a
        // multiple of 4k.
        let len = len.div_ceil(PAGE_SIZE_4K) * PAGE_SIZE_4K;
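        // (For example, a request of 0x1001 bytes becomes 0x2000 and 0xFFF
        // becomes 0x1000; see `test_unaligned_allocations`.)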

        fn find_index<'a>(
            mut iter: impl Iterator<Item = (usize, &'a AddressRange)>,
            required_vnode: Option<u32>,
            len: u64,
            alignment: Option<u64>,
        ) -> Option<usize> {
            iter.find_map(|(index, range)| {
                // Any alignment constraint is satisfied if the aligned
                // subrange is still large enough to hold the allocation.
                let is_aligned =
                    alignment.map_or(true, |a| range.range.aligned_subrange(a).len() >= len);
                if range.usage == AddressUsage::Free
                    && range.range.len() >= len
                    && required_vnode.map_or(true, |v| v == range.vnode)
                    && is_aligned
                {
                    Some(index)
                } else {
                    None
                }
            })
        }

        // Walk ranges in forward/reverse order, depending on allocation policy.
        let index = {
            let iter = self.address_space.iter().enumerate();
            match allocation_policy {
                AllocationPolicy::LowMemory => find_index(iter, required_vnode, len, alignment),
                AllocationPolicy::HighMemory => {
                    find_index(iter.rev(), required_vnode, len, alignment)
                }
            }
        };

        let address_usage = match allocation_type {
            AllocationType::GpaPool => AddressUsage::Reserved(ReservedMemoryType::Vtl2GpaPool),
            AllocationType::SidecarNode => AddressUsage::Reserved(ReservedMemoryType::SidecarNode),
            AllocationType::TdxPageTables => {
                AddressUsage::Reserved(ReservedMemoryType::TdxPageTables)
            }
        };

        let alloc = index.map(|index| {
            self.allocate_range(index, len, address_usage, allocation_policy, alignment)
        });

        if allocation_type == AllocationType::GpaPool && alloc.is_some() {
            self.vtl2_pool = true;
        }

        alloc
    }

    /// Allocate a new range of memory with the given type and policy. `None`
    /// is returned if the allocation cannot be satisfied.
    ///
    /// `len` is the number of bytes to allocate, rounded up to the next 4K
    /// page size increment. If `len` is 0, then `None` is returned.
    ///
    /// `required_vnode`, if `Some(u32)`, is the vnode to allocate from. If
    /// there are no free ranges left in that vnode, `None` is returned.
    pub fn allocate(
        &mut self,
        required_vnode: Option<u32>,
        len: u64,
        allocation_type: AllocationType,
        allocation_policy: AllocationPolicy,
    ) -> Option<AllocatedRange> {
        self.allocate_inner(
            required_vnode,
            len,
            allocation_type,
            allocation_policy,
            None,
        )
    }

    /// Allocate a new range of memory with the given type and policy. `None`
    /// is returned if the allocation cannot be satisfied.
    ///
    /// `len` is the number of bytes to allocate, rounded up to the next 4K
    /// page size increment. If `len` is 0, then `None` is returned.
    ///
    /// `required_vnode`, if `Some(u32)`, is the vnode to allocate from. If
    /// there are no free ranges left in that vnode, `None` is returned.
    ///
    /// `alignment` aligns the top of HighMemory allocations and the bottom of
    /// LowMemory allocations to `alignment` bytes.
    #[cfg_attr(all(target_arch = "aarch64", not(test)), expect(dead_code))]
    pub fn allocate_aligned(
        &mut self,
        required_vnode: Option<u32>,
        len: u64,
        allocation_type: AllocationType,
        allocation_policy: AllocationPolicy,
        alignment: u64,
    ) -> Option<AllocatedRange> {
        self.allocate_inner(
            required_vnode,
            len,
            allocation_type,
            allocation_policy,
            Some(alignment),
        )
    }

    /// Returns an iterator over all VTL2 ranges, with adjacent ranges of the
    /// same type merged.
    pub fn vtl2_ranges(&self) -> impl Iterator<Item = (MemoryRange, MemoryVtlType)> + use<'_> {
        memory_range::merge_adjacent_ranges(
            self.address_space.iter().map(|r| (r.range, r.usage.into())),
        )
    }

    /// Returns an iterator over reserved VTL2 ranges that should not be
    /// described as ram to the kernel.
    pub fn reserved_vtl2_ranges(
        &self,
    ) -> impl Iterator<Item = (MemoryRange, ReservedMemoryType)> + use<'_> {
        self.address_space.iter().filter_map(|r| match r.usage {
            AddressUsage::Reserved(typ) => Some((r.range, typ)),
            _ => None,
        })
    }

    /// Returns true if there are VTL2 pool allocations.
    pub fn has_vtl2_pool(&self) -> bool {
        self.vtl2_pool
    }
}

/// The type of memory being allocated, which determines the reserved type the
/// range is reported as.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationType {
    /// VTL2 GPA pool memory.
    GpaPool,
    /// A reserved range for a sidecar node.
    SidecarNode,
    /// Page tables used for AP startup on TDX.
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    TdxPageTables,
}

/// The policy for placing allocations within free memory.
pub enum AllocationPolicy {
    /// Prefer low memory addresses.
    LowMemory,
    /// Prefer high memory addresses.
    // TODO: only used in tests, but will be used in an upcoming change
    #[allow(dead_code)]
    HighMemory,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_allocate() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0xF000),
            MemoryRange::new(0x0..0x2000),
            [
                MemoryRange::new(0x3000..0x4000),
                MemoryRange::new(0x5000..0x6000),
            ]
            .iter()
            .cloned(),
        )
        .with_reserved_range(MemoryRange::new(0x8000..0xA000))
        .with_sidecar_image(MemoryRange::new(0xA000..0xC000))
        .init()
        .unwrap();

        let range = address_space
            .allocate(
                None,
                0x1000,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1F000..0x20000));
        assert!(address_space.has_vtl2_pool());

        let range = address_space
            .allocate(
                None,
                0x2000,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1D000..0x1F000));

        let range = address_space
            .allocate(
                None,
                0x3000,
                AllocationType::GpaPool,
                AllocationPolicy::LowMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0xF000..0x12000));

        let range = address_space
            .allocate(
                None,
                0x1000,
                AllocationType::GpaPool,
                AllocationPolicy::LowMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x12000..0x13000));
    }

    #[test]
    fn test_allocate_aligned() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0xF000),
            MemoryRange::new(0x0..0x2000),
            [
                MemoryRange::new(0x3000..0x4000),
                MemoryRange::new(0x5000..0x6000),
            ]
            .iter()
            .cloned(),
        )
        .with_reserved_range(MemoryRange::new(0x8000..0xA000))
        .with_sidecar_image(MemoryRange::new(0xA000..0xC000))
        .init()
        .unwrap();

        let alignment = 4096 * 16;
        let range = address_space
            .allocate_aligned(
                None,
                0x1000,
                AllocationType::GpaPool,
                AllocationPolicy::LowMemory,
                alignment,
            )
            .unwrap();

        assert_eq!(0, range.range.start() % alignment);

        let alignment = 4096 * 4;
        let range = address_space
            .allocate_aligned(
                None,
                0x1000,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
                alignment,
            )
            .unwrap();

        assert_eq!(0, range.range.end() % alignment);
    }

    #[test]
    fn test_failed_alignment() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0xF000),
            MemoryRange::new(0x0..0x2000),
            [
                MemoryRange::new(0x3000..0x4000),
                MemoryRange::new(0x5000..0x6000),
            ]
            .iter()
            .cloned(),
        )
        .with_reserved_range(MemoryRange::new(0x8000..0xA000))
        .with_sidecar_image(MemoryRange::new(0xA000..0xC000))
        .init()
        .unwrap();

        let alignment = 1024 * 1024 * 2;
        let range = address_space.allocate_aligned(
            None,
            0x1000,
            AllocationType::GpaPool,
            AllocationPolicy::LowMemory,
            alignment,
        );
        assert!(range.is_none());
    }

    // test numa allocation
    #[test]
    fn test_allocate_numa() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[
            MemoryEntry {
                range: MemoryRange::new(0x0..0x20000),
                vnode: 0,
                mem_type: MemoryMapEntryType::MEMORY,
            },
            MemoryEntry {
                range: MemoryRange::new(0x20000..0x40000),
                vnode: 1,
                mem_type: MemoryMapEntryType::MEMORY,
            },
            MemoryEntry {
                range: MemoryRange::new(0x40000..0x60000),
                vnode: 2,
                mem_type: MemoryMapEntryType::MEMORY,
            },
            MemoryEntry {
                range: MemoryRange::new(0x60000..0x80000),
                vnode: 3,
                mem_type: MemoryMapEntryType::MEMORY,
            },
        ];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0x10000),
            MemoryRange::new(0x0..0x2000),
            [
                MemoryRange::new(0x3000..0x4000),
                MemoryRange::new(0x5000..0x6000),
            ]
            .iter()
            .cloned(),
        )
        .with_reserved_range(MemoryRange::new(0x8000..0xA000))
        .with_sidecar_image(MemoryRange::new(0xA000..0xC000))
        .init()
        .unwrap();

        let range = address_space
            .allocate(
                Some(0),
                0x1000,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1F000..0x20000));
        assert_eq!(range.vnode, 0);

        let range = address_space
            .allocate(
                Some(0),
                0x2000,
                AllocationType::SidecarNode,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1D000..0x1F000));
        assert_eq!(range.vnode, 0);

        let range = address_space
            .allocate(
                Some(2),
                0x3000,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x5D000..0x60000));
        assert_eq!(range.vnode, 2);

        // allocate all of node 3, then subsequent allocations fail
        let range = address_space
            .allocate(
                Some(3),
                0x20000,
                AllocationType::SidecarNode,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x60000..0x80000));
        assert_eq!(range.vnode, 3);

        let range = address_space.allocate(
            Some(3),
            0x1000,
            AllocationType::SidecarNode,
            AllocationPolicy::HighMemory,
        );
        assert!(
            range.is_none(),
            "allocation should fail, no space left for node 3"
        );
    }

    // test unaligned 4k allocations
    #[test]
    fn test_unaligned_allocations() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0xF000),
            MemoryRange::new(0x0..0x2000),
            [
                MemoryRange::new(0x3000..0x4000),
                MemoryRange::new(0x5000..0x6000),
            ]
            .iter()
            .cloned(),
        )
        .with_reserved_range(MemoryRange::new(0x8000..0xA000))
        .with_sidecar_image(MemoryRange::new(0xA000..0xC000))
        .init()
        .unwrap();

        let range = address_space
            .allocate(
                None,
                0x1001,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1E000..0x20000));

        let range = address_space
            .allocate(
                None,
                0xFFF,
                AllocationType::GpaPool,
                AllocationPolicy::HighMemory,
            )
            .unwrap();
        assert_eq!(range.range, MemoryRange::new(0x1D000..0x1E000));

        let range = address_space.allocate(
            None,
            0,
            AllocationType::GpaPool,
            AllocationPolicy::HighMemory,
        );
        assert!(range.is_none());
    }

    // test invalid init ranges
    #[test]
    fn test_invalid_init_ranges() {
        let vtl2_ram = [MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];
        let bootshim_used = MemoryRange::new(0x0..0xF000);

        // test config range completely outside of bootshim_used
        let mut address_space = AddressSpaceManager::new_const();

        let result = AddressSpaceManagerBuilder::new(
            &mut address_space,
            &vtl2_ram,
            bootshim_used,
            MemoryRange::new(0x0..0x2000),
            [MemoryRange::new(0x10000..0x11000)].iter().cloned(), // completely outside
        )
        .init();

        assert!(matches!(
            result,
            Err(Error::ReservedRangeOutsideBootshimUsed { .. })
        ));

        // test config range partially overlapping with bootshim_used

        let mut address_space = AddressSpaceManager::new_const();
        let result = AddressSpaceManagerBuilder::new(
            &mut address_space,
            &vtl2_ram,
            bootshim_used,
            MemoryRange::new(0x0..0x2000),
            [MemoryRange::new(0xE000..0x10000)].iter().cloned(), // partially overlapping
        )
        .init();

        assert!(matches!(
            result,
            Err(Error::ReservedRangeOutsideBootshimUsed { .. })
        ));

        // test persisted region outside of bootshim_used
        let mut address_space = AddressSpaceManager::new_const();
        let result = AddressSpaceManagerBuilder::new(
            &mut address_space,
            &vtl2_ram,
            bootshim_used,
            MemoryRange::new(0x10000..0x14000), // outside
            [MemoryRange::new(0xE000..0xF000)].iter().cloned(),
        )
        .init();

        assert!(matches!(
            result,
            Err(Error::ReservedRangeOutsideBootshimUsed { .. })
        ));
    }

    #[test]
    fn test_persisted_range() {
        let vtl2_ram = [MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];
        let bootshim_used = MemoryRange::new(0x0..0xF000);

        let mut address_space = AddressSpaceManager::new_const();
        AddressSpaceManagerBuilder::new(
            &mut address_space,
            &vtl2_ram,
            bootshim_used,
            MemoryRange::new(0x0..0xE000),
            [MemoryRange::new(0xE000..0xF000)].iter().cloned(),
        )
        .init()
        .unwrap();

        let expected = [
            (
                MemoryRange::new(0x0..0x1000),
                MemoryVtlType::VTL2_PERSISTED_STATE_HEADER,
            ),
            (
                MemoryRange::new(0x1000..0xE000),
                MemoryVtlType::VTL2_PERSISTED_STATE_PROTOBUF,
            ),
            (MemoryRange::new(0xE000..0xF000), MemoryVtlType::VTL2_CONFIG),
            (MemoryRange::new(0xF000..0x20000), MemoryVtlType::VTL2_RAM),
        ];

        for (expected, actual) in expected.iter().zip(address_space.vtl2_ranges()) {
            assert_eq!(*expected, actual);
        }

        // test with free space between state region and config
        let mut address_space = AddressSpaceManager::new_const();
        AddressSpaceManagerBuilder::new(
            &mut address_space,
            &vtl2_ram,
            bootshim_used,
            MemoryRange::new(0x0..0xA000),
            [MemoryRange::new(0xE000..0xF000)].iter().cloned(),
        )
        .init()
        .unwrap();

        let expected = [
            (
                MemoryRange::new(0x0..0x1000),
                MemoryVtlType::VTL2_PERSISTED_STATE_HEADER,
            ),
            (
                MemoryRange::new(0x1000..0xA000),
                MemoryVtlType::VTL2_PERSISTED_STATE_PROTOBUF,
            ),
            (MemoryRange::new(0xA000..0xE000), MemoryVtlType::VTL2_RAM),
            (MemoryRange::new(0xE000..0xF000), MemoryVtlType::VTL2_CONFIG),
            (MemoryRange::new(0xF000..0x20000), MemoryVtlType::VTL2_RAM),
        ];

        for (expected, actual) in expected.iter().zip(address_space.vtl2_ranges()) {
            assert_eq!(*expected, actual);
        }
    }

    // FIXME: test pool ranges
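    // A first sketch toward the FIXME above (illustrative ranges): an existing
    // pool range passed via `with_pool_range` should surface as a reserved
    // VTL2 GPA pool range and mark the manager as having pool allocations.
    #[test]
    fn test_existing_pool_range() {
        let mut address_space = AddressSpaceManager::new_const();
        let vtl2_ram = &[MemoryEntry {
            range: MemoryRange::new(0x0..0x20000),
            vnode: 0,
            mem_type: MemoryMapEntryType::MEMORY,
        }];

        AddressSpaceManagerBuilder::new(
            &mut address_space,
            vtl2_ram,
            MemoryRange::new(0x0..0xF000),
            MemoryRange::new(0x0..0x2000),
            [MemoryRange::new(0x3000..0x4000)].iter().cloned(),
        )
        .with_pool_range(MemoryRange::new(0x10000..0x12000))
        .init()
        .unwrap();

        assert!(address_space.has_vtl2_pool());
        assert!(address_space.reserved_vtl2_ranges().any(|(range, typ)| {
            range == MemoryRange::new(0x10000..0x12000)
                && typ == ReservedMemoryType::Vtl2GpaPool
        }));
    }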
}