use crate::Error;
use crate::IdentityMapSize;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

const X64_PTE_PRESENT: u64 = 1;
const X64_PTE_READ_WRITE: u64 = 1 << 1;
const X64_PTE_ACCESSED: u64 = 1 << 5;
const X64_PTE_DIRTY: u64 = 1 << 6;
const X64_PTE_LARGE_PAGE: u64 = 1 << 7;

const PAGE_TABLE_ENTRY_COUNT: usize = 512;
const PAGE_TABLE_ENTRY_SIZE: usize = 8;

const X64_PAGE_SHIFT: u64 = 12;
const X64_PTE_BITS: u64 = 9;

/// Number of bytes in a 4 KiB page on x64.
pub const X64_PAGE_SIZE: u64 = 4096;

/// Number of bytes in a 2 MiB large page on x64.
pub const X64_LARGE_PAGE_SIZE: u64 = 0x200000;

/// Number of bytes in a 1 GiB page on x64.
pub const X64_1GB_PAGE_SIZE: u64 = 0x40000000;

/// Maximum number of page tables the builders in this module will use.
pub const PAGE_TABLE_MAX_COUNT: usize = 20;

static_assertions::const_assert_eq!(
    PAGE_TABLE_ENTRY_SIZE * PAGE_TABLE_ENTRY_COUNT,
    X64_PAGE_SIZE as usize
);
const PAGE_TABLE_SIZE: usize = PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_SIZE;

/// Maximum size in bytes of the flattened page table output buffer.
pub const PAGE_TABLE_MAX_BYTES: usize = PAGE_TABLE_MAX_COUNT * X64_PAGE_SIZE as usize;

/// A single 8-byte x64 page table entry.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
#[repr(transparent)]
pub struct PageTableEntry {
    pub(crate) entry: u64,
}

/// A range of virtual addresses to map, along with the PTE permission bits to
/// map it with.
#[derive(Copy, Clone, Debug)]
pub struct MappedRange {
    start: u64,
    end: u64,
    permissions: u64,
}

impl MappedRange {
    /// Creates a new range that will be mapped present, accessed, and
    /// read/write.
    pub fn new(start: u64, end: u64) -> Self {
        Self {
            start,
            end,
            permissions: X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE,
        }
    }

    /// Returns the start of the range.
    pub fn start(&self) -> u64 {
        self.start
    }

    /// Returns the end of the range.
    pub fn end(&self) -> u64 {
        self.end
    }

    /// Clears the read/write bit, so the range is mapped read-only.
    pub fn read_only(mut self) -> Self {
        self.permissions &= !X64_PTE_READ_WRITE;
        self
    }
}

impl core::fmt::Debug for PageTableEntry {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("PageTableEntry")
            .field("entry", &self.entry)
            .field("is_present", &self.is_present())
            .field("gpa", &self.gpa())
            .finish()
    }
}

/// The type of page table entry to build.
#[derive(Debug, Copy, Clone)]
pub enum PageTableEntryType {
    /// A leaf entry mapping a 1 GiB page at the given address.
    Leaf1GbPage(u64),
    /// A leaf entry mapping a 2 MiB page at the given address.
    Leaf2MbPage(u64),
    /// A leaf entry mapping a 4 KiB page at the given address.
    Leaf4kPage(u64),
    /// A directory entry pointing to a next-level page table at the given
    /// address.
    Pde(u64),
}

/// A level in the x64 four-level paging hierarchy.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum EntryLevel {
    Pml4 = 3,
    Pdpt = 2,
    Pd = 1,
    Pt = 0,
}

impl EntryLevel {
    /// The number of bytes of address space mapped by a single entry at this
    /// level.
    pub fn mapping_size(self) -> u64 {
        match self {
            Self::Pml4 => X64_1GB_PAGE_SIZE * 512,
            Self::Pdpt => X64_1GB_PAGE_SIZE,
            Self::Pd => X64_LARGE_PAGE_SIZE,
            Self::Pt => X64_PAGE_SIZE,
        }
    }

    /// Returns the leaf entry type at this level for the given address.
    /// Panics for [`EntryLevel::Pml4`], since a PML4 table cannot hold leaf
    /// entries.
    pub fn leaf(self, va: u64) -> PageTableEntryType {
        match self {
            Self::Pml4 => panic!("cannot insert a leaf entry into a PML4 table"),
            Self::Pdpt => PageTableEntryType::Leaf1GbPage(va),
            Self::Pd => PageTableEntryType::Leaf2MbPage(va),
            Self::Pt => PageTableEntryType::Leaf4kPage(va),
        }
    }

    fn pa_mask(self) -> u64 {
        match self {
            Self::Pml4 => 0x000f_ffff_c000_0000,
            Self::Pdpt => 0x000f_ffff_ffe0_0000,
            Self::Pd => 0x000f_ffff_ffff_f000,
            Self::Pt => 0x000f_ffff_ffff_f000,
        }
    }

    /// Masks the address down to the granularity tracked for directory
    /// entries at this level.
    pub fn directory_pa(self, va: u64) -> u64 {
        va & self.pa_mask()
    }
}

impl PageTableEntry {
    const VALID_BITS: u64 = 0x000f_ffff_ffff_f000;

    /// Sets this entry to the given type, marking it present, accessed, and
    /// read/write.
    pub fn set_entry(&mut self, entry_type: PageTableEntryType) {
        self.entry = X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE;

        match entry_type {
            PageTableEntryType::Leaf1GbPage(address) => {
                // The address must be 1 GiB aligned.
                assert!(address % X64_1GB_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf2MbPage(address) => {
                // The address must be 2 MiB aligned.
                assert!(address % X64_LARGE_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf4kPage(address) => {
                // The address must be 4 KiB aligned.
                assert!(address % X64_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_DIRTY;
            }
            PageTableEntryType::Pde(address) => {
                // The address must be 4 KiB aligned.
                assert!(address % X64_PAGE_SIZE == 0);
                self.entry |= address;
            }
        }
    }

    /// Returns true if the present bit is set.
    pub fn is_present(&self) -> bool {
        self.entry & X64_PTE_PRESENT == X64_PTE_PRESENT
    }

    /// Returns the address this entry maps, if it is present.
    pub fn gpa(&self) -> Option<u64> {
        if self.is_present() {
            // Mask out the flag bits to recover the address.
            Some(self.entry & Self::VALID_BITS)
        } else {
            None
        }
    }

    /// Sets the address bits of this entry without touching the flag bits.
    pub fn set_addr(&mut self, addr: u64) {
        assert!(addr & !Self::VALID_BITS == 0);

        // Clear the old address, then set the new one.
        self.entry &= !Self::VALID_BITS;
        self.entry |= addr;
    }

    /// Returns the address bits of this entry, whether or not it is present.
    pub fn get_addr(&self) -> u64 {
        self.entry & Self::VALID_BITS
    }

    /// Clears the entry completely.
    pub fn clear(&mut self) {
        self.entry = 0;
    }
}

/// A page table: 512 consecutive 8-byte entries.
#[repr(C)]
#[derive(Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRY_COUNT],
}

impl PageTable {
    /// Returns a mutable iterator over the entries in this table.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        self.entries.iter_mut()
    }

    /// Returns the entry covering the given virtual address at the given
    /// page map level.
    pub fn entry(&mut self, gva: u64, level: u8) -> &mut PageTableEntry {
        let index = get_amd64_pte_index(gva, level as u64) as usize;
        &mut self.entries[index]
    }
}

impl core::ops::Index<usize> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl core::ops::IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

/// Get the index of the PTE covering a given VA at the given page map level,
/// where level 3 is the PML4 and level 0 is the page table.
pub fn get_amd64_pte_index(gva: u64, page_map_level: u64) -> u64 {
    let index = gva >> (X64_PAGE_SHIFT + page_map_level * X64_PTE_BITS);
    index & ((1 << X64_PTE_BITS) - 1)
}

/// Calculate the number of PDE page tables required to map the given GPA
/// range with 2 MiB entries: one table per GiB spanned.
pub fn calculate_pde_table_count(start_gpa: u64, size: u64) -> u64 {
    let mut count = 0;

    // Find the 1 GiB boundaries on either side of the range.
    let start_aligned_up = align_up_to_1_gb_page_size(start_gpa);
    let end_gpa = start_gpa + size;
    let end_aligned_down = (end_gpa / X64_1GB_PAGE_SIZE) * X64_1GB_PAGE_SIZE;

    if size < X64_1GB_PAGE_SIZE {
        // A small range can still straddle a 1 GiB boundary and need two
        // tables.
        if end_gpa > end_aligned_down && start_gpa < end_aligned_down {
            count = 2;
        } else {
            count = 1;
        }
    } else {
        // One table for an unaligned head, if any.
        if start_gpa != start_aligned_up {
            count += 1;
        }

        // One table per whole GiB in the middle.
        if end_aligned_down > start_aligned_up {
            count += (end_aligned_down - start_aligned_up) / X64_1GB_PAGE_SIZE;
        }

        // One table for an unaligned tail, if any.
        if end_gpa > end_aligned_down {
            count += 1;
        }
    }

    count
}

#[derive(Debug, Clone)]
struct PageTableBuilderInner {
    page_table_gpa: u64,
    confidential_bit: Option<u32>,
}

/// A builder for page tables that map a set of sorted, non-overlapping
/// virtual address ranges.
pub struct PageTableBuilder<'a> {
    inner: PageTableBuilderInner,
    /// The page table working buffer.
    page_table: &'a mut [PageTable],
    /// The buffer the built page tables are flattened into.
    flattened_page_table: &'a mut [u8],
    /// The ranges to map.
    ranges: &'a [MappedRange],
}

impl PageTableBuilderInner {
    fn get_addr_mask(&self) -> u64 {
        const ALL_ADDR_BITS: u64 = 0x000f_ffff_ffff_f000;
        ALL_ADDR_BITS & !self.get_confidential_mask()
    }

    fn get_confidential_mask(&self) -> u64 {
        if let Some(confidential_bit) = self.confidential_bit {
            1u64 << confidential_bit
        } else {
            0
        }
    }

    fn build_pte(&self, entry_type: PageTableEntryType, permissions: u64) -> PageTableEntry {
        let mut entry: u64 = permissions;

        match entry_type {
            PageTableEntryType::Leaf1GbPage(address) => {
                assert_eq!(address % X64_1GB_PAGE_SIZE, 0);
                entry |= address;
                entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf2MbPage(address) => {
                assert_eq!(address % X64_LARGE_PAGE_SIZE, 0);
                entry |= address;
                entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf4kPage(address) => {
                assert_eq!(address % X64_PAGE_SIZE, 0);
                entry |= address;
                entry |= X64_PTE_DIRTY;
            }
            PageTableEntryType::Pde(address) => {
                assert_eq!(address % X64_PAGE_SIZE, 0);
                entry |= address;
            }
        }

        // Set the confidential bit if one is configured.
        let mask = self.get_confidential_mask();
        if self.confidential_bit.is_some() {
            entry |= mask;
        } else {
            entry &= !mask;
        }

        PageTableEntry { entry }
    }

    fn get_addr_from_pte(&self, pte: &PageTableEntry) -> u64 {
        pte.entry & self.get_addr_mask()
    }
}

impl<'a> PageTableBuilder<'a> {
    /// Creates a new builder.
    ///
    /// `flattened_page_table` must be exactly one page per entry of
    /// `page_table`, and `ranges` must be sorted and non-overlapping, with
    /// each range's start not after its end.
    pub fn new(
        page_table_gpa: u64,
        page_table: &'a mut [PageTable],
        flattened_page_table: &'a mut [u8],
        ranges: &'a [MappedRange],
    ) -> Result<Self, Error> {
        // The byte buffer must exactly cover the page table buffer.
        if flattened_page_table.len() != (page_table.len() * PAGE_TABLE_SIZE) {
            Err(Error::BadBufferSize {
                bytes_buf: flattened_page_table.len(),
                struct_buf: page_table.len() * PAGE_TABLE_SIZE,
            })
        } else {
            for range in ranges.iter() {
                if range.start() > range.end() {
                    return Err(Error::InvalidRange);
                }
            }

            for window in ranges.windows(2) {
                let (l, r) = (&window[0], &window[1]);

                if r.start() < l.start() {
                    return Err(Error::UnsortedMappings);
                }

                if l.end() > r.start() {
                    return Err(Error::OverlappingMappings);
                }
            }
            Ok(PageTableBuilder {
                inner: PageTableBuilderInner {
                    page_table_gpa,
                    confidential_bit: None,
                },
                page_table,
                flattened_page_table,
                ranges,
            })
        }
    }

    /// Sets the position of the confidential bit to apply to every entry.
    pub fn with_confidential_bit(mut self, bit_position: u32) -> Self {
        self.inner.confidential_bit = Some(bit_position);
        self
    }

    /// Builds the page tables for the configured ranges, returning the used
    /// prefix of the flattened buffer.
    pub fn build(self) -> Result<&'a [u8], Error> {
        let PageTableBuilder {
            page_table,
            flattened_page_table,
            ranges,
            inner,
        } = self;

        // The PML4 occupies the first table; later tables are allocated
        // sequentially after it.
        let (mut page_table_index, pml4_table_index) = (0, 0);

        let mut link_tables = |start_va: u64, end_va: u64, permissions: u64| -> Result<(), Error> {
            let mut current_va = start_va;
            // Either inserts a leaf entry at this level (returning None), or
            // finds or creates the next-level directory and returns its
            // table index.
            let mut get_or_insert_entry = |table_index: usize,
                                           entry_level: EntryLevel,
                                           current_va: &mut u64|
             -> Result<Option<usize>, Error> {
                // Insert a leaf if the VA is aligned to this level's mapping
                // size and an entire mapping still fits below end_va.
                if (*current_va).is_multiple_of(entry_level.mapping_size())
                    && (*current_va + entry_level.mapping_size() <= end_va)
                {
                    let entry = page_table[table_index].entry(*current_va, entry_level as u8);
                    if entry.is_present() {
                        return Err(Error::AttemptedEntryOverwrite);
                    }

                    #[cfg(feature = "tracing")]
                    tracing::trace!(
                        "inserting entry for va: {:#X} at level {:?}",
                        current_va,
                        entry_level
                    );

                    let new_entry = inner.build_pte(entry_level.leaf(*current_va), permissions);
                    *entry = new_entry;
                    *current_va += entry_level.mapping_size();

                    Ok(None)
                } else {
                    // Otherwise walk down a level, allocating a new directory
                    // table if one is not already present.
                    let directory_pa = entry_level.directory_pa(*current_va);
                    let len = page_table.len();
                    let entry = page_table[table_index].entry(directory_pa, entry_level as u8);

                    if !entry.is_present() {
                        page_table_index += 1;

                        if page_table_index >= len {
                            return Err(Error::NotEnoughMemory);
                        }
                        // The new table's address is its index within the
                        // buffer, offset from the base GPA of the whole set
                        // of page tables.
                        let output_address =
                            inner.page_table_gpa + page_table_index as u64 * X64_PAGE_SIZE;

                        let new_entry = inner.build_pte(
                            PageTableEntryType::Pde(output_address),
                            X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE,
                        );

                        #[cfg(feature = "tracing")]
                        tracing::trace!(
                            "creating directory for va: {:#X} at level {:?}",
                            directory_pa,
                            entry_level
                        );
                        *entry = new_entry;

                        Ok(Some(page_table_index))
                    } else {
                        // Recover the existing directory's index from its
                        // address.
                        Ok(Some(
                            ((inner.get_addr_from_pte(entry) - inner.page_table_gpa)
                                / X64_PAGE_SIZE)
                                .try_into()
                                .expect("Valid page table index"),
                        ))
                    }
                }
            };

            while current_va < end_va {
                #[cfg(feature = "tracing")]
                tracing::trace!("creating entry for va: {:#X}", current_va);
                // Walk down from the PML4 until a leaf entry is inserted.
                let pdpt_table_index =
                    get_or_insert_entry(pml4_table_index, EntryLevel::Pml4, &mut current_va)?;
                if let Some(pdpt_table_index) = pdpt_table_index {
                    let pd_table_index =
                        get_or_insert_entry(pdpt_table_index, EntryLevel::Pdpt, &mut current_va)?;
                    if let Some(pd_table_index) = pd_table_index {
                        let pt_table_index =
                            get_or_insert_entry(pd_table_index, EntryLevel::Pd, &mut current_va)?;
                        if let Some(pt_table_index) = pt_table_index {
                            get_or_insert_entry(pt_table_index, EntryLevel::Pt, &mut current_va)?;
                        }
                    }
                }
            }

            Ok(())
        };

        for range in ranges {
            link_tables(range.start, range.end, range.permissions)?;
        }

        // page_table_index is the index of the last table used.
        Ok(flatten_page_table(
            page_table,
            flattened_page_table,
            page_table_index + 1,
        ))
    }
}

#[derive(Debug, Clone)]
struct IdentityMapBuilderParams {
    page_table_gpa: u64,
    identity_map_size: IdentityMapSize,
    address_bias: u64,
    pml4e_link: Option<(u64, u64)>,
}

/// A builder for an identity-mapped set of page tables built from 2 MiB leaf
/// entries.
pub struct IdentityMapBuilder<'a> {
    params: IdentityMapBuilderParams,
    /// The page table working buffer.
    page_table: &'a mut [PageTable],
    /// The buffer the built page tables are flattened into.
    flattened_page_table: &'a mut [u8],
}

impl<'a> IdentityMapBuilder<'a> {
    /// Creates a new builder that identity maps the given size starting at
    /// address zero.
    ///
    /// `flattened_page_table` must be exactly one page per entry of
    /// `page_table`.
    pub fn new(
        page_table_gpa: u64,
        identity_map_size: IdentityMapSize,
        page_table: &'a mut [PageTable],
        flattened_page_table: &'a mut [u8],
    ) -> Result<Self, Error> {
        if flattened_page_table.len() != (page_table.len() * PAGE_TABLE_SIZE) {
            Err(Error::BadBufferSize {
                bytes_buf: flattened_page_table.len(),
                struct_buf: page_table.len() * PAGE_TABLE_SIZE,
            })
        } else {
            Ok(IdentityMapBuilder {
                params: IdentityMapBuilderParams {
                    page_table_gpa,
                    identity_map_size,
                    address_bias: 0,
                    pml4e_link: None,
                },
                page_table,
                flattened_page_table,
            })
        }
    }

    /// Sets an address bias: each virtual address is mapped to the physical
    /// address `va + address_bias`.
    pub fn with_address_bias(mut self, address_bias: u64) -> Self {
        self.params.address_bias = address_bias;
        self
    }

    /// Adds an extra PML4 link `(link_target_gpa, linkage_gpa)`: the PML4
    /// entry covering `linkage_gpa` (which must be 512 GiB aligned) is
    /// pointed at `link_target_gpa`.
    pub fn with_pml4e_link(mut self, pml4e_link: (u64, u64)) -> Self {
        self.params.pml4e_link = Some(pml4e_link);
        self
    }

    /// Builds the identity map, returning the used prefix of the flattened
    /// buffer.
    pub fn build(self) -> &'a [u8] {
        let IdentityMapBuilder {
            page_table,
            flattened_page_table,
            params,
        } = self;

        // One PDE table maps 1 GiB with 2 MiB leaf entries. With no address
        // bias, a PML4 table and a PDPT table are built as well; with a
        // bias, only the PDPT table is.
        let leaf_page_table_count = match params.identity_map_size {
            IdentityMapSize::Size4Gb => 4,
            IdentityMapSize::Size8Gb => 8,
        };
        let page_table_count = leaf_page_table_count + if params.address_bias == 0 { 2 } else { 1 };
        let mut page_table_allocator = page_table.iter_mut().enumerate();

        let pdpte_table = if params.address_bias == 0 {
            // Allocate the PML4 table and the PDPT table.
            let (_, pml4e_table) = page_table_allocator
                .next()
                .expect("pagetable should always be available, code bug if not");

            let (pdpte_table_index, pdpte_table) = page_table_allocator
                .next()
                .expect("pagetable should always be available, code bug if not");

            // Point the first PML4 entry at the PDPT table.
            let output_address = params.page_table_gpa + pdpte_table_index as u64 * X64_PAGE_SIZE;
            pml4e_table.entries[0].set_entry(PageTableEntryType::Pde(output_address));

            if let Some((link_target_gpa, linkage_gpa)) = params.pml4e_link {
                // The linkage address must be 512 GiB aligned.
                assert!((linkage_gpa & 0x7FFFFFFFFF) == 0);
                pml4e_table.entries[linkage_gpa as usize >> 39]
                    .set_entry(PageTableEntryType::Pde(link_target_gpa));
            }

            pdpte_table
        } else {
            // With an address bias, only the PDPT table is built.
            page_table_allocator
                .next()
                .expect("pagetable should always be available, code bug if not")
                .1
        };

        // Fill in 2 MiB leaf entries covering the whole identity-mapped
        // region.
        let top_address = match params.identity_map_size {
            IdentityMapSize::Size4Gb => 0x100000000u64,
            IdentityMapSize::Size8Gb => 0x200000000u64,
        };
        let mut current_va = 0;

        while current_va < top_address {
            let (pde_table_index, pde_table) = page_table_allocator
                .next()
                .expect("pagetable should always be available, code bug if not");

            // Point the covering PDPT entry at this PDE table.
            let pdpte_index = get_amd64_pte_index(current_va, 2);
            let output_address = params.page_table_gpa + pde_table_index as u64 * X64_PAGE_SIZE;
            let pdpte_entry = &mut pdpte_table.entries[pdpte_index as usize];
            assert!(!pdpte_entry.is_present());
            pdpte_entry.set_entry(PageTableEntryType::Pde(output_address));

            // Fill the entire PDE table with 2 MiB mappings.
            for entry in pde_table.iter_mut() {
                entry.set_entry(PageTableEntryType::Leaf2MbPage(
                    current_va + params.address_bias,
                ));
                current_va += X64_LARGE_PAGE_SIZE;
            }
        }

        flatten_page_table(page_table, flattened_page_table, page_table_count)
    }
}

/// Align an address up to the next 4 KiB page boundary.
pub fn align_up_to_page_size(address: u64) -> u64 {
    (address + X64_PAGE_SIZE - 1) & !(X64_PAGE_SIZE - 1)
}

/// Align an address up to the next 2 MiB page boundary.
pub fn align_up_to_large_page_size(address: u64) -> u64 {
    (address + X64_LARGE_PAGE_SIZE - 1) & !(X64_LARGE_PAGE_SIZE - 1)
}

/// Align an address up to the next 1 GiB page boundary.
pub fn align_up_to_1_gb_page_size(address: u64) -> u64 {
    (address + X64_1GB_PAGE_SIZE - 1) & !(X64_1GB_PAGE_SIZE - 1)
}

/// Copies the first `page_table_count` page tables into the flattened byte
/// buffer, returning the used prefix.
fn flatten_page_table<'a>(
    page_table: &mut [PageTable],
    flattened_page_table: &'a mut [u8],
    page_table_count: usize,
) -> &'a [u8] {
    for (page_table, dst) in page_table
        .iter()
        .take(page_table_count)
        .zip(flattened_page_table.chunks_mut(PAGE_TABLE_SIZE))
    {
        let src = page_table.as_bytes();
        dst.copy_from_slice(src);
    }

    &flattened_page_table[0..PAGE_TABLE_SIZE * page_table_count]
}

#[cfg(test)]
mod tests {
    use std::vec;
    use std::vec::Vec;

    use super::Error;
    use super::MappedRange;
    use super::PAGE_TABLE_MAX_BYTES;
    use super::PAGE_TABLE_MAX_COUNT;
    use super::PageTable;
    use super::PageTableBuilder;
    use super::X64_1GB_PAGE_SIZE;
    use super::align_up_to_large_page_size;
    use super::align_up_to_page_size;
    use super::calculate_pde_table_count;
    use zerocopy::FromZeros;

    #[test]
    fn test_align_up() {
        assert_eq!(align_up_to_page_size(4096), 4096);
        assert_eq!(align_up_to_page_size(4095), 4096);
        assert_eq!(align_up_to_page_size(4097), 8192);
    }

    #[test]
    fn test_large_align_up() {
        assert_eq!(align_up_to_large_page_size(0), 0);
        assert_eq!(align_up_to_large_page_size(4096), 0x200000);
        assert_eq!(align_up_to_large_page_size(0x200000), 0x200000);
        assert_eq!(align_up_to_large_page_size(0x200001), 0x400000);
    }

    #[test]
    fn test_pde_size_calc() {
        assert_eq!(calculate_pde_table_count(0, 512), 1);
        assert_eq!(calculate_pde_table_count(0, 1024 * 1024), 1);
        assert_eq!(calculate_pde_table_count(512, 1024 * 1024), 1);
        assert_eq!(calculate_pde_table_count(X64_1GB_PAGE_SIZE - 512, 1024), 2);
        assert_eq!(calculate_pde_table_count(X64_1GB_PAGE_SIZE - 512, 512), 1);
        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE), 1);
        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE + 1), 2);
        assert_eq!(calculate_pde_table_count(1, X64_1GB_PAGE_SIZE + 1), 2);
        assert_eq!(calculate_pde_table_count(512, X64_1GB_PAGE_SIZE * 2), 3);

        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE * 3), 3);
        assert_eq!(
            calculate_pde_table_count(X64_1GB_PAGE_SIZE, X64_1GB_PAGE_SIZE * 3),
            3
        );
    }
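
    // Added example, not part of the original suite: a hedged sanity check
    // of how `get_amd64_pte_index` decomposes a virtual address and how much
    // address space each level covers. The values follow directly from the
    // 9-bits-per-level, 4 KiB-page x64 scheme defined above.
    #[test]
    fn test_pte_index_decomposition() {
        use super::EntryLevel;
        use super::X64_LARGE_PAGE_SIZE;
        use super::X64_PAGE_SIZE;
        use super::get_amd64_pte_index;

        // For a VA of exactly 1 GiB, only the PDPT index (level 2) is
        // nonzero.
        assert_eq!(get_amd64_pte_index(X64_1GB_PAGE_SIZE, 3), 0);
        assert_eq!(get_amd64_pte_index(X64_1GB_PAGE_SIZE, 2), 1);
        assert_eq!(get_amd64_pte_index(X64_1GB_PAGE_SIZE, 1), 0);
        assert_eq!(get_amd64_pte_index(X64_1GB_PAGE_SIZE, 0), 0);

        // Each level maps 512 times as much as the level below it.
        assert_eq!(EntryLevel::Pt.mapping_size(), X64_PAGE_SIZE);
        assert_eq!(EntryLevel::Pd.mapping_size(), X64_LARGE_PAGE_SIZE);
        assert_eq!(EntryLevel::Pdpt.mapping_size(), 512 * X64_LARGE_PAGE_SIZE);
        assert_eq!(EntryLevel::Pml4.mapping_size(), 512 * X64_1GB_PAGE_SIZE);
    }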

    /// Builds page tables for the given ranges and asserts that the
    /// flattened output is exactly `expected_size` bytes.
    fn check_page_table_count(ranges: &[MappedRange], expected_size: usize) {
        let mut page_table_work_buffer: Vec<PageTable> =
            vec![PageTable::new_zeroed(); PAGE_TABLE_MAX_COUNT];
        let mut page_table: Vec<u8> = vec![0; PAGE_TABLE_MAX_BYTES];

        let page_table_builder = PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            ranges,
        )
        .expect("page table builder initialization should succeed");

        let page_table = page_table_builder.build().expect("building should succeed");
        assert_eq!(page_table.len(), expected_size);
    }

    /// Runs `PageTableBuilder::new` on the given ranges, returning the error
    /// if initialization fails.
    fn page_table_builder_error(ranges: &[MappedRange]) -> Option<Error> {
        let mut page_table_work_buffer: Vec<PageTable> =
            vec![PageTable::new_zeroed(); PAGE_TABLE_MAX_COUNT];
        let mut page_table: Vec<u8> = vec![0; PAGE_TABLE_MAX_BYTES];

        PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            ranges,
        )
        .err()
    }

    #[test]
    fn test_page_table_entry_sizing() {
        const ONE_GIG: u64 = 1024 * 1024 * 1024;
        const TWO_MB: u64 = 1024 * 1024 * 2;
        const FOUR_KB: u64 = 4096;

        check_page_table_count(&[MappedRange::new(0, ONE_GIG)], 4096 * 2);
        check_page_table_count(&[MappedRange::new(0, TWO_MB)], 4096 * 3);
        check_page_table_count(&[MappedRange::new(0, FOUR_KB)], 4096 * 4);
        check_page_table_count(&[MappedRange::new(FOUR_KB, ONE_GIG)], 4096 * 4);
        check_page_table_count(&[MappedRange::new(TWO_MB, ONE_GIG)], 4096 * 3);
        check_page_table_count(&[MappedRange::new(TWO_MB, ONE_GIG + FOUR_KB)], 4096 * 5);
        check_page_table_count(&[MappedRange::new(TWO_MB, ONE_GIG + TWO_MB)], 4096 * 4);
    }
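
    // Added example, not part of the original suite: a hedged check that
    // `MappedRange::read_only` clears the read/write bit on the emitted leaf
    // entry. Mapping [0, 2 MiB) uses a PML4, a PDPT, and a PD table, so the
    // single 2 MiB leaf is the first entry of the third table.
    #[test]
    fn test_read_only_leaf_entry() {
        const TWO_MB: u64 = 1024 * 1024 * 2;

        let mut page_table_work_buffer: Vec<PageTable> =
            vec![PageTable::new_zeroed(); PAGE_TABLE_MAX_COUNT];
        let mut page_table: Vec<u8> = vec![0; PAGE_TABLE_MAX_BYTES];

        let flattened = PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            &[MappedRange::new(0, TWO_MB).read_only()],
        )
        .expect("page table builder initialization should succeed")
        .build()
        .expect("building should succeed");
        assert_eq!(flattened.len(), 3 * 4096);

        let mut raw = [0u8; 8];
        raw.copy_from_slice(&flattened[2 * 4096..2 * 4096 + 8]);
        let leaf = u64::from_ne_bytes(raw);
        assert_eq!(leaf & 0b1, 0b1, "leaf must be present");
        assert_eq!(leaf & 0b10, 0, "read/write bit must be clear");
    }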

    #[test]
    fn test_page_table_builder_overlapping_range() {
        const ONE_GIG: u64 = 1024 * 1024 * 1024;
        const TWO_MB: u64 = 1024 * 1024 * 2;
        const FOUR_KB: u64 = 4096;

        let err = page_table_builder_error(&[
            MappedRange::new(FOUR_KB, ONE_GIG),
            MappedRange::new(TWO_MB, ONE_GIG),
        ])
        .expect("must fail");
        assert!(matches!(err, Error::OverlappingMappings));
    }
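
    // Added example, not part of the original suite: a hedged smoke test of
    // `IdentityMapBuilder`. A 4 GiB identity map built from 2 MiB leaves
    // needs four PDE tables plus a PML4 table and a PDPT table, and its
    // first PML4 entry must point at the PDPT.
    #[test]
    fn test_identity_map_smoke() {
        use super::IdentityMapBuilder;
        use crate::IdentityMapSize;

        let mut page_table_work_buffer: Vec<PageTable> =
            vec![PageTable::new_zeroed(); PAGE_TABLE_MAX_COUNT];
        let mut page_table: Vec<u8> = vec![0; PAGE_TABLE_MAX_BYTES];

        let flattened = IdentityMapBuilder::new(
            0,
            IdentityMapSize::Size4Gb,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
        )
        .expect("identity map builder initialization should succeed")
        .build();
        assert_eq!(flattened.len(), 6 * 4096);

        let mut raw = [0u8; 8];
        raw.copy_from_slice(&flattened[..8]);
        let pml4e = u64::from_ne_bytes(raw);
        assert_eq!(pml4e & 0b1, 0b1, "first PML4 entry must be present");
    }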

    #[test]
    fn test_page_table_builder_invalid_range() {
        const ONE_GIG: u64 = 1024 * 1024 * 1024;
        const FOUR_KB: u64 = 4096;

        let err =
            page_table_builder_error(&[MappedRange::new(ONE_GIG, FOUR_KB)]).expect("must fail");
        assert!(matches!(err, Error::InvalidRange));
    }
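
    // Added example, not part of the original suite: exercise the
    // `PageTableEntry` accessors directly on a zeroed entry.
    #[test]
    fn test_page_table_entry_roundtrip() {
        use super::PageTableEntry;
        use super::PageTableEntryType;
        use super::X64_LARGE_PAGE_SIZE;

        let mut entry = PageTableEntry::new_zeroed();
        assert!(!entry.is_present());
        assert_eq!(entry.gpa(), None);

        entry.set_entry(PageTableEntryType::Leaf2MbPage(X64_LARGE_PAGE_SIZE));
        assert!(entry.is_present());
        assert_eq!(entry.gpa(), Some(X64_LARGE_PAGE_SIZE));
        assert_eq!(entry.get_addr(), X64_LARGE_PAGE_SIZE);

        entry.clear();
        assert!(!entry.is_present());
    }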

    #[test]
    fn test_page_table_builder_oom() {
        const ONE_GIG: u64 = 1024 * 1024 * 1024;

        let mut page_table_work_buffer: Vec<PageTable> = vec![PageTable::new_zeroed(); 1];
        let mut page_table: Vec<u8> = vec![0; 4096];

        let err = PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            &[MappedRange::new(0, ONE_GIG)],
        )
        .expect("page table builder initialization should succeed")
        .build()
        .expect_err("building page tables should fail");

        assert!(matches!(err, Error::NotEnoughMemory));
    }
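
    // Added example, not part of the original suite: a hedged check that
    // `with_confidential_bit` sets the requested bit in the emitted leaf
    // entry. Bit 51 is chosen here purely for illustration.
    #[test]
    fn test_confidential_bit_set() {
        const TWO_MB: u64 = 1024 * 1024 * 2;

        let mut page_table_work_buffer: Vec<PageTable> =
            vec![PageTable::new_zeroed(); PAGE_TABLE_MAX_COUNT];
        let mut page_table: Vec<u8> = vec![0; PAGE_TABLE_MAX_BYTES];

        let flattened = PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            &[MappedRange::new(0, TWO_MB)],
        )
        .expect("page table builder initialization should succeed")
        .with_confidential_bit(51)
        .build()
        .expect("building should succeed");

        // Read back the 2 MiB leaf entry (first entry of the PD table).
        let mut raw = [0u8; 8];
        raw.copy_from_slice(&flattened[2 * 4096..2 * 4096 + 8]);
        let leaf = u64::from_ne_bytes(raw);
        assert!(leaf & (1 << 51) != 0, "confidential bit must be set");
        assert_eq!(leaf & 0b1, 0b1, "leaf must be present");
    }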

    #[test]
    fn test_page_table_builder_mismatched_buffers() {
        const ONE_GIG: u64 = 1024 * 1024 * 1024;

        let mut page_table_work_buffer: Vec<PageTable> = vec![PageTable::new_zeroed(); 4];
        let mut page_table: Vec<u8> = vec![0; 4096 * 5];

        let err = PageTableBuilder::new(
            0,
            page_table_work_buffer.as_mut_slice(),
            page_table.as_mut_slice(),
            &[MappedRange::new(0, ONE_GIG)],
        )
        .err()
        .expect("building page tables should fail");

        assert!(matches!(
            err,
            Error::BadBufferSize {
                bytes_buf: _,
                struct_buf: _
            }
        ));
    }
}
922}