//! Page table construction for aarch64 (4 KiB granule, 48-bit addresses).

use bitfield_struct::bitfield;

/// Memory attribute encodings used in the `MAIR_EL1` slots. The discriminants
/// are the literal attribute byte values defined by the Arm architecture.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
#[repr(u8)]
#[allow(non_camel_case_types)]
pub enum MemoryAttributeEl1 {
    /// Device-nGnRnE memory.
    #[default]
    Device_nGnRnE = 0,
    /// Normal memory, Inner and Outer Non-cacheable.
    Normal_NonCacheable = 0x44,
    /// Normal memory, Inner and Outer Write-Through.
    Normal_WriteThrough = 0xbb,
    /// Normal memory, Inner and Outer Write-Back.
    Normal_WriteBack = 0xff,
}

impl From<u8> for MemoryAttributeEl1 {
    fn from(value: u8) -> Self {
        match value {
            0 => MemoryAttributeEl1::Device_nGnRnE,
            0x44 => MemoryAttributeEl1::Normal_NonCacheable,
            0xbb => MemoryAttributeEl1::Normal_WriteThrough,
            0xff => MemoryAttributeEl1::Normal_WriteBack,
            _ => panic!("memory type is not supported"),
        }
    }
}

/// Index of a memory attribute slot in `MAIR_EL1`; this is what the `AttrIndx`
/// field of a page or block descriptor refers to.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u64)]
pub enum MemoryAttributeIndex {
    Index0,
    Index1,
    Index2,
    Index3,
    Index4,
    Index5,
    Index6,
    Index7,
}

impl MemoryAttributeIndex {
    const fn from_bits(value: u64) -> Self {
        match value {
            0 => MemoryAttributeIndex::Index0,
            1 => MemoryAttributeIndex::Index1,
            2 => MemoryAttributeIndex::Index2,
            3 => MemoryAttributeIndex::Index3,
            4 => MemoryAttributeIndex::Index4,
            5 => MemoryAttributeIndex::Index5,
            6 => MemoryAttributeIndex::Index6,
            7 => MemoryAttributeIndex::Index7,
            _ => panic!("illegal state when looking for memory attribute index"),
        }
    }

    const fn into_bits(value: Self) -> u64 {
        value as u64
    }
}

impl From<MemoryAttributeIndex> for u64 {
    fn from(value: MemoryAttributeIndex) -> Self {
        MemoryAttributeIndex::into_bits(value)
    }
}

impl From<u64> for MemoryAttributeIndex {
    fn from(value: u64) -> Self {
        Self::from_bits(value)
    }
}

impl From<usize> for MemoryAttributeIndex {
    fn from(value: usize) -> Self {
        Self::from_bits(value as u64)
    }
}

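/// The eight memory attribute slots that are packed into the `MAIR_EL1`
/// register value, least significant byte first.
///
/// Illustrative sketch (marked `ignore` since the enclosing crate and module
/// paths are not known here):
///
/// ```ignore
/// let mair = MemoryAttributeIndirectionEl1([
///     MemoryAttributeEl1::Device_nGnRnE,    // slot 0
///     MemoryAttributeEl1::Normal_WriteBack, // slot 1
///     MemoryAttributeEl1::Device_nGnRnE,
///     MemoryAttributeEl1::Device_nGnRnE,
///     MemoryAttributeEl1::Device_nGnRnE,
///     MemoryAttributeEl1::Device_nGnRnE,
///     MemoryAttributeEl1::Device_nGnRnE,
///     MemoryAttributeEl1::Device_nGnRnE,
/// ]);
/// // The register value is just the attribute bytes packed little-endian:
/// // slot 1 holds Normal Write-Back (0xff).
/// assert_eq!(u64::from(mair), 0xff00);
/// assert_eq!(
///     mair.index_of(MemoryAttributeEl1::Normal_WriteBack),
///     Some(MemoryAttributeIndex::Index1)
/// );
/// ```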
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
pub struct MemoryAttributeIndirectionEl1(pub [MemoryAttributeEl1; 8]);

impl MemoryAttributeIndirectionEl1 {
    pub fn index_of(&self, needle: MemoryAttributeEl1) -> Option<MemoryAttributeIndex> {
        for (idx, &attr) in self.0.iter().enumerate() {
            if attr == needle {
                return Some(idx.into());
            }
        }

        None
    }
}

impl From<MemoryAttributeIndirectionEl1> for u64 {
    fn from(value: MemoryAttributeIndirectionEl1) -> Self {
        u64::from_le_bytes(value.0.map(|x| x as u8))
    }
}

impl From<u64> for MemoryAttributeIndirectionEl1 {
    fn from(value: u64) -> Self {
        MemoryAttributeIndirectionEl1(value.to_le_bytes().map(|x| x.into()))
    }
}

#[bitfield(u64)]
pub struct Aarch64PageTableEntry {
    pub valid: bool,
    pub table: bool,
    #[bits(10)]
    _mbz0: u64,
    #[bits(35)]
    pub next_table_pfn: u64,
    #[bits(12)]
    _mbz1: u64,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(2)]
    pub access_perm: u64,
    pub non_secure: bool,
}

#[bitfield(u64)]
pub struct Aarch64PageBlockEntry {
    pub valid: bool,
    pub page: bool,
    #[bits(3)]
    pub mair_idx: MemoryAttributeIndex,
    #[bits(1)]
    _mbz0: u64,
    #[bits(2)]
    pub access_perm: u64,
    #[bits(2)]
    pub share_perm: u64,
    pub accessed: bool,
    pub not_global: bool,
    #[bits(35)]
    pub address_pfn: u64,
    #[bits(4)]
    _mbz1: u64,
    pub dirty: bool,
    pub contig: bool,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(9)]
    _mbz2: u64,
}

/// VMSAv8-64 table descriptor: a non-leaf entry that points to the next-level
/// translation table.
#[bitfield(u64)]
pub struct Arm64PageTableEntry {
    pub valid: bool,
    pub table: bool,
    #[bits(10)]
    _mbz0: u64,
    #[bits(35)]
    pub next_table_pfn: u64,
    #[bits(12)]
    _mbz1: u64,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(2)]
    pub access_perm: u64,
    pub non_secure: bool,
}

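/// VMSAv8-64 leaf descriptor: a page (level 3) or block (level 1/2) entry that
/// maps physical memory.
///
/// Illustrative layout check (marked `ignore` since the enclosing crate and
/// module paths are not known here):
///
/// ```ignore
/// // Valid page entry mapping PFN 0x40000 (physical address 0x4000_0000):
/// // bit 0 = valid, bit 1 = page, bits 12..47 = output address PFN.
/// let entry = Arm64PageBlockEntry::new()
///     .with_valid(true)
///     .with_page(true)
///     .with_address_pfn(0x40000);
/// assert_eq!(u64::from(entry), 0x4000_0003);
/// ```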
#[bitfield(u64)]
pub struct Arm64PageBlockEntry {
    pub valid: bool,
    pub page: bool,
    /// `AttrIndx`: index of the memory attribute in `MAIR_EL1`.
    #[bits(3)]
    pub mair_idx: usize,
    #[bits(1)]
    _mbz0: u64,
    /// Data access permissions, `AP[2:1]`.
    #[bits(2)]
    pub access_perm: u64,
    /// Shareability, `SH[1:0]`; `0b11` is Inner Shareable.
    #[bits(2)]
    pub share_perm: u64,
    pub accessed: bool,
    pub not_global: bool,
    #[bits(35)]
    pub address_pfn: u64,
    #[bits(4)]
    _mbz1: u64,
    pub dirty: bool,
    pub contig: bool,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(9)]
    _mbz2: u64,
}

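/// A virtual address split into the 4 KiB-granule translation table indices
/// (9 bits per level) plus the 12-bit page offset.
///
/// Illustrative sketch (marked `ignore` since the enclosing crate and module
/// paths are not known here):
///
/// ```ignore
/// let va = VirtualAddress::from(0x4020_1000u64);
/// assert_eq!(va.offset(), 0);
/// assert_eq!(va.lvl_index(0), 0); // 512 GiB slot
/// assert_eq!(va.lvl_index(1), 1); // 1 GiB slot
/// assert_eq!(va.lvl_index(2), 1); // 2 MiB slot
/// assert_eq!(va.lvl_index(3), 1); // 4 KiB slot
/// assert!(va.is_canonical());
/// assert!(!VirtualAddress::from(0x0000_9000_0000_0000u64).is_canonical());
/// ```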
#[bitfield(u64)]
pub struct VirtualAddress {
    #[bits(12)]
    pub offset: u64,
    #[bits(9)]
    pub lvl3: usize,
    #[bits(9)]
    pub lvl2: usize,
    #[bits(9)]
    pub lvl1: usize,
    #[bits(9)]
    pub lvl0: usize,
    #[bits(16)]
    pub asid: usize,
}

impl VirtualAddress {
    pub fn is_canonical(&self) -> bool {
        ((self.0 as i64) << 16 >> 16) == self.0 as i64
    }

    pub fn lvl_index(&self, index: usize) -> usize {
        match index {
            3 => self.lvl3(),
            2 => self.lvl2(),
            1 => self.lvl1(),
            0 => self.lvl0(),
            _ => panic!("invalid VA level index"),
        }
    }
}

const PAGE_SHIFT_4K: u64 = 12;
const PAGE_SHIFT_2M: u64 = 21;
const PAGE_SHIFT_1G: u64 = 30;

const PAGE_SIZE_4K: u64 = 1 << PAGE_SHIFT_4K;
const PAGE_SIZE_2M: u64 = 1 << PAGE_SHIFT_2M;
const PAGE_SIZE_1G: u64 = 1 << PAGE_SHIFT_1G;

/// Errors returned by the page table builder.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Arm64PageMapError {
    OutOfMemory,
    NonCanonicalVirtAddress,
    MisalignedVirtAddress,
    MisalignedPhysAddress,
    InvalidMappingSize,
    EmptyMapping,
    AlreadyMapped,
}

/// Mapping sizes available with the 4 KiB translation granule.
#[derive(Debug, Clone, Copy)]
#[repr(u64)]
pub enum Arm64PageSize {
    Small = PAGE_SIZE_4K,
    Large = PAGE_SIZE_2M,
    Huge = PAGE_SIZE_1G,
}

impl From<Arm64PageSize> for u64 {
    fn from(value: Arm64PageSize) -> Self {
        value as u64
    }
}

impl From<Arm64PageSize> for usize {
    fn from(value: Arm64PageSize) -> Self {
        value as usize
    }
}

const fn align_up(x: u64, page_size: Arm64PageSize) -> u64 {
    let ones_enough = page_size as u64 - 1;
    (x + ones_enough) & !ones_enough
}

const fn align_down(x: u64, page_size: Arm64PageSize) -> u64 {
    let ones_enough = page_size as u64 - 1;
    x & !ones_enough
}

const fn aligned(x: u64, page_size: Arm64PageSize) -> bool {
    let ones_enough = page_size as u64 - 1;
    (x & ones_enough) == 0
}

/// Execute-never policy for a mapping: which of the UXN (user) and PXN
/// (privileged) bits to set.
#[derive(Debug, Copy, Clone)]
pub enum Arm64NoExecute {
    Off,
    UserOnly,
    PrivilegedOnly,
    Full,
}

impl Arm64PageBlockEntry {
    /// Sets the UXN/PXN bits according to the requested execute-never policy.
    pub fn set_xn(&mut self, xn: Arm64NoExecute) {
        match xn {
            Arm64NoExecute::Off => {
                self.set_user_x_never(false);
                self.set_priv_x_never(false);
            }
            Arm64NoExecute::UserOnly => {
                self.set_user_x_never(true);
                self.set_priv_x_never(false);
            }
            Arm64NoExecute::PrivilegedOnly => {
                self.set_user_x_never(false);
                self.set_priv_x_never(true);
            }
            Arm64NoExecute::Full => {
                self.set_user_x_never(true);
                self.set_priv_x_never(true);
            }
        }
    }
}

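/// A bump allocator over a caller-provided buffer in which the translation
/// tables are built. The root (level 0) table occupies the first 4 KiB page of
/// the buffer; further tables are carved off as mappings require them.
///
/// Usage sketch (marked `ignore` since the enclosing crate and module paths
/// are not known here; the addresses are arbitrary examples):
///
/// ```ignore
/// let mut space = vec![0u8; 0x10000];
/// let mut tables = Arm64PageTableSpace::new(0x4024_8000, &mut space).unwrap();
/// tables
///     .map_range(
///         0x4000_0000,
///         VirtualAddress::from(0x4000_0000),
///         0x20_0000,                    // 2 MiB, identity mapped
///         MemoryAttributeIndex::Index3, // whichever MAIR slot holds Write-Back
///         Arm64NoExecute::Full,
///     )
///     .unwrap();
/// let used = tables.used_space();
/// space.truncate(used);
/// ```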
#[derive(Debug)]
pub struct Arm64PageTableSpace<'a> {
    /// Physical address at which the page table area starts.
    phys_page_table_root: usize,
    /// Backing storage for the page tables.
    space: &'a mut [u8],
    /// Physical address of the next free 4 KiB page in the area; tables are
    /// only ever allocated, never freed.
    brk: usize,
    /// Number of tables allocated at each level.
    lvl_stats: [usize; 4],
}

impl<'a> Arm64PageTableSpace<'a> {
    pub fn new(phys_start: usize, space: &'a mut [u8]) -> Result<Self, Arm64PageMapError> {
        if !aligned(phys_start as u64, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(space.len() as u64, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::InvalidMappingSize);
        }
        if space.is_empty() {
            return Err(Arm64PageMapError::EmptyMapping);
        }

        // Fill the root table with a recognizable pattern. Bit 0 (the valid
        // bit) stays clear, so every entry starts out as not present.
        space[..PAGE_SIZE_4K as usize].fill(0xfe);

        Ok(Self {
            phys_page_table_root: phys_start,
            space,
            brk: phys_start + PAGE_SIZE_4K as usize,
            lvl_stats: [1, 0, 0, 0],
        })
    }

    fn allocate_page_table(&mut self, level: usize) -> Result<u64, Arm64PageMapError> {
        if self.brk >= self.phys_page_table_root + self.space.len() {
            return Err(Arm64PageMapError::OutOfMemory);
        }
        let page_table_phys_addr = self.brk;
        self.brk += PAGE_SIZE_4K as usize;
        self.lvl_stats[level] += 1;

        Ok(page_table_phys_addr as u64)
    }

    pub fn used_space(&self) -> usize {
        self.brk - self.phys_page_table_root
    }

    pub fn lvl_stats(&self) -> [usize; 4] {
        self.lvl_stats
    }

    fn read_entry(&self, phys_table_start: u64, index: usize) -> u64 {
        debug_assert!(
            (phys_table_start as usize) < self.phys_page_table_root + self.space.len()
                && (phys_table_start as usize) >= self.phys_page_table_root
        );
        debug_assert!(aligned(phys_table_start, Arm64PageSize::Small));
        debug_assert!(index < PAGE_SIZE_4K as usize / size_of::<Arm64PageTableEntry>());

        let pos = phys_table_start as usize - self.phys_page_table_root
            + index * size_of::<Arm64PageTableEntry>();
        u64::from_le_bytes([
            self.space[pos],
            self.space[pos + 1],
            self.space[pos + 2],
            self.space[pos + 3],
            self.space[pos + 4],
            self.space[pos + 5],
            self.space[pos + 6],
            self.space[pos + 7],
        ])
    }

    fn write_entry(&mut self, phys_table_start: u64, index: usize, entry: u64) {
        debug_assert!(
            (phys_table_start as usize) < self.phys_page_table_root + self.space.len()
                && (phys_table_start as usize) >= self.phys_page_table_root
        );
        debug_assert!(aligned(phys_table_start, Arm64PageSize::Small));
        debug_assert!(index < PAGE_SIZE_4K as usize / size_of::<Arm64PageTableEntry>());

        tracing::debug!(
            "Writing page table entry {entry:#016x}, index {index:#x}, table {phys_table_start:#x}"
        );

        let pos = phys_table_start as usize - self.phys_page_table_root
            + index * size_of::<Arm64PageTableEntry>();
        self.space[pos..pos + 8].copy_from_slice(&entry.to_le_bytes());
    }

    fn check_addresses_and_map_size(
        &self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        page_size: Arm64PageSize,
    ) -> Result<(), Arm64PageMapError> {
        if virt_addr.offset() != 0 {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }
        if !virt_addr.is_canonical() {
            return Err(Arm64PageMapError::NonCanonicalVirtAddress);
        }

        if !aligned(phys_addr, page_size) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(virt_addr.0, page_size) {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }

        Ok(())
    }

    fn map_page(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        memory_attribute_index: MemoryAttributeIndex,
        page_size: Arm64PageSize,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        let mut table_phys_addr = self.phys_page_table_root as u64;
        let mut level = 0;
        let leaf_level = match page_size {
            Arm64PageSize::Small => 3,
            Arm64PageSize::Large => 2,
            Arm64PageSize::Huge => 1,
        };
        // Walk down to the leaf level, allocating intermediate tables as needed.
        while level < leaf_level {
            let mut table_entry = Arm64PageTableEntry::from(
                self.read_entry(table_phys_addr, virt_addr.lvl_index(level)),
            );

            if table_entry.valid() && !table_entry.table() {
                return Err(Arm64PageMapError::AlreadyMapped);
            }

            if !table_entry.valid() {
                let next_table_phys_addr = self.allocate_page_table(level + 1)?;

                table_entry = Arm64PageTableEntry::new()
                    .with_valid(true)
                    .with_table(true)
                    .with_next_table_pfn(next_table_phys_addr >> PAGE_SHIFT_4K);

                self.write_entry(
                    table_phys_addr,
                    virt_addr.lvl_index(level),
                    table_entry.into(),
                );
            }
            table_phys_addr = table_entry.next_table_pfn() << PAGE_SHIFT_4K;

            level += 1;
        }

        let mut page_entry =
            Arm64PageBlockEntry::from(self.read_entry(table_phys_addr, virt_addr.lvl_index(level)));
        if page_entry.valid() {
            return Err(Arm64PageMapError::AlreadyMapped);
        }

        // Inner Shareable, access flag set so no Access Flag faults are taken.
        page_entry = Arm64PageBlockEntry::new()
            .with_valid(true)
            .with_page(leaf_level == 3)
            .with_accessed(true)
            .with_share_perm(3)
            .with_mair_idx(memory_attribute_index as usize)
            .with_address_pfn(phys_addr >> PAGE_SHIFT_4K);
        page_entry.set_xn(xn);

        self.write_entry(
            table_phys_addr,
            virt_addr.lvl_index(level),
            page_entry.into(),
        );

        Ok(())
    }

    /// Maps `page_count` pages of `page_size` each, starting at `phys_addr`
    /// and `virt_addr`.
    pub fn map_pages(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        page_count: usize,
        page_size: Arm64PageSize,
        memory_attribute_index: MemoryAttributeIndex,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        self.check_addresses_and_map_size(phys_addr, virt_addr, page_size)?;

        if page_count == 0 {
            return Err(Arm64PageMapError::EmptyMapping);
        }

        let pages_to_map = page_count;
        let mut pages_mapped = 0;
        let mut phys_addr = phys_addr;
        let mut virt_addr = virt_addr.0;
        while pages_mapped < pages_to_map {
            self.map_page(
                phys_addr,
                VirtualAddress(virt_addr),
                memory_attribute_index,
                page_size,
                xn,
            )?;

            pages_mapped += 1;
            phys_addr += page_size as u64;
            virt_addr += page_size as u64;
        }

        Ok(())
    }

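    /// Picks the largest page size that the current physical/virtual alignment
    /// allows, and how many pages of that size to map before a larger-size
    /// boundary is reached.
    ///
    /// Worked example (added for illustration, mirroring `test_mmu_page_mix`
    /// below): identity-mapping 3 GiB starting at 0x3fff_f000 proceeds as
    ///   1. one 4 KiB page up to the 1 GiB boundary at 0x4000_0000,
    ///   2. two 1 GiB blocks up to 0xc000_0000,
    ///   3. 511 2 MiB blocks up to 0xffe0_0000,
    ///   4. 511 4 KiB pages for the remaining 0x1ff000 bytes.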
    fn get_page_size_and_page_count(
        &self,
        non_mapped: u64,
        phys_addr: u64,
        virt_addr: u64,
    ) -> (Arm64PageSize, u64) {
        if aligned(phys_addr, Arm64PageSize::Huge)
            && aligned(virt_addr, Arm64PageSize::Huge)
            && non_mapped >= PAGE_SIZE_1G
        {
            (Arm64PageSize::Huge, non_mapped / Arm64PageSize::Huge as u64)
        } else if aligned(phys_addr, Arm64PageSize::Large)
            && aligned(virt_addr, Arm64PageSize::Large)
            && non_mapped >= PAGE_SIZE_2M
        {
            let before_huge_page = align_up(virt_addr, Arm64PageSize::Huge) - virt_addr;
            let page_count = align_down(
                if before_huge_page > 0 && before_huge_page < non_mapped {
                    before_huge_page
                } else {
                    non_mapped
                },
                Arm64PageSize::Large,
            ) / Arm64PageSize::Large as u64;

            (Arm64PageSize::Large, page_count)
        } else {
            let before_huge_page = align_up(virt_addr, Arm64PageSize::Huge) - virt_addr;
            let page_count = if before_huge_page > 0 && before_huge_page < non_mapped {
                before_huge_page
            } else {
                let before_large_page = align_up(virt_addr, Arm64PageSize::Large) - virt_addr;
                if before_large_page > 0 && before_large_page < non_mapped {
                    before_large_page
                } else {
                    non_mapped
                }
            } / Arm64PageSize::Small as u64;

            (Arm64PageSize::Small, page_count)
        }
    }

    /// Maps `size` bytes starting at `phys_addr` to `virt_addr`, using the
    /// largest page sizes that the alignment of the range allows.
    pub fn map_range(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        size: u64,
        memory_attribute_index: MemoryAttributeIndex,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        if !aligned(phys_addr, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(size, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::InvalidMappingSize);
        }
        if size == 0 {
            return Err(Arm64PageMapError::EmptyMapping);
        }
        if virt_addr.offset() != 0 {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }
        if !virt_addr.is_canonical() {
            return Err(Arm64PageMapError::NonCanonicalVirtAddress);
        }

        let mut non_mapped = size;
        let mut phys_addr = phys_addr;
        let mut virt_addr = virt_addr.into();

        let mut mapped = 0;
        while mapped < size {
            let (page_size, page_count) =
                self.get_page_size_and_page_count(non_mapped, phys_addr, virt_addr);
            self.map_pages(
                phys_addr,
                VirtualAddress(virt_addr),
                page_count as usize,
                page_size,
                memory_attribute_index,
                xn,
            )?;

            let just_mapped = page_count * page_size as u64;
            mapped += just_mapped;
            non_mapped -= just_mapped;
            phys_addr += just_mapped;
            virt_addr += just_mapped;
        }

        debug_assert!(mapped == size);
        debug_assert!(non_mapped == 0);
        Ok(())
    }
}

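/// Builds identity-mapped (virtual address == physical address) page tables
/// covering `size` bytes starting at `start_gpa`, assuming the returned bytes
/// will be loaded at `page_table_gpa`. The mapping uses the Normal Write-Back
/// attribute, which must be present in `memory_attribute_indirection`.
///
/// Illustrative sketch (marked `ignore` since the enclosing crate and module
/// paths are not known here; the addresses are arbitrary examples):
///
/// ```ignore
/// let mut attrs = [MemoryAttributeEl1::Device_nGnRnE; 8];
/// attrs[1] = MemoryAttributeEl1::Normal_WriteBack;
/// let blob = build_identity_page_tables_aarch64(
///     0x4024_8000, // where the tables will live in guest memory
///     0x4000_0000, // identity-map starting here...
///     0x20_0000,   // ...for 2 MiB
///     MemoryAttributeIndirectionEl1(attrs),
///     0x1_0000,    // scratch budget for building the tables
/// );
/// // One level-0, one level-1 and one level-2 table: three 4 KiB pages.
/// assert_eq!(blob.len(), 0x3000);
/// ```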
pub fn build_identity_page_tables_aarch64(
    page_table_gpa: u64,
    start_gpa: u64,
    size: u64,
    memory_attribute_indirection: MemoryAttributeIndirectionEl1,
    page_table_region_size: usize,
) -> Vec<u8> {
    if !aligned(start_gpa, Arm64PageSize::Large) {
        panic!("start_gpa not 2mb aligned");
    }

    if !aligned(size, Arm64PageSize::Large) {
        panic!("size not 2mb aligned");
    }

    tracing::debug!(
        "Creating Aarch64 page tables at {page_table_gpa:#x} mapping starting at {start_gpa:#x} of size {size} bytes"
    );

    let mut page_table_space = vec![0; page_table_region_size];
    let mut page_tables =
        Arm64PageTableSpace::new(page_table_gpa as usize, &mut page_table_space).unwrap();
    page_tables
        .map_range(
            start_gpa,
            VirtualAddress(start_gpa),
            size,
            memory_attribute_indirection
                .index_of(MemoryAttributeEl1::Normal_WriteBack)
                .unwrap(),
            Arm64NoExecute::UserOnly,
        )
        .unwrap();

    let used_space = page_tables.used_space();
    tracing::debug!("Page tables use {used_space} bytes");
    tracing::debug!("Page tables stats by level: {:?}", page_tables.lvl_stats());

    page_table_space.truncate(used_space);

    page_table_space
}

#[cfg(test)]
mod tests {
    use super::*;

    const DUMP_PAGE_TABLES: bool = false;

    #[test]
    fn test_mmu_small_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0x4000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 1]);

        let res = page_tables.map_pages(
            0x5000,
            VirtualAddress::from(0x5000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 1]);

        let res = page_tables.map_pages(
            0x200000,
            VirtualAddress::from(0x200000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 2]);

        let res = page_tables.map_pages(
            0x201000,
            VirtualAddress::from(0x201000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 2]);

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0xffff_8000_0000_4000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 2, 3]);

        let res = page_tables.map_pages(
            0x5000,
            VirtualAddress::from(0xffff_8000_0000_5000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 2, 3]);

        let res = page_tables.map_pages(
            0x4000_0000,
            VirtualAddress::from(0x4000_0000),
            0x200,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 3, 4]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables.bin", space).expect("can dump the page tables");
        }
    }
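
    // Added illustrative check (not part of the original test suite): the
    // MAIR_EL1 value is just the eight attribute bytes packed little-endian,
    // and `index_of` reports the slot a descriptor's AttrIndx would use.
    #[test]
    fn test_mair_el1_encoding_sketch() {
        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        assert_eq!(u64::from(mair_el1), 0x0000_0000_ffbb_4400);
        assert_eq!(
            MemoryAttributeIndirectionEl1::from(0x0000_0000_ffbb_4400u64),
            mair_el1
        );
        assert_eq!(
            mair_el1.index_of(MemoryAttributeEl1::Normal_WriteBack),
            Some(MemoryAttributeIndex::Index3)
        );
        assert_eq!(
            mair_el1.index_of(MemoryAttributeEl1::Device_nGnRnE),
            Some(MemoryAttributeIndex::Index0)
        );
    }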

    #[test]
    fn test_mmu_large_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0,
            VirtualAddress::from(0),
            0x2000,
            Arm64PageSize::Large,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 16, 0]);

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0x4000),
            4,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::AlreadyMapped));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 16, 0]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables_large.bin", space).expect("can dump the page tables");
        }
    }
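
    // Added illustrative check (not part of the original test suite): the
    // alignment helpers round to page-size boundaries as `map_range` expects.
    #[test]
    fn test_alignment_helpers_sketch() {
        assert_eq!(align_up(0x1234, Arm64PageSize::Small), 0x2000);
        assert_eq!(align_down(0x1234, Arm64PageSize::Small), 0x1000);
        assert_eq!(align_up(0x20_0000, Arm64PageSize::Large), 0x20_0000);
        assert!(aligned(0x4000_0000, Arm64PageSize::Huge));
        assert!(!aligned(0x20_0000, Arm64PageSize::Huge));
        assert!(aligned(0, Arm64PageSize::Huge));
    }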

    #[test]
    fn test_mmu_huge_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0,
            VirtualAddress::from(0),
            4,
            Arm64PageSize::Huge,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 0, 0]);

        let res = page_tables.map_pages(
            1 << 30,
            VirtualAddress::from(0x4000_0000),
            4,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::AlreadyMapped));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 0, 0]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables_huge.bin", space).expect("can dump the page tables");
        }
    }
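
    // Added illustrative check (not part of the original test suite):
    // alignment and canonicality violations are reported before any entry is
    // written.
    #[test]
    fn test_mmu_rejects_bad_addresses_sketch() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");
        let attr_index = MemoryAttributeIndex::Index0;

        // Bit 47 is set but the upper 16 bits are clear: not canonical.
        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0x0000_9000_0000_4000),
            1,
            Arm64PageSize::Small,
            attr_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::NonCanonicalVirtAddress));

        // The physical address is not aligned to the requested page size.
        let res = page_tables.map_pages(
            0x4800,
            VirtualAddress::from(0x4000),
            1,
            Arm64PageSize::Small,
            attr_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::MisalignedPhysAddress));

        // A zero-sized request is rejected as well.
        let res = page_tables.map_range(
            0x4000,
            VirtualAddress::from(0x4000),
            0,
            attr_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::EmptyMapping));

        // Nothing was allocated beyond the root table.
        assert_eq!(page_tables.lvl_stats(), [1, 0, 0, 0]);
    }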

    #[test]
    fn test_mmu_page_mix() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        const ONE_GIB: u64 = 1 << 30;

        let addr = ONE_GIB - 0x1000;
        let res = page_tables.map_range(
            addr,
            VirtualAddress::from(addr),
            3 * ONE_GIB,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 2, 2]);
    }
}