use bitfield_struct::bitfield;
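
/// Memory attribute encodings used for the MAIR_EL1 register, one byte per
/// attribute slot. These are the standard Armv8-A encodings: `0x00` is
/// Device-nGnRnE, `0x44` is Normal Non-cacheable, `0xbb` is Normal
/// Write-through, and `0xff` is Normal Write-back (read/write-allocate).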
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
#[repr(u8)]
#[expect(non_camel_case_types)]
pub enum MemoryAttributeEl1 {
    #[default]
    Device_nGnRnE = 0,
    Normal_NonCacheable = 0x44,
    Normal_WriteThrough = 0xbb,
    Normal_WriteBack = 0xff,
}

impl From<u8> for MemoryAttributeEl1 {
    fn from(value: u8) -> Self {
        match value {
            0 => MemoryAttributeEl1::Device_nGnRnE,
            0x44 => MemoryAttributeEl1::Normal_NonCacheable,
            0xbb => MemoryAttributeEl1::Normal_WriteThrough,
            0xff => MemoryAttributeEl1::Normal_WriteBack,
            _ => panic!("memory type is not supported"),
        }
    }
}

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u64)]
pub enum MemoryAttributeIndex {
    Index0,
    Index1,
    Index2,
    Index3,
    Index4,
    Index5,
    Index6,
    Index7,
}

impl MemoryAttributeIndex {
    const fn from_bits(value: u64) -> Self {
        match value {
            0 => MemoryAttributeIndex::Index0,
            1 => MemoryAttributeIndex::Index1,
            2 => MemoryAttributeIndex::Index2,
            3 => MemoryAttributeIndex::Index3,
            4 => MemoryAttributeIndex::Index4,
            5 => MemoryAttributeIndex::Index5,
            6 => MemoryAttributeIndex::Index6,
            7 => MemoryAttributeIndex::Index7,
            _ => panic!("illegal state when looking for memory attribute index"),
        }
    }

    const fn into_bits(value: Self) -> u64 {
        value as u64
    }
}

impl From<MemoryAttributeIndex> for u64 {
    fn from(value: MemoryAttributeIndex) -> Self {
        MemoryAttributeIndex::into_bits(value)
    }
}

impl From<u64> for MemoryAttributeIndex {
    fn from(value: u64) -> Self {
        Self::from_bits(value)
    }
}

impl From<usize> for MemoryAttributeIndex {
    fn from(value: usize) -> Self {
        Self::from_bits(value as u64)
    }
}
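
/// Models the MAIR_EL1 register: eight memory attributes, where the attribute
/// stored at array position `N` occupies byte `N` (bits `8*N..8*N+8`) of the
/// 64-bit register value and is selected from page table entries via
/// `MemoryAttributeIndex::IndexN`.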
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
pub struct MemoryAttributeIndirectionEl1(pub [MemoryAttributeEl1; 8]);

impl MemoryAttributeIndirectionEl1 {
    pub fn index_of(&self, needle: MemoryAttributeEl1) -> Option<MemoryAttributeIndex> {
        for (idx, &attr) in self.0.iter().enumerate() {
            if attr == needle {
                return Some(idx.into());
            }
        }

        None
    }
}

impl From<MemoryAttributeIndirectionEl1> for u64 {
    fn from(value: MemoryAttributeIndirectionEl1) -> Self {
        u64::from_le_bytes(value.0.map(|x| x as u8))
    }
}

impl From<u64> for MemoryAttributeIndirectionEl1 {
    fn from(value: u64) -> Self {
        MemoryAttributeIndirectionEl1(value.to_le_bytes().map(|x| x.into()))
    }
}

#[bitfield(u64)]
pub struct Arm64PageTableEntry {
    pub valid: bool,
    pub table: bool,
    #[bits(10)]
    _mbz0: u64,
    #[bits(35)]
    pub next_table_pfn: u64,
    #[bits(12)]
    _mbz1: u64,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(2)]
    pub access_perm: u64,
    pub non_secure: bool,
}

#[bitfield(u64)]
pub struct Arm64PageBlockEntry {
    pub valid: bool,
    pub page: bool,
    #[bits(3)]
    pub mair_idx: usize,
    #[bits(1)]
    _mbz0: u64,
    #[bits(2)]
    pub access_perm: u64,
    #[bits(2)]
    pub share_perm: u64,
    pub accessed: bool,
    pub not_global: bool,
    #[bits(35)]
    pub address_pfn: u64,
    #[bits(4)]
    _mbz1: u64,
    pub dirty: bool,
    pub contig: bool,
    pub priv_x_never: bool,
    pub user_x_never: bool,
    #[bits(9)]
    _mbz2: u64,
}
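
/// A virtual address as seen by the 4 KiB-granule, 48-bit translation regime:
/// a 12-bit page offset, four 9-bit table indices (level 3 is the leaf level),
/// and the upper 16 bits (named `asid` here, matching the TTBRn_EL1 layout).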
#[bitfield(u64)]
pub struct VirtualAddress {
    #[bits(12)]
    pub offset: u64,
    #[bits(9)]
    pub lvl3: usize,
    #[bits(9)]
    pub lvl2: usize,
    #[bits(9)]
    pub lvl1: usize,
    #[bits(9)]
    pub lvl0: usize,
    #[bits(16)]
    pub asid: usize,
}

impl VirtualAddress {
    pub fn is_canonical(&self) -> bool {
        ((self.0 as i64) << 16 >> 16) == self.0 as i64
    }

    pub fn lvl_index(&self, index: usize) -> usize {
        match index {
            3 => self.lvl3(),
            2 => self.lvl2(),
            1 => self.lvl1(),
            0 => self.lvl0(),
            _ => panic!("invalid VA level index"),
        }
    }
}

const PAGE_SHIFT_4K: u64 = 12;
const PAGE_SHIFT_2M: u64 = 21;
const PAGE_SHIFT_1G: u64 = 30;

const PAGE_SIZE_4K: u64 = 1 << PAGE_SHIFT_4K;
const PAGE_SIZE_2M: u64 = 1 << PAGE_SHIFT_2M;
const PAGE_SIZE_1G: u64 = 1 << PAGE_SHIFT_1G;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Arm64PageMapError {
    OutOfMemory,
    NonCanonicalVirtAddress,
    MisalignedVirtAddress,
    MisalignedPhysAddress,
    InvalidMappingSize,
    EmptyMapping,
    AlreadyMapped,
}

#[derive(Debug, Clone, Copy)]
#[repr(u64)]
pub enum Arm64PageSize {
    Small = PAGE_SIZE_4K,
    Large = PAGE_SIZE_2M,
    Huge = PAGE_SIZE_1G,
}

impl From<Arm64PageSize> for u64 {
    fn from(value: Arm64PageSize) -> Self {
        value as u64
    }
}

impl From<Arm64PageSize> for usize {
    fn from(value: Arm64PageSize) -> Self {
        value as usize
    }
}

const fn align_up(x: u64, page_size: Arm64PageSize) -> u64 {
    let ones_enough = page_size as u64 - 1;
    (x + ones_enough) & !ones_enough
}

const fn align_down(x: u64, page_size: Arm64PageSize) -> u64 {
    let ones_enough = page_size as u64 - 1;
    x & !ones_enough
}

const fn aligned(x: u64, page_size: Arm64PageSize) -> bool {
    let ones_enough = page_size as u64 - 1;
    (x & ones_enough) == 0
}

#[derive(Debug, Copy, Clone)]
pub enum Arm64NoExecute {
    Off,
    UserOnly,
    PrivilegedOnly,
    Full,
}

impl Arm64PageBlockEntry {
    pub fn set_xn(&mut self, xn: Arm64NoExecute) {
        match xn {
            Arm64NoExecute::Off => {
                self.set_user_x_never(false);
                self.set_priv_x_never(false);
            }
            Arm64NoExecute::UserOnly => {
                self.set_user_x_never(true);
                self.set_priv_x_never(false);
            }
            Arm64NoExecute::PrivilegedOnly => {
                self.set_user_x_never(false);
                self.set_priv_x_never(true);
            }
            Arm64NoExecute::Full => {
                self.set_user_x_never(true);
                self.set_priv_x_never(true);
            }
        }
    }
}
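
/// A page table pool carved out of a caller-provided byte buffer. The root
/// table lives at `phys_page_table_root`, new tables are handed out by a bump
/// allocator (`brk`), and `lvl_stats` counts how many tables exist at each
/// level.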
#[derive(Debug)]
pub struct Arm64PageTableSpace<'a> {
    phys_page_table_root: usize,
    space: &'a mut [u8],
    brk: usize,
    lvl_stats: [usize; 4],
}

impl<'a> Arm64PageTableSpace<'a> {
    pub fn new(phys_start: usize, space: &'a mut [u8]) -> Result<Self, Arm64PageMapError> {
        if !aligned(phys_start as u64, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(space.len() as u64, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::InvalidMappingSize);
        }
        if space.is_empty() {
            return Err(Arm64PageMapError::EmptyMapping);
        }

        space[..PAGE_SIZE_4K as usize].fill(0xfe);

        Ok(Self {
            phys_page_table_root: phys_start,
            space,
            brk: phys_start + PAGE_SIZE_4K as usize,
            lvl_stats: [1, 0, 0, 0],
        })
    }

    fn allocate_page_table(&mut self, level: usize) -> Result<u64, Arm64PageMapError> {
        if self.brk >= self.phys_page_table_root + self.space.len() {
            return Err(Arm64PageMapError::OutOfMemory);
        }
        let page_table_phys_addr = self.brk;
        self.brk += PAGE_SIZE_4K as usize;
        self.lvl_stats[level] += 1;

        Ok(page_table_phys_addr as u64)
    }

    pub fn used_space(&self) -> usize {
        self.brk - self.phys_page_table_root
    }

    pub fn lvl_stats(&self) -> [usize; 4] {
        self.lvl_stats
    }

    fn read_entry(&self, phys_table_start: u64, index: usize) -> u64 {
        debug_assert!(
            (phys_table_start as usize) < self.phys_page_table_root + self.space.len()
                && (phys_table_start as usize) >= self.phys_page_table_root
        );
        debug_assert!(aligned(phys_table_start, Arm64PageSize::Small));
        debug_assert!(index < PAGE_SIZE_4K as usize / size_of::<Arm64PageTableEntry>());

        let pos = phys_table_start as usize - self.phys_page_table_root
            + index * size_of::<Arm64PageTableEntry>();
        u64::from_le_bytes([
            self.space[pos],
            self.space[pos + 1],
            self.space[pos + 2],
            self.space[pos + 3],
            self.space[pos + 4],
            self.space[pos + 5],
            self.space[pos + 6],
            self.space[pos + 7],
        ])
    }

    fn write_entry(&mut self, phys_table_start: u64, index: usize, entry: u64) {
        debug_assert!(
            (phys_table_start as usize) < self.phys_page_table_root + self.space.len()
                && (phys_table_start as usize) >= self.phys_page_table_root
        );
        debug_assert!(aligned(phys_table_start, Arm64PageSize::Small));
        debug_assert!(index < PAGE_SIZE_4K as usize / size_of::<Arm64PageTableEntry>());

        #[cfg(feature = "tracing")]
        tracing::debug!(
            "Writing page table entry {entry:#016x}, index {index:#x}, table {phys_table_start:#x}"
        );

        let pos = phys_table_start as usize - self.phys_page_table_root
            + index * size_of::<Arm64PageTableEntry>();
        self.space[pos..pos + 8].copy_from_slice(&entry.to_le_bytes());
    }

    fn check_addresses_and_map_size(
        &self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        page_size: Arm64PageSize,
    ) -> Result<(), Arm64PageMapError> {
        if virt_addr.offset() != 0 {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }
        if !virt_addr.is_canonical() {
            return Err(Arm64PageMapError::NonCanonicalVirtAddress);
        }

        if !aligned(phys_addr, page_size) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(virt_addr.0, page_size) {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }

        Ok(())
    }
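
    /// Maps a single page or block: walks down from the root, allocating any
    /// missing intermediate tables, and writes the leaf entry at `leaf_level`
    /// (3 for 4 KiB, 2 for 2 MiB, 1 for 1 GiB). Fails with `AlreadyMapped` if
    /// a block or page entry is already present along the way.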
    fn map_page(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        memory_attribute_index: MemoryAttributeIndex,
        page_size: Arm64PageSize,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        let mut table_phys_addr = self.phys_page_table_root as u64;
        let mut level = 0;
        let leaf_level = match page_size {
            Arm64PageSize::Small => 3,
            Arm64PageSize::Large => 2,
            Arm64PageSize::Huge => 1,
        };
        while level < leaf_level {
            let mut table_entry = Arm64PageTableEntry::from(
                self.read_entry(table_phys_addr, virt_addr.lvl_index(level)),
            );

            if table_entry.valid() && !table_entry.table() {
                return Err(Arm64PageMapError::AlreadyMapped);
            }

            if !table_entry.valid() {
                let next_table_phys_addr = self.allocate_page_table(level + 1)?;

                table_entry = Arm64PageTableEntry::new()
                    .with_valid(true)
                    .with_table(true)
                    .with_next_table_pfn(next_table_phys_addr >> PAGE_SHIFT_4K);

                self.write_entry(
                    table_phys_addr,
                    virt_addr.lvl_index(level),
                    table_entry.into(),
                );
            }
            table_phys_addr = table_entry.next_table_pfn() << PAGE_SHIFT_4K;

            level += 1;
        }

        let mut page_entry =
            Arm64PageBlockEntry::from(self.read_entry(table_phys_addr, virt_addr.lvl_index(level)));
        if page_entry.valid() {
            return Err(Arm64PageMapError::AlreadyMapped);
        }

        page_entry = Arm64PageBlockEntry::new()
            .with_valid(true)
            .with_page(leaf_level == 3)
            .with_accessed(true)
            .with_share_perm(3)
            .with_mair_idx(memory_attribute_index as usize)
            .with_address_pfn(phys_addr >> PAGE_SHIFT_4K);
        page_entry.set_xn(xn);

        self.write_entry(
            table_phys_addr,
            virt_addr.lvl_index(level),
            page_entry.into(),
        );

        Ok(())
    }

    pub fn map_pages(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        page_count: usize,
        page_size: Arm64PageSize,
        memory_attribute_index: MemoryAttributeIndex,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        self.check_addresses_and_map_size(phys_addr, virt_addr, page_size)?;

        if page_count == 0 {
            return Err(Arm64PageMapError::EmptyMapping);
        }

        let pages_to_map = page_count;
        let mut pages_mapped = 0;
        let mut phys_addr = phys_addr;
        let mut virt_addr = virt_addr.0;
        while pages_mapped < pages_to_map {
            self.map_page(
                phys_addr,
                VirtualAddress(virt_addr),
                memory_attribute_index,
                page_size,
                xn,
            )?;

            pages_mapped += 1;
            phys_addr += page_size as u64;
            virt_addr += page_size as u64;
        }

        Ok(())
    }
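
    /// Picks the page size for the next chunk of a range mapping: use 1 GiB or
    /// 2 MiB blocks when both addresses are suitably aligned and enough bytes
    /// remain, otherwise fall back to 4 KiB pages, stopping the run at the
    /// next larger-page boundary so a later iteration can switch to bigger
    /// blocks.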
    fn get_page_size_and_page_count(
        &self,
        non_mapped: u64,
        phys_addr: u64,
        virt_addr: u64,
    ) -> (Arm64PageSize, u64) {
        if aligned(phys_addr, Arm64PageSize::Huge)
            && aligned(virt_addr, Arm64PageSize::Huge)
            && non_mapped >= PAGE_SIZE_1G
        {
            (Arm64PageSize::Huge, non_mapped / Arm64PageSize::Huge as u64)
        } else if aligned(phys_addr, Arm64PageSize::Large)
            && aligned(virt_addr, Arm64PageSize::Large)
            && non_mapped >= PAGE_SIZE_2M
        {
            let before_huge_page = align_up(virt_addr, Arm64PageSize::Huge) - virt_addr;
            let page_count = align_down(
                if before_huge_page > 0 && before_huge_page < non_mapped {
                    before_huge_page
                } else {
                    non_mapped
                },
                Arm64PageSize::Large,
            ) / Arm64PageSize::Large as u64;

            (Arm64PageSize::Large, page_count)
        } else {
            let before_huge_page = align_up(virt_addr, Arm64PageSize::Huge) - virt_addr;
            let page_count = if before_huge_page > 0 && before_huge_page < non_mapped {
                before_huge_page
            } else {
                let before_large_page = align_up(virt_addr, Arm64PageSize::Large) - virt_addr;
                if before_large_page > 0 && before_large_page < non_mapped {
                    before_large_page
                } else {
                    non_mapped
                }
            } / Arm64PageSize::Small as u64;

            (Arm64PageSize::Small, page_count)
        }
    }

    pub fn map_range(
        &mut self,
        phys_addr: u64,
        virt_addr: VirtualAddress,
        size: u64,
        memory_attribute_index: MemoryAttributeIndex,
        xn: Arm64NoExecute,
    ) -> Result<(), Arm64PageMapError> {
        if !aligned(phys_addr, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::MisalignedPhysAddress);
        }
        if !aligned(size, Arm64PageSize::Small) {
            return Err(Arm64PageMapError::InvalidMappingSize);
        }
        if size == 0 {
            return Err(Arm64PageMapError::EmptyMapping);
        }
        if virt_addr.offset() != 0 {
            return Err(Arm64PageMapError::MisalignedVirtAddress);
        }
        if !virt_addr.is_canonical() {
            return Err(Arm64PageMapError::NonCanonicalVirtAddress);
        }

        let mut non_mapped = size;
        let mut phys_addr = phys_addr;
        let mut virt_addr = virt_addr.into();

        let mut mapped = 0;
        while mapped < size {
            let (page_size, page_count) =
                self.get_page_size_and_page_count(non_mapped, phys_addr, virt_addr);
            self.map_pages(
                phys_addr,
                VirtualAddress(virt_addr),
                page_count as usize,
                page_size,
                memory_attribute_index,
                xn,
            )?;

            let just_mapped = page_count * page_size as u64;
            mapped += just_mapped;
            non_mapped -= just_mapped;
            phys_addr += just_mapped;
            virt_addr += just_mapped;
        }

        debug_assert!(mapped == size);
        debug_assert!(non_mapped == 0);
        Ok(())
    }
}
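
/// Builds identity-mapped (VA == PA) page tables for the given range, using
/// Normal Write-back memory and allowing execution at the privileged level
/// only. Returns the slice of `page_table_space` that was actually consumed.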
pub fn build_identity_page_tables_aarch64(
    page_table_gpa: u64,
    start_gpa: u64,
    size: u64,
    memory_attribute_indirection: MemoryAttributeIndirectionEl1,
    page_table_space: &mut [u8],
) -> &[u8] {
    if !aligned(start_gpa, Arm64PageSize::Large) {
        panic!("start_gpa not 2mb aligned");
    }

    if !aligned(size, Arm64PageSize::Large) {
        panic!("size not 2mb aligned");
    }

    #[cfg(feature = "tracing")]
    tracing::debug!(
        "Creating Aarch64 page tables at {page_table_gpa:#x} mapping starting at {start_gpa:#x} of size {size} bytes"
    );

    let mut page_tables =
        Arm64PageTableSpace::new(page_table_gpa as usize, page_table_space).unwrap();
    page_tables
        .map_range(
            start_gpa,
            VirtualAddress(start_gpa),
            size,
            memory_attribute_indirection
                .index_of(MemoryAttributeEl1::Normal_WriteBack)
                .unwrap(),
            Arm64NoExecute::UserOnly,
        )
        .unwrap();

    let used_space = page_tables.used_space();

    #[cfg(feature = "tracing")]
    {
        tracing::debug!("Page tables use {used_space} bytes");
        tracing::debug!("Page tables stats by level: {:?}", page_tables.lvl_stats());
    }

    &page_table_space[0..used_space]
}

#[cfg(test)]
mod tests {
    use std;

    use super::*;
    use std::vec;

    const DUMP_PAGE_TABLES: bool = false;

    #[test]
    fn test_mmu_small_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0x4000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 1]);

        let res = page_tables.map_pages(
            0x5000,
            VirtualAddress::from(0x5000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 1]);

        let res = page_tables.map_pages(
            0x200000,
            VirtualAddress::from(0x200000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 2]);

        let res = page_tables.map_pages(
            0x201000,
            VirtualAddress::from(0x201000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 1, 2]);

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0xffff_8000_0000_4000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 2, 3]);

        let res = page_tables.map_pages(
            0x5000,
            VirtualAddress::from(0xffff_8000_0000_5000),
            1,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 2, 3]);

        let res = page_tables.map_pages(
            0x4000_0000,
            VirtualAddress::from(0x4000_0000),
            0x200,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 2, 3, 4]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables.bin", space).expect("can dump the page tables");
        }
    }
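
    // Sanity check of the MAIR_EL1 helpers: attribute N lands in byte N of the
    // packed register value, the conversion round-trips, and `index_of`
    // reports the matching attribute slot.
    #[test]
    fn test_mair_encoding_round_trip() {
        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        // Attr0 occupies the least significant byte.
        assert_eq!(u64::from(mair_el1), 0x0000_0000_ffbb_4400);
        assert_eq!(
            MemoryAttributeIndirectionEl1::from(0x0000_0000_ffbb_4400_u64),
            mair_el1
        );
        assert_eq!(
            mair_el1.index_of(MemoryAttributeEl1::Normal_WriteBack),
            Some(MemoryAttributeIndex::Index3)
        );
    }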

    #[test]
    fn test_mmu_large_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0,
            VirtualAddress::from(0),
            0x2000,
            Arm64PageSize::Large,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 16, 0]);

        let res = page_tables.map_pages(
            0x4000,
            VirtualAddress::from(0x4000),
            4,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::AlreadyMapped));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 16, 0]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables_large.bin", space).expect("can dump the page tables");
        }
    }
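
    // Sanity check of the VirtualAddress bitfield and the alignment helpers:
    // a 48-bit VA splits into a 12-bit offset plus four 9-bit table indices,
    // and canonicality means the top 16 bits sign-extend bit 47.
    #[test]
    fn test_virtual_address_and_alignment_helpers() {
        let va = VirtualAddress::from(0xffff_8000_0020_1123_u64);
        assert!(va.is_canonical());
        assert_eq!(va.offset(), 0x123);
        assert_eq!(va.lvl3(), 1);
        assert_eq!(va.lvl2(), 1);
        assert_eq!(va.lvl1(), 0);
        assert_eq!(va.lvl0(), 0x100);
        assert_eq!(va.lvl_index(3), va.lvl3());

        // Bit 47 set without sign-extension into the upper bits is not canonical.
        assert!(!VirtualAddress::from(0x0000_8000_0000_0000_u64).is_canonical());

        assert_eq!(align_up(0x1234, Arm64PageSize::Small), 0x2000);
        assert_eq!(align_down(0x1234, Arm64PageSize::Small), 0x1000);
        assert!(aligned(0x4000_0000, Arm64PageSize::Huge));
        assert!(!aligned(0x200000, Arm64PageSize::Huge));
    }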

    #[test]
    fn test_mmu_huge_pages() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        let res = page_tables.map_pages(
            0,
            VirtualAddress::from(0),
            4,
            Arm64PageSize::Huge,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 0, 0]);

        let res = page_tables.map_pages(
            1 << 30,
            VirtualAddress::from(0x4000_0000),
            4,
            Arm64PageSize::Small,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Err(Arm64PageMapError::AlreadyMapped));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 0, 0]);

        if DUMP_PAGE_TABLES {
            std::fs::write("page_tables_huge.bin", space).expect("can dump the page tables");
        }
    }
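
    // Sanity check of the up-front argument validation: misaligned, empty, and
    // non-canonical requests are rejected before any table is touched.
    #[test]
    fn test_mmu_rejects_invalid_arguments() {
        let mut space = vec![0u8; 0x10000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");
        let wb_index = MemoryAttributeIndex::Index3;

        // Physical address is not 4 KiB aligned.
        assert_eq!(
            page_tables.map_range(
                0x1001,
                VirtualAddress::from(0x1000),
                0x1000,
                wb_index,
                Arm64NoExecute::Full,
            ),
            Err(Arm64PageMapError::MisalignedPhysAddress)
        );

        // Zero-sized mappings are refused.
        assert_eq!(
            page_tables.map_range(
                0x1000,
                VirtualAddress::from(0x1000),
                0,
                wb_index,
                Arm64NoExecute::Full,
            ),
            Err(Arm64PageMapError::EmptyMapping)
        );

        // The upper bits must be a sign-extension of bit 47.
        assert_eq!(
            page_tables.map_range(
                0x1000,
                VirtualAddress::from(0x0000_8000_0000_0000_u64),
                0x1000,
                wb_index,
                Arm64NoExecute::Full,
            ),
            Err(Arm64PageMapError::NonCanonicalVirtAddress)
        );

        // The virtual address must sit on a page boundary.
        assert_eq!(
            page_tables.map_pages(
                0x1000,
                VirtualAddress::from(0x1800),
                1,
                Arm64PageSize::Small,
                wb_index,
                Arm64NoExecute::Full,
            ),
            Err(Arm64PageMapError::MisalignedVirtAddress)
        );
    }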

    #[test]
    fn test_mmu_page_mix() {
        let mut space = vec![0xaa; 0x100000];
        let mut page_tables = Arm64PageTableSpace::new(0x00000040248000, &mut space)
            .expect("Can initialize page tables");

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let wb_index = mair_el1
            .index_of(MemoryAttributeEl1::Normal_WriteBack)
            .expect("must be some WB memory available");

        const ONE_GIB: u64 = 1 << 30;

        let addr = ONE_GIB - 0x1000;
        let res = page_tables.map_range(
            addr,
            VirtualAddress::from(addr),
            3 * ONE_GIB,
            wb_index,
            Arm64NoExecute::Full,
        );
        assert_eq!(res, Ok(()));
        assert_eq!(page_tables.lvl_stats(), [1, 1, 2, 2]);
    }
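
    // A usage sketch for build_identity_page_tables_aarch64: mapping a single
    // 2 MiB block from a zeroed buffer should consume exactly three table
    // pages (the root plus one table each at levels 1 and 2).
    #[test]
    fn test_build_identity_page_tables() {
        let mut space = vec![0u8; 0x100000];

        let mair_el1 = MemoryAttributeIndirectionEl1([
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Normal_NonCacheable,
            MemoryAttributeEl1::Normal_WriteThrough,
            MemoryAttributeEl1::Normal_WriteBack,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
            MemoryAttributeEl1::Device_nGnRnE,
        ]);

        let page_tables = build_identity_page_tables_aarch64(
            0x00000040248000,
            0x40200000,
            PAGE_SIZE_2M,
            mair_el1,
            &mut space,
        );
        assert_eq!(page_tables.len(), 3 * PAGE_SIZE_4K as usize);
    }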
}