page_table/x64.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Methods to construct page tables on x64.

use crate::IdentityMapSize;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

const X64_PTE_PRESENT: u64 = 1;
const X64_PTE_READ_WRITE: u64 = 1 << 1;
const X64_PTE_ACCESSED: u64 = 1 << 5;
const X64_PTE_DIRTY: u64 = 1 << 6;
const X64_PTE_LARGE_PAGE: u64 = 1 << 7;

const PAGE_TABLE_ENTRY_COUNT: usize = 512;

const X64_PAGE_SHIFT: u64 = 12;
const X64_PTE_BITS: u64 = 9;

/// Number of bytes in a page for X64.
pub const X64_PAGE_SIZE: u64 = 4096;

/// Number of bytes in a large page for X64.
pub const X64_LARGE_PAGE_SIZE: u64 = 0x200000;

/// Number of bytes in a 1GB page for X64.
pub const X64_1GB_PAGE_SIZE: u64 = 0x40000000;

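/// A single x64 page table entry (PTE).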
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
#[repr(transparent)]
pub struct PageTableEntry {
    pub(crate) entry: u64,
}

impl std::fmt::Debug for PageTableEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PageTableEntry")
            .field("entry", &self.entry)
            .field("is_present", &self.is_present())
            .field("is_large_page", &self.is_large_page())
            .field("gpa", &self.gpa())
            .finish()
    }
}

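/// The kind of mapping a page table entry describes, along with the physical
/// address it refers to.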
#[derive(Debug, Copy, Clone)]
pub enum PageTableEntryType {
    Leaf1GbPage(u64),
    Leaf2MbPage(u64),
    Leaf4kPage(u64),
    Pde(u64),
}

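/// Helpers for building and inspecting page table entries whose address mask and
/// confidential (shared/private) bit position are provided by the implementer.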
pub trait PteOps {
    fn get_addr_mask(&self) -> u64;
    fn get_confidential_mask(&self) -> u64;

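    /// Build a PTE of the given type. The entry is marked present, accessed, and
    /// read/write; leaf entries are also marked dirty, and 1GB/2MB leaves set the
    /// large page bit.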
    fn build_pte(entry_type: PageTableEntryType) -> PageTableEntry {
        let mut entry: u64 = X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE;

        match entry_type {
            PageTableEntryType::Leaf1GbPage(address) => {
                // Must be 1GB aligned.
                assert!(address % X64_1GB_PAGE_SIZE == 0);
                entry |= address;
                entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf2MbPage(address) => {
                // Leaf entry, set like UEFI does for 2MB pages. Must be 2MB aligned.
                assert!(address % X64_LARGE_PAGE_SIZE == 0);
                entry |= address;
                entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf4kPage(address) => {
                // Must be 4K aligned.
                assert!(address % X64_PAGE_SIZE == 0);
                entry |= address;
                entry |= X64_PTE_DIRTY;
            }
            PageTableEntryType::Pde(address) => {
                // Points to another pagetable.
                assert!(address % X64_PAGE_SIZE == 0);
                entry |= address;
            }
        }

        PageTableEntry { entry }
    }

    fn is_pte_present(pte: &PageTableEntry) -> bool {
        pte.is_present()
    }

    fn is_pte_large_page(pte: &PageTableEntry) -> bool {
        pte.is_large_page()
    }

    fn get_gpa_from_pte(&self, pte: &PageTableEntry) -> Option<u64> {
        if pte.is_present() {
            Some(self.get_addr_from_pte(pte))
        } else {
            None
        }
    }

    fn get_addr_from_pte(&self, pte: &PageTableEntry) -> u64 {
        pte.entry & self.get_addr_mask()
    }

    fn set_addr_in_pte(&self, pte: &mut PageTableEntry, address: u64) {
        let mask = self.get_addr_mask();
        pte.entry = (pte.entry & !mask) | (address & mask);
    }

    fn set_pte_confidentiality(&self, pte: &mut PageTableEntry, confidential: bool) {
        let mask = self.get_confidential_mask();
        if confidential {
            pte.entry |= mask;
        } else {
            pte.entry &= !mask;
        }
    }
}

impl PageTableEntry {
    const VALID_BITS: u64 = 0x000f_ffff_ffff_f000;

    /// Set an AMD64 PTE to represent either a leaf page (1GB, 2MB, or 4K) or a PDE
    /// pointing to another page table.
    /// This sets the PTE to present, accessed, and read/write/execute; leaf entries
    /// are also marked dirty.
    pub fn set_entry(&mut self, entry_type: PageTableEntryType) {
        self.entry = X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE;

        match entry_type {
            PageTableEntryType::Leaf1GbPage(address) => {
                // Must be 1GB aligned.
                assert!(address % X64_1GB_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf2MbPage(address) => {
                // Leaf entry, set like UEFI does for 2MB pages. Must be 2MB aligned.
                assert!(address % X64_LARGE_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
            PageTableEntryType::Leaf4kPage(address) => {
                // Must be 4K aligned.
                assert!(address % X64_PAGE_SIZE == 0);
                self.entry |= address;
                self.entry |= X64_PTE_DIRTY;
            }
            PageTableEntryType::Pde(address) => {
                // Points to another pagetable.
                assert!(address % X64_PAGE_SIZE == 0);
                self.entry |= address;
            }
        }
    }

    pub fn is_present(&self) -> bool {
        self.entry & X64_PTE_PRESENT == X64_PTE_PRESENT
    }

    pub fn is_large_page(&self) -> bool {
        self.entry & X64_PTE_LARGE_PAGE == X64_PTE_LARGE_PAGE
    }

    pub fn gpa(&self) -> Option<u64> {
        if self.is_present() {
            // bits 51 to 12 describe the gpa of the next page table
            Some(self.entry & Self::VALID_BITS)
        } else {
            None
        }
    }

    pub fn set_addr(&mut self, addr: u64) {
        assert!(addr & !Self::VALID_BITS == 0);

        // clear addr bits, set new addr
        self.entry &= !Self::VALID_BITS;
        self.entry |= addr;
    }

    pub fn get_addr(&self) -> u64 {
        self.entry & Self::VALID_BITS
    }

    pub fn clear(&mut self) {
        self.entry = 0;
    }
}

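/// A page table: 512 page table entries, one 4K page in total.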
#[repr(C)]
#[derive(Debug, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRY_COUNT],
}

impl PageTable {
    // fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
    //     self.entries.iter()
    // }

    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        self.entries.iter_mut()
    }

    /// Treat this page table as a page table of a given level, and locate the entry corresponding to a va.
    pub fn entry(&mut self, gva: u64, level: u8) -> &mut PageTableEntry {
        let index = get_amd64_pte_index(gva, level as u64) as usize;
        &mut self.entries[index]
    }
}

impl std::ops::Index<usize> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl std::ops::IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

/// Get an AMD64 PTE index based on page table level.
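///
/// For example, level 3 selects the PML4 index and level 2 the PDPT index:
///
/// ```ignore
/// // A 1GB virtual address has PML4 index 0 and PDPT index 1.
/// assert_eq!(get_amd64_pte_index(0x4000_0000, 3), 0);
/// assert_eq!(get_amd64_pte_index(0x4000_0000, 2), 1);
/// ```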
pub fn get_amd64_pte_index(gva: u64, page_map_level: u64) -> u64 {
    let index = gva >> (X64_PAGE_SHIFT + page_map_level * X64_PTE_BITS);
    index & ((1 << X64_PTE_BITS) - 1)
}

/// Calculate the number of PDE page tables required to identity map a given gpa and size.
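///
/// For example, a range contained within a single 1GB-aligned region needs one
/// PDE table, while a small range that crosses a 1GB boundary needs two:
///
/// ```ignore
/// assert_eq!(calculate_pde_table_count(0, 512), 1);
/// assert_eq!(calculate_pde_table_count(X64_1GB_PAGE_SIZE - 512, 1024), 2);
/// ```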
pub fn calculate_pde_table_count(start_gpa: u64, size: u64) -> u64 {
    let mut count = 0;

    // Determine the 1GB aligned boundaries: the start rounded up and the end rounded down.
    let start_aligned_up = align_up_to_1_gb_page_size(start_gpa);
    let end_gpa = start_gpa + size;
    let end_aligned_down = (end_gpa / X64_1GB_PAGE_SIZE) * X64_1GB_PAGE_SIZE;

    // Ranges sized less than 1GB are treated differently.
    if size < X64_1GB_PAGE_SIZE {
        // A range either takes one or two pages depending on if it crosses a 1GB boundary.
        if end_gpa > end_aligned_down && start_gpa < end_aligned_down {
            count = 2;
        } else {
            count = 1;
        }
    } else {
        // Count the first unaligned start up to an aligned 1GB range.
        if start_gpa != start_aligned_up {
            count += 1;
        }

        // Add the inner ranges that are 1GB aligned.
        if end_aligned_down > start_aligned_up {
            count += (end_aligned_down - start_aligned_up) / X64_1GB_PAGE_SIZE;
        }

        // Add any unaligned end range.
        if end_gpa > end_aligned_down {
            count += 1;
        }
    }

    count
}

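/// Builder for a set of x64 page tables that identity map a region, with an
/// optional local map, confidential bit, and reset vector mapping.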
#[derive(Debug, Clone)]
pub struct PageTableBuilder {
    page_table_gpa: u64,
    start_gpa: u64,
    size: u64,
    local_map: Option<(u64, u64)>,
    confidential_bit: Option<u32>,
    map_reset_vector: bool,
}

impl PteOps for PageTableBuilder {
    fn get_addr_mask(&self) -> u64 {
        const ALL_ADDR_BITS: u64 = 0x000f_ffff_ffff_f000;
        ALL_ADDR_BITS & !self.get_confidential_mask()
    }

    fn get_confidential_mask(&self) -> u64 {
        if let Some(confidential_bit) = self.confidential_bit {
            1u64 << confidential_bit
        } else {
            0
        }
    }
}

impl PageTableBuilder {
    pub fn new(page_table_gpa: u64) -> Self {
        PageTableBuilder {
            page_table_gpa,
            start_gpa: 0,
            size: 0,
            local_map: None,
            confidential_bit: None,
            map_reset_vector: false,
        }
    }

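    /// Identity map the region of `size` bytes starting at `start_gpa` with 2MB
    /// pages. Both must be 2MB aligned, and `size` must be nonzero and at most 512GB.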
    pub fn with_mapped_region(mut self, start_gpa: u64, size: u64) -> Self {
        self.start_gpa = start_gpa;
        self.size = size;
        self
    }

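    /// Also map a local map region of `size` bytes at `start_va` with 2MB pages.
    /// Both must be 2MB aligned, and the range must not overlap the mapped region.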
    pub fn with_local_map(mut self, start_va: u64, size: u64) -> Self {
        self.local_map = Some((start_va, size));
        self
    }

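    /// Set the bit position of the confidential (shared/private) bit; it is set in
    /// every PTE that is built.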
    pub fn with_confidential_bit(mut self, bit_position: u32) -> Self {
        self.confidential_bit = Some(bit_position);
        self
    }

    /// Map the reset vector at page 0xFFFFF with a single page.
    pub fn with_reset_vector(mut self, map_reset_vector: bool) -> Self {
        self.map_reset_vector = map_reset_vector;
        self
    }

    /// Build a set of X64 page tables identity mapping the given region. `size` must be at most 512GB.
    /// This creates up to 3 + N page tables: 1 PML4 table, up to 2 PDPT tables, and N PDE tables
    /// (counted at 1 per GB of size) holding the 2MB mappings.
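    ///
    /// For example, with illustrative placement and region values:
    ///
    /// ```ignore
    /// // Identity map 4MB at GPA 0, placing the page tables at GPA 0x10_0000.
    /// let tables = PageTableBuilder::new(0x10_0000)
    ///     .with_mapped_region(0, 2 * X64_LARGE_PAGE_SIZE)
    ///     .build();
    /// assert_eq!(tables.len() % X64_PAGE_SIZE as usize, 0);
    /// ```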
    pub fn build(self) -> Vec<u8> {
        const SIZE_512_GB: u64 = 0x8000000000;

        if self.size == 0 {
            panic!("size not set");
        }

        if self.size > SIZE_512_GB {
            panic!("more than 512 gb size not supported");
        }

        if self.size % X64_LARGE_PAGE_SIZE != 0 {
            panic!("size not 2mb aligned");
        }

        // start_gpa and size must be 2MB aligned.
        if self.start_gpa % X64_LARGE_PAGE_SIZE != 0 {
            panic!("start_gpa not 2mb aligned");
        }

        let start_va = self.start_gpa;
        let end_va = start_va + self.size;
        let page_table_gpa = self.page_table_gpa;

        if let Some((local_map_start, local_map_size)) = self.local_map {
            if local_map_start % X64_LARGE_PAGE_SIZE != 0 {
                panic!("local map address not 2 mb aligned");
            }

            if local_map_size % X64_LARGE_PAGE_SIZE != 0 {
                panic!("local map size not 2 mb aligned");
            }

            if local_map_size == 0 {
                panic!("local map size cannot be 0");
            }

            let local_map_end = local_map_start + local_map_size;
            if local_map_end > start_va && local_map_start < end_va {
                panic!("local map overlaps with mapped region");
            }
        }

        // Allocate single PML4E page table.
        let mut page_table: Vec<PageTable> = Vec::new();
        page_table.push(PageTable::new_zeroed());
        let pml4_table_index = 0;
        let confidential = self.confidential_bit.is_some();

        let mut link_tables = |start_va: u64, end_va: u64, use_large_pages: bool| {
            let mut current_va = start_va;
            while current_va < end_va {
                tracing::trace!(current_va);

                let pdpte_table_index = {
                    let next_index = page_table.len();
                    let pml4_entry = page_table[pml4_table_index].entry(current_va, 3);
                    if !pml4_entry.is_present() {
                        // Allocate and link PDPTE table.
                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
                        let mut new_entry =
                            Self::build_pte(PageTableEntryType::Pde(output_address));
                        self.set_pte_confidentiality(&mut new_entry, confidential);
                        *pml4_entry = new_entry;
                        page_table.push(PageTable::new_zeroed());

                        next_index
                    } else {
                        ((self.get_addr_from_pte(pml4_entry) - page_table_gpa) / X64_PAGE_SIZE)
                            .try_into()
                            .expect("Valid page table index")
                    }
                };

                tracing::trace!(pdpte_table_index);

                let pde_table_index = {
                    let next_index = page_table.len();
                    let pdpte_entry = page_table[pdpte_table_index].entry(current_va, 2);
                    if !pdpte_entry.is_present() {
                        // Allocate and link PDE table.
                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
                        let mut new_entry =
                            Self::build_pte(PageTableEntryType::Pde(output_address));
                        self.set_pte_confidentiality(&mut new_entry, confidential);
                        *pdpte_entry = new_entry;
                        page_table.push(PageTable::new_zeroed());

                        next_index
                    } else {
                        ((self.get_addr_from_pte(pdpte_entry) - page_table_gpa) / X64_PAGE_SIZE)
                            .try_into()
                            .expect("Valid page table index")
                    }
                };

                tracing::trace!(pde_table_index);

                let next_index = page_table.len();
                let pde_entry = page_table[pde_table_index].entry(current_va, 1);
                assert!(!pde_entry.is_present());

                if use_large_pages {
                    assert!(!pde_entry.is_present());

                    let mut new_entry =
                        Self::build_pte(PageTableEntryType::Leaf2MbPage(current_va));
                    self.set_pte_confidentiality(&mut new_entry, confidential);
                    *pde_entry = new_entry;
                    current_va += X64_LARGE_PAGE_SIZE;
                } else {
                    let pt_table_index = if !pde_entry.is_present() {
                        // Allocate and link page table.
                        let output_address = page_table_gpa + next_index as u64 * X64_PAGE_SIZE;
                        let mut new_entry =
                            Self::build_pte(PageTableEntryType::Pde(output_address));
                        self.set_pte_confidentiality(&mut new_entry, confidential);
                        *pde_entry = new_entry;
                        page_table.push(PageTable::new_zeroed());

                        next_index
                    } else {
                        ((self.get_addr_from_pte(pde_entry) - page_table_gpa) / X64_PAGE_SIZE)
                            .try_into()
                            .expect("Valid page table index")
                    };

                    tracing::trace!(pt_table_index);

                    let pt_entry = page_table[pt_table_index].entry(current_va, 0);
                    let mut new_entry = Self::build_pte(PageTableEntryType::Leaf4kPage(current_va));
                    self.set_pte_confidentiality(&mut new_entry, confidential);
                    *pt_entry = new_entry;
                    current_va += X64_PAGE_SIZE;
                }
            }
        };

        link_tables(start_va, end_va, true);

        // Create local map area if present.
        if let Some((local_map_start, local_map_size)) = self.local_map {
            link_tables(local_map_start, local_map_start + local_map_size, true);
        }

        if self.map_reset_vector {
            // Map the reset vector pfn of 0xFFFFF
            tracing::trace!("identity mapping reset page 0xFFFFF");
            let reset_vector_addr = 0xFFFFF * X64_PAGE_SIZE;
            link_tables(reset_vector_addr, reset_vector_addr + X64_PAGE_SIZE, false);
        }

        // Flatten page table vec into u8 vec
        flatten_page_table(page_table)
    }
}

/// Build a set of X64 page tables identity mapping the bottom of the address
/// space with an optional address bias.
///
/// An optional PML4E entry may be linked, with the arguments being (link_target_gpa, linkage_gpa).
/// link_target_gpa is the GPA of the page table that the linked PML4E will point to.
/// linkage_gpa is the 512GB-aligned address whose PML4E slot is used for the link.
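///
/// For example, with an illustrative page table placement address:
///
/// ```ignore
/// // Identity map the low 4GB with no address bias and no extra PML4E link.
/// let tables = build_page_tables_64(0x20_0000, 0, IdentityMapSize::Size4Gb, None);
/// // 1 PML4E table + 1 PDPTE table + 4 PDE tables = 6 pages.
/// assert_eq!(tables.len(), 6 * X64_PAGE_SIZE as usize);
/// ```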
pub fn build_page_tables_64(
    page_table_gpa: u64,
    address_bias: u64,
    identity_map_size: IdentityMapSize,
    pml4e_link: Option<(u64, u64)>,
) -> Vec<u8> {
    // Allocate page tables. There are up to 10 total page tables:
    //      1 PML4E (Level 4) (omitted if the address bias is non-zero)
    //      1 PDPTE (Level 3)
    //      4 or 8 PDE tables (Level 2)
    // Note that there are no level 1 page tables, as 2MB pages are used.
    let leaf_page_table_count = match identity_map_size {
        IdentityMapSize::Size4Gb => 4,
        IdentityMapSize::Size8Gb => 8,
    };
    let page_table_count = leaf_page_table_count + if address_bias == 0 { 2 } else { 1 };
    let mut page_table: Vec<PageTable> = vec![PageTable::new_zeroed(); page_table_count];
    let mut page_table_allocator = page_table.iter_mut().enumerate();

    // Allocate single PDPTE table.
    let pdpte_table = if address_bias == 0 {
        // Allocate single PML4E page table.
        let (_, pml4e_table) = page_table_allocator
            .next()
            .expect("pagetable should always be available, code bug if not");

        // PDPTE table is the next pagetable.
        let (pdpte_table_index, pdpte_table) = page_table_allocator
            .next()
            .expect("pagetable should always be available, code bug if not");

        // Set PML4E entry linking PML4E to PDPTE.
        let output_address = page_table_gpa + pdpte_table_index as u64 * X64_PAGE_SIZE;
        pml4e_table[0].set_entry(PageTableEntryType::Pde(output_address));

        // Set PML4E entry to link the additional entry if specified.
        if let Some((link_target_gpa, linkage_gpa)) = pml4e_link {
            assert!((linkage_gpa & 0x7FFFFFFFFF) == 0);
            pml4e_table[linkage_gpa as usize >> 39]
                .set_entry(PageTableEntryType::Pde(link_target_gpa));
        }

        pdpte_table
    } else {
        // PDPTE table is the first table, if no PML4E.
        page_table_allocator
            .next()
            .expect("pagetable should always be available, code bug if not")
            .1
    };

    // Build PDEs that point to 2 MB pages.
    let top_address = match identity_map_size {
        IdentityMapSize::Size4Gb => 0x100000000u64,
        IdentityMapSize::Size8Gb => 0x200000000u64,
    };
    let mut current_va = 0;

    while current_va < top_address {
        // Allocate a new PDE table
        let (pde_table_index, pde_table) = page_table_allocator
            .next()
            .expect("pagetable should always be available, code bug if not");

        // Link PDPTE table to PDE table (L3 to L2)
        let pdpte_index = get_amd64_pte_index(current_va, 2);
        let output_address = page_table_gpa + pde_table_index as u64 * X64_PAGE_SIZE;
        let pdpte_entry = &mut pdpte_table[pdpte_index as usize];
        assert!(!pdpte_entry.is_present());
        pdpte_entry.set_entry(PageTableEntryType::Pde(output_address));

        // Set all 2MB entries in this PDE table.
        for entry in pde_table.iter_mut() {
            entry.set_entry(PageTableEntryType::Leaf2MbPage(current_va + address_bias));
            current_va += X64_LARGE_PAGE_SIZE;
        }
    }

    // All pagetables should be used, code bug if not.
    assert!(page_table_allocator.next().is_none());

    // Flatten page table vec into u8 vec
    flatten_page_table(page_table)
}

/// Align an address up to the start of the next page.
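///
/// For example, `align_up_to_page_size(4095)` and `align_up_to_page_size(4096)`
/// both return 4096, while `align_up_to_page_size(4097)` returns 8192.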
pub fn align_up_to_page_size(address: u64) -> u64 {
    (address + X64_PAGE_SIZE - 1) & !(X64_PAGE_SIZE - 1)
}

/// Align an address up to the start of the next large (2MB) page.
pub fn align_up_to_large_page_size(address: u64) -> u64 {
    (address + X64_LARGE_PAGE_SIZE - 1) & !(X64_LARGE_PAGE_SIZE - 1)
}

/// Align an address up to the start of the next 1GB page.
pub fn align_up_to_1_gb_page_size(address: u64) -> u64 {
    (address + X64_1GB_PAGE_SIZE - 1) & !(X64_1GB_PAGE_SIZE - 1)
}

fn flatten_page_table(page_table: Vec<PageTable>) -> Vec<u8> {
    let mut flat_tables = Vec::with_capacity(page_table.len() * X64_PAGE_SIZE as usize);
    for table in page_table {
        flat_tables.extend_from_slice(table.as_bytes());
    }

    flat_tables
}

#[cfg(test)]
mod tests {
    use super::X64_1GB_PAGE_SIZE;
    use super::align_up_to_large_page_size;
    use super::align_up_to_page_size;
    use super::calculate_pde_table_count;

    #[test]
    fn test_align_up() {
        assert_eq!(align_up_to_page_size(4096), 4096);
        assert_eq!(align_up_to_page_size(4095), 4096);
        assert_eq!(align_up_to_page_size(4097), 8192);
    }

    #[test]
    fn test_large_align_up() {
        assert_eq!(align_up_to_large_page_size(0), 0);
        assert_eq!(align_up_to_large_page_size(4096), 0x200000);
        assert_eq!(align_up_to_large_page_size(0x200000), 0x200000);
        assert_eq!(align_up_to_large_page_size(0x200001), 0x400000);
    }

    #[test]
    fn test_pde_size_calc() {
        assert_eq!(calculate_pde_table_count(0, 512), 1);
        assert_eq!(calculate_pde_table_count(0, 1024 * 1024), 1);
        assert_eq!(calculate_pde_table_count(512, 1024 * 1024), 1);
        assert_eq!(calculate_pde_table_count(X64_1GB_PAGE_SIZE - 512, 1024), 2);
        assert_eq!(calculate_pde_table_count(X64_1GB_PAGE_SIZE - 512, 512), 1);
        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE), 1);
        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE + 1), 2);
        assert_eq!(calculate_pde_table_count(1, X64_1GB_PAGE_SIZE + 1), 2);
        assert_eq!(calculate_pde_table_count(512, X64_1GB_PAGE_SIZE * 2), 3);

        assert_eq!(calculate_pde_table_count(0, X64_1GB_PAGE_SIZE * 3), 3);
        assert_eq!(
            calculate_pde_table_count(X64_1GB_PAGE_SIZE, X64_1GB_PAGE_SIZE * 3),
            3
        );
    }
}