openhcl_boot/arch/x86_64/address_space.rs

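//! Page table manipulation and the bootshim "local map": a reserved 2MB virtual
//! address range that can be remapped at runtime to access arbitrary guest
//! physical pages, plus helpers for flipping TDX shared/private visibility on
//! the large page backing the hypercall input/output pages.
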
use crate::single_threaded::SingleThreaded;
use core::arch::asm;
use core::cell::Cell;
use core::marker::PhantomData;
use core::sync::atomic::AtomicU64;
use core::sync::atomic::Ordering;
use core::sync::atomic::compiler_fence;
use hvdef::HV_PAGE_SIZE;
use memory_range::MemoryRange;
use x86defs::X64_LARGE_PAGE_SIZE;
use x86defs::tdx::TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

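// x86-64 page table entry flag bits. X64_PTE_CONFIDENTIAL (bit 51) is the bit
// this code sets on mappings created with `confidential == true`.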
const X64_PTE_PRESENT: u64 = 1;
const X64_PTE_READ_WRITE: u64 = 1 << 1;
const X64_PTE_ACCESSED: u64 = 1 << 5;
const X64_PTE_DIRTY: u64 = 1 << 6;
const X64_PTE_LARGE_PAGE: u64 = 1 << 7;
const X64_PTE_CONFIDENTIAL: u64 = 1 << 51;

const PAGE_TABLE_ENTRY_COUNT: usize = 512;

const X64_PAGE_SHIFT: u64 = 12;
const X64_PTE_BITS: u64 = 9;

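/// A single 64-bit x86-64 page table entry.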
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
#[repr(transparent)]
struct PageTableEntry {
    entry: u64,
}
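
/// The kind of mapping written by [`PageTableEntry::set_entry`]. Only 2MB leaf
/// entries are needed here.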
#[derive(Debug, Copy, Clone)]
pub enum PageTableEntryType {
    Leaf2MbPage(u64),
}

impl PageTableEntry {
    fn atomic_pte<'a>(&self) -> &'a AtomicU64 {
        // SAFETY: AtomicU64 has the same in-memory representation as u64, so
        // reinterpreting a pointer to the raw entry as an AtomicU64 is valid.
        unsafe {
            let ptr = &self.entry as *const u64;
            &*ptr.cast()
        }
    }

    fn write_pte(&mut self, val: u64) {
        self.atomic_pte().store(val, Ordering::SeqCst);
    }

    fn read_pte(&self) -> u64 {
        self.atomic_pte().load(Ordering::Relaxed)
    }

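    /// Writes this entry as a present, writable leaf mapping of the given type,
    /// optionally marking it confidential (private).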
    pub fn set_entry(&mut self, entry_type: PageTableEntryType, confidential: bool) {
        let mut entry: u64 = X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE;
        if confidential {
            entry |= X64_PTE_CONFIDENTIAL;
        }

        match entry_type {
            PageTableEntryType::Leaf2MbPage(address) => {
                // Leaf entries must map a 2MB-aligned address.
                assert!(address % X64_LARGE_PAGE_SIZE == 0);
                entry |= address;
                entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
            }
        }

        self.write_pte(entry);
    }

    pub fn is_present(&self) -> bool {
        self.read_pte() & X64_PTE_PRESENT == X64_PTE_PRESENT
    }

    pub fn is_large_page(&self) -> bool {
        self.entry & X64_PTE_LARGE_PAGE == X64_PTE_LARGE_PAGE
    }

    pub fn get_addr(&self) -> u64 {
        // Mask off the flag bits and the confidential bit, leaving the address.
        const VALID_BITS: u64 = 0x000f_ffff_ffff_f000;

        self.read_pte() & VALID_BITS & !X64_PTE_CONFIDENTIAL
    }

    pub fn clear(&mut self) {
        self.write_pte(0);
    }

    pub fn tdx_is_shared(&mut self) -> bool {
        // The mapping is treated as shared when the TDX shared GPA boundary bit is set.
        let val = self.read_pte();
        val & TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT == TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT
    }

    pub fn tdx_set_shared(&mut self) {
        // Mark the mapping as shared by setting the shared GPA boundary bit.
        let mut val = self.read_pte();
        val |= TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
        self.write_pte(val);
    }

    pub fn tdx_set_private(&mut self) {
        // Mark the mapping as private by clearing the shared GPA boundary bit.
        let mut val = self.read_pte();
        val &= !TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
        self.write_pte(val);
    }
}

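/// A 4KB page table containing 512 entries.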
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRY_COUNT],
}

impl PageTable {
    /// Treats this table as a table at the given level (3 = PML4, 2 = PDPT,
    /// 1 = PD, 0 = PT) and returns the entry that maps `gva` at that level.
    pub fn entry(&mut self, gva: u64, level: u8) -> &mut PageTableEntry {
        let index = get_amd64_pte_index(gva, level as u64) as usize;
        &mut self.entries[index]
    }
}

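/// Computes the 9-bit index into a page table at `page_map_level` for `gva`,
/// i.e. bits `[12 + 9*level + 8 : 12 + 9*level]` of the address. For example,
/// level 1 (the page directory) uses bits 29:21.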
fn get_amd64_pte_index(gva: u64, page_map_level: u64) -> u64 {
    let index = gva >> (X64_PAGE_SHIFT + page_map_level * X64_PTE_BITS);
    index & ((1 << X64_PTE_BITS) - 1)
}

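/// A single reserved 2MB virtual address range that can be remapped at runtime
/// to access arbitrary guest physical memory. `pte_ptr` points at the page
/// directory entry backing that range.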
pub struct LocalMap<'a> {
    pte_ptr: *mut PageTableEntry,
    va: u64,
    _dummy: PhantomData<&'a ()>,
}

impl<'a> LocalMap<'a> {
    /// Maps `range` into the local map and returns a mapping exposing it as a
    /// byte slice. The range must not cross a 2MB page boundary; the mapping is
    /// marked confidential when `confidential` is true.
    pub fn map_pages<'b>(
        &'b mut self,
        range: MemoryRange,
        confidential: bool,
    ) -> LocalMapMapping<'a, 'b> {
        let offset = range.start() % X64_LARGE_PAGE_SIZE;
        assert!(offset + range.len() <= X64_LARGE_PAGE_SIZE, "{range}");

        let aligned_gpa = range.start() - offset;
        let entry = self.local_map_entry();
        assert!(!entry.is_present());
        entry.set_entry(PageTableEntryType::Leaf2MbPage(aligned_gpa), confidential);
        let va = self.va + offset;
        // Ensure the page table update is not reordered after accesses made
        // through the new mapping.
        compiler_fence(Ordering::SeqCst);
        // SAFETY: the local map VA is now backed by the requested GPA range, and
        // the slice's lifetime is tied to the mapping, which unmaps itself on drop.
        let buffer =
            unsafe { core::slice::from_raw_parts_mut(va as *mut u8, range.len() as usize) };
        LocalMapMapping {
            data: buffer,
            local_map: self,
        }
    }

    fn local_map_entry(&self) -> &'a mut PageTableEntry {
        // SAFETY: pte_ptr points at the local map's page directory entry, which
        // remains valid for the lifetime of the local map.
        unsafe { &mut *self.pte_ptr }
    }
}

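/// An active mapping created by [`LocalMap::map_pages`]. The mapped bytes are
/// accessible through `data`; dropping the mapping unmaps the page and flushes
/// the TLB entry.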
pub struct LocalMapMapping<'a, 'b> {
    pub data: &'a mut [u8],
    local_map: &'b mut LocalMap<'a>,
}

impl Drop for LocalMapMapping<'_, '_> {
    fn drop(&mut self) {
        unmap_page_helper(self.local_map);
    }
}

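/// Clears the local map's page directory entry and invalidates the TLB entry
/// for its virtual address.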
fn unmap_page_helper(local_map: &LocalMap<'_>) {
    // Prevent accesses through the mapping from being reordered past the unmap.
    compiler_fence(Ordering::SeqCst);
    // SAFETY: pte_ptr points at the local map's page directory entry, and invlpg
    // only invalidates the TLB entry for the local map's own VA.
    unsafe {
        let entry = &mut *local_map.pte_ptr;
        entry.clear();
        let va = local_map.va;
        asm!("invlpg [{0}]", in(reg) va);
    }
}

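/// Returns a mutable reference to the page table located at `address`.
///
/// # Safety
///
/// The caller must guarantee that `address` points at a valid, accessible
/// (identity-mapped) page table and that no other references to it are live.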
unsafe fn page_table_at_address(address: u64) -> &'static mut PageTable {
    // SAFETY: the caller guarantees the address points at a valid page table.
    unsafe { &mut *(address as *mut u64).cast() }
}

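/// Walks the active page tables (from CR3) and returns the page directory entry
/// covering `va`, asserting that the PML4 and PDPT entries along the way are
/// present.
///
/// # Safety
///
/// The caller must guarantee that the paging structures reachable from CR3 are
/// valid and accessible, and that no other references to them are live.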
unsafe fn get_pde_for_va(va: u64) -> &'static mut PageTableEntry {
    let mut page_table_base: u64;

    // SAFETY: reads CR3 and walks the paging structures, which the caller
    // guarantees are valid and accessible.
    unsafe {
        asm!("mov {0}, cr3", out(reg) page_table_base);
        let pml4 = page_table_at_address(page_table_base);
        let entry = pml4.entry(va, 3);
        assert!(entry.is_present());
        let pdpt = page_table_at_address(entry.get_addr());
        let entry = pdpt.entry(va, 2);
        assert!(entry.is_present());
        let pd = page_table_at_address(entry.get_addr());
        let entry = pd.entry(va, 1);
        entry
    }
}

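/// Guards against initializing the local map more than once. The bootshim
/// environment is single threaded, so a `Cell` wrapped in `SingleThreaded`
/// suffices.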
static LOCAL_MAP_INITIALIZED: SingleThreaded<Cell<bool>> = SingleThreaded(Cell::new(false));

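/// Initializes the bootshim local map at `va`, which must be a 2MB-aligned
/// virtual address currently covered by a present large-page PDE. The entry is
/// unmapped before the `LocalMap` is returned, so callers start with an empty
/// local map.
///
/// Illustrative sketch only (not part of the original source): `LOCAL_MAP_VA`
/// and `gpa_range` are placeholders chosen by the caller.
///
/// ```ignore
/// let mut local_map = init_local_map(LOCAL_MAP_VA);
/// {
///     // Temporarily map a physical range (which must not cross a 2MB page
///     // boundary) as confidential and zero it.
///     let mapping = local_map.map_pages(gpa_range, true);
///     mapping.data.fill(0);
/// } // Dropping the mapping unmaps the page and flushes the TLB entry.
/// ```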
pub fn init_local_map(va: u64) -> LocalMap<'static> {
    assert!(va % X64_LARGE_PAGE_SIZE == 0);

    // SAFETY: the local map is only initialized once, and the provided VA is
    // asserted to be covered by a present large-page PDE.
    let local_map = unsafe {
        assert!(!LOCAL_MAP_INITIALIZED.get());
        LOCAL_MAP_INITIALIZED.set(true);
        let entry = get_pde_for_va(va);
        assert!(entry.is_present() && entry.is_large_page());

        LocalMap {
            pte_ptr: core::ptr::from_mut(entry),
            va,
            _dummy: PhantomData,
        }
    };

    unmap_page_helper(&local_map);
    local_map
}

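/// The virtual address of a 2MB-aligned, shared (host-visible) large page used
/// for hypercall input and output. Holding this type implies the backing page
/// directory entry has been validated as a shared large-page mapping.
///
/// Illustrative flow (page-table side only; `va` is a placeholder supplied by
/// the caller, not part of the original source):
///
/// ```ignore
/// // SAFETY: `va` covers a mapped 2MB page reserved for hypercall I/O.
/// unsafe { tdx_share_large_page(va) };
/// let page = unsafe { TdxHypercallPage::new(va) };
/// // ... issue hypercalls using page.input() and page.output() ...
/// tdx_unshare_large_page(page);
/// ```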
pub struct TdxHypercallPage(u64);

impl TdxHypercallPage {
    /// Validates that `va` is a 2MB-aligned virtual address whose page directory
    /// entry is present, a large page, and marked shared, then wraps it.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `va` is covered by the active page tables
    /// and is reserved for hypercall input/output.
    pub unsafe fn new(va: u64) -> Self {
        // SAFETY: the caller guarantees the VA is mapped; the asserts validate
        // that it is a shared, 2MB-aligned large-page mapping.
        unsafe {
            let entry = get_pde_for_va(va);
            assert!(entry.is_present() && entry.is_large_page());
            assert!(va % X64_LARGE_PAGE_SIZE == 0);
            assert!(entry.tdx_is_shared());
            TdxHypercallPage(va)
        }
    }

    /// The base virtual address of the shared large page.
    pub fn base(&self) -> u64 {
        self.0
    }

    /// The virtual address of the hypercall input page.
    pub fn input(&self) -> u64 {
        self.0
    }

    /// The virtual address of the hypercall output page, immediately following
    /// the input page.
    pub fn output(&self) -> u64 {
        self.0 + HV_PAGE_SIZE
    }
}

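/// Marks the page directory entry covering `va` as shared by setting the TDX
/// shared GPA boundary bit. This only updates the page table entry; it does not
/// itself notify the host.
///
/// # Safety
///
/// The caller must guarantee that `va` is covered by the active page tables and
/// that it is safe for the mapping to become shared.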
pub unsafe fn tdx_share_large_page(va: u64) {
    // SAFETY: the caller guarantees the VA is covered by the active page tables.
    unsafe {
        let entry = get_pde_for_va(va);
        entry.tdx_set_shared();
    }
}

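/// Marks the page directory entry backing a [`TdxHypercallPage`] as private
/// again. Taking the page by value ensures its shared VA can no longer be used
/// through the `TdxHypercallPage` once it has been unshared.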
pub fn tdx_unshare_large_page(va: TdxHypercallPage) {
    // SAFETY: a TdxHypercallPage guarantees its VA is a valid, mapped large page.
    unsafe {
        let entry = get_pde_for_va(va.base());
        entry.tdx_set_private();
    }
}