// openhcl_boot/arch/x86_64/address_space.rs
use crate::single_threaded::SingleThreaded;
14use core::arch::asm;
15use core::cell::Cell;
16use core::marker::PhantomData;
17use core::sync::atomic::AtomicU64;
18use core::sync::atomic::Ordering;
19use core::sync::atomic::compiler_fence;
20use hvdef::HV_PAGE_SIZE;
21use memory_range::MemoryRange;
22use x86defs::X64_LARGE_PAGE_SIZE;
23use x86defs::tdx::TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
24use zerocopy::FromBytes;
25use zerocopy::IntoBytes;
26use zerocopy::KnownLayout;
27
28const X64_PTE_PRESENT: u64 = 1;
29const X64_PTE_READ_WRITE: u64 = 1 << 1;
30const X64_PTE_ACCESSED: u64 = 1 << 5;
31const X64_PTE_DIRTY: u64 = 1 << 6;
32const X64_PTE_LARGE_PAGE: u64 = 1 << 7;
33const X64_PTE_CONFIDENTIAL: u64 = 1 << 51;
34
35const PAGE_TABLE_ENTRY_COUNT: usize = 512;
36
37const X64_PAGE_SHIFT: u64 = 12;
38const X64_PTE_BITS: u64 = 9;
39
/// A single x86-64 page-table entry, stored as an atomic 64-bit value so
/// updates to a live table are whole-word stores.
#[derive(Debug, IntoBytes, KnownLayout, FromBytes)]
#[repr(transparent)]
struct PageTableEntry {
    // Raw PTE bits; see the X64_PTE_* constants above for the flag layout.
    entry: AtomicU64,
}
/// The kinds of mapping a PTE built by [`PageTableEntry::set_entry`] can
/// describe.
#[derive(Debug, Copy, Clone)]
pub enum PageTableEntryType {
    /// A 2MB large-page leaf mapping; the payload is the 2MB-aligned
    /// physical address.
    Leaf2MbPage(u64),
}
49
50impl PageTableEntry {
51 fn write_pte(&mut self, val: u64) {
52 self.entry.store(val, Ordering::SeqCst);
53 }
54
55 fn read_pte(&self) -> u64 {
56 self.entry.load(Ordering::Relaxed)
57 }
58
59 pub fn set_entry(&mut self, entry_type: PageTableEntryType, confidential: bool) {
62 let mut entry: u64 = X64_PTE_PRESENT | X64_PTE_ACCESSED | X64_PTE_READ_WRITE;
63 if confidential {
64 entry |= X64_PTE_CONFIDENTIAL;
65 }
66
67 match entry_type {
68 PageTableEntryType::Leaf2MbPage(address) => {
69 assert!(address % X64_LARGE_PAGE_SIZE == 0);
71 entry |= address;
72 entry |= X64_PTE_LARGE_PAGE | X64_PTE_DIRTY;
73 }
74 }
75
76 self.write_pte(entry);
77 }
78
79 pub fn is_present(&self) -> bool {
80 self.read_pte() & X64_PTE_PRESENT == X64_PTE_PRESENT
81 }
82
83 pub fn is_large_page(&self) -> bool {
84 self.read_pte() & X64_PTE_LARGE_PAGE == X64_PTE_LARGE_PAGE
85 }
86
87 pub fn get_addr(&self) -> u64 {
88 const VALID_BITS: u64 = 0x000f_ffff_ffff_f000;
89
90 self.read_pte() & VALID_BITS & !X64_PTE_CONFIDENTIAL
91 }
92
93 pub fn clear(&mut self) {
94 self.write_pte(0);
95 }
96
97 pub fn tdx_is_shared(&mut self) -> bool {
99 let val = self.read_pte();
100 val & TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT == TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT
101 }
102
103 pub fn tdx_set_shared(&mut self) {
105 let mut val = self.read_pte();
106 val |= TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
107 self.write_pte(val);
108 }
109
110 pub fn tdx_set_private(&mut self) {
112 let mut val = self.read_pte();
113 val &= !TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;
114 self.write_pte(val);
115 }
116}
117
/// One x86-64 page-table page: 512 eight-byte entries.
#[repr(C)]
#[derive(Debug, IntoBytes, KnownLayout, FromBytes)]
struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRY_COUNT],
}
123
124impl PageTable {
125 pub fn entry(&mut self, gva: u64, level: u8) -> &mut PageTableEntry {
128 let index = get_amd64_pte_index(gva, level as u64) as usize;
129 &mut self.entries[index]
130 }
131}
132
133fn get_amd64_pte_index(gva: u64, page_map_level: u64) -> u64 {
135 let index = gva >> (X64_PAGE_SHIFT + page_map_level * X64_PTE_BITS);
136 index & ((1 << X64_PTE_BITS) - 1)
137}
138
/// A reusable 2MB virtual-address window for temporarily mapping arbitrary
/// guest physical ranges (see [`init_local_map`]).
pub struct LocalMap<'a> {
    // The page-directory entry (large-page PDE) that backs the window.
    pte_ptr: *mut PageTableEntry,
    // Base virtual address of the window; 2MB-aligned (asserted at init).
    va: u64,
    _dummy: PhantomData<&'a ()>,
}
146
impl<'a> LocalMap<'a> {
    /// Maps `range` into the local-map window and returns a guard exposing
    /// the bytes; the mapping is torn down when the guard drops.
    ///
    /// `range` must fit within a single 2MB-aligned region (asserted).
    /// `confidential` sets the confidential bit in the new PTE.
    ///
    /// # Panics
    /// Panics if the range crosses a 2MB boundary or the window is already
    /// mapped.
    pub fn map_pages<'b>(
        &'b mut self,
        range: MemoryRange,
        confidential: bool,
    ) -> LocalMapMapping<'a, 'b> {
        let offset = range.start() % X64_LARGE_PAGE_SIZE;
        // The whole range must lie inside one large page.
        assert!(offset + range.len() <= X64_LARGE_PAGE_SIZE, "{range}");

        let aligned_gpa = range.start() - offset;
        let entry = self.local_map_entry();
        // The window must currently be unmapped.
        assert!(!entry.is_present());
        entry.set_entry(PageTableEntryType::Leaf2MbPage(aligned_gpa), confidential);
        let va = self.va + offset;
        // Keep the PTE install from being reordered after accesses through
        // the new mapping.
        compiler_fence(Ordering::SeqCst);
        // SAFETY: the window VA is now backed by the 2MB mapping installed
        // above, and the guard's exclusive borrow of `self` prevents a second
        // concurrent mapping of the same window.
        let buffer =
            unsafe { core::slice::from_raw_parts_mut(va as *mut u8, range.len() as usize) };
        LocalMapMapping {
            data: buffer,
            local_map: self,
        }
    }

    /// Returns the PDE backing the local-map window.
    fn local_map_entry(&self) -> &'a mut PageTableEntry {
        // SAFETY: `pte_ptr` was captured from the live page tables in
        // `init_local_map` — presumably valid for the whole lifetime 'a;
        // NOTE(review): soundness relies on no other alias of this PDE.
        unsafe { &mut *self.pte_ptr }
    }
}
182
/// Guard for an active local-map mapping; unmaps the window on drop.
pub struct LocalMapMapping<'a, 'b> {
    /// The mapped bytes of the requested range.
    pub data: &'a mut [u8],
    local_map: &'b mut LocalMap<'a>,
}
187
impl Drop for LocalMapMapping<'_, '_> {
    // Tear down the window mapping so the LocalMap can be reused.
    fn drop(&mut self) {
        unmap_page_helper(self.local_map);
    }
}
193
/// Clears the local map's PDE and flushes the window's TLB entry.
fn unmap_page_helper(local_map: &LocalMap<'_>) {
    // Ensure accesses through the mapping complete before the PTE is cleared.
    compiler_fence(Ordering::SeqCst);
    // SAFETY: `pte_ptr` points at the window's PDE; clearing it and issuing
    // `invlpg` on the window VA removes the mapping.
    unsafe {
        let entry = &mut *local_map.pte_ptr;
        entry.clear();
        let va = local_map.va;
        asm!("invlpg [{0}]", in(reg) va);
    }
}
206
207unsafe fn page_table_at_address(address: u64) -> &'static mut PageTable {
213 unsafe { &mut *(address as *mut u64).cast() }
215}
216
/// Walks the active page tables (rooted at CR3) and returns the
/// page-directory entry (level-1 entry) covering `va`.
///
/// # Panics
/// Panics if the PML4 or PDPT entry for `va` is not present.
///
/// # Safety
/// The caller must ensure the table addresses read from CR3 and the entries
/// are directly dereferenceable — NOTE(review): this presumably relies on the
/// boot environment's identity mapping; confirm against callers.
unsafe fn get_pde_for_va(va: u64) -> &'static mut PageTableEntry {
    let mut page_table_base: u64;

    // SAFETY: reading CR3 and walking the tables it points to, per the
    // function's safety contract.
    unsafe {
        asm!("mov {0}, cr3", out(reg) page_table_base);
        let pml4 = page_table_at_address(page_table_base);
        let entry = pml4.entry(va, 3);
        assert!(entry.is_present());
        let pdpt = page_table_at_address(entry.get_addr());
        let entry = pdpt.entry(va, 2);
        assert!(entry.is_present());
        let pd = page_table_at_address(entry.get_addr());
        pd.entry(va, 1)
    }
}
240
/// Whether [`init_local_map`] has already run; the local map may only be
/// initialized once.
static LOCAL_MAP_INITIALIZED: SingleThreaded<Cell<bool>> = SingleThreaded(Cell::new(false));
242
/// Initializes the local-map window at `va` and returns it, with the window
/// initially unmapped.
///
/// # Panics
/// Panics if `va` is not 2MB-aligned, if the PDE for `va` is not an existing
/// present large-page mapping, or if called more than once.
pub fn init_local_map(va: u64) -> LocalMap<'static> {
    assert!(va.is_multiple_of(X64_LARGE_PAGE_SIZE));

    // SAFETY: the single-init flag guarantees only one LocalMap ever owns
    // this PDE; the asserts confirm the PDE is a present large-page entry.
    let local_map = unsafe {
        assert!(!LOCAL_MAP_INITIALIZED.get());
        LOCAL_MAP_INITIALIZED.set(true);
        let entry = get_pde_for_va(va);
        assert!(entry.is_present() && entry.is_large_page());

        LocalMap {
            pte_ptr: core::ptr::from_mut(entry),
            va,
            _dummy: PhantomData,
        }
    };

    // Start with the window unmapped so map_pages' !is_present assert holds.
    unmap_page_helper(&local_map);
    local_map
}
268
/// Wraps the 2MB-aligned base VA of a TDX hypercall I/O region: the input
/// page at the base, the output page one `HV_PAGE_SIZE` above it.
pub struct TdxHypercallPage(u64);
273
274impl TdxHypercallPage {
275 pub unsafe fn new(va: u64) -> Self {
281 unsafe {
283 let entry = get_pde_for_va(va);
284 assert!(entry.is_present() & entry.is_large_page());
285 assert!(va.is_multiple_of(X64_LARGE_PAGE_SIZE));
286 assert!(entry.tdx_is_shared());
287 TdxHypercallPage(va)
288 }
289 }
290
291 pub fn base(&self) -> u64 {
293 self.0
294 }
295
296 pub fn input(&self) -> u64 {
298 self.0
299 }
300
301 pub fn output(&self) -> u64 {
303 self.0 + HV_PAGE_SIZE
304 }
305}
306
/// Marks the large page mapping `va` as shared by setting the TDX
/// shared-GPA-boundary bit in its PDE.
///
/// # Safety
/// The caller must ensure `va` is mapped by the active page tables and that
/// it is appropriate to share the page's contents.
pub unsafe fn tdx_share_large_page(va: u64) {
    // SAFETY: the caller guarantees `va` is mapped, so its PDE can be walked
    // and updated.
    unsafe {
        let entry = get_pde_for_va(va);
        entry.tdx_set_shared();
    }
}
319
/// Marks the hypercall region's large page private again, consuming the
/// [`TdxHypercallPage`] so the VA can no longer be used for hypercall I/O.
pub fn tdx_unshare_large_page(va: TdxHypercallPage) {
    // SAFETY: a TdxHypercallPage can only be constructed for a validated,
    // mapped large page, so walking and updating its PDE is sound.
    unsafe {
        let entry = get_pde_for_va(va.base());
        entry.tdx_set_private();
    }
}