#[cfg(target_arch = "x86_64")]
use crate::arch::TdxHypercallPage;
#[cfg(target_arch = "x86_64")]
use crate::arch::tdx::invoke_tdcall_hypercall;
use crate::single_threaded::SingleThreaded;
use arrayvec::ArrayVec;
use cfg_if::cfg_if;
use core::cell::RefCell;
use core::cell::UnsafeCell;
use core::mem::size_of;
use hvdef::HV_PAGE_SIZE;
use hvdef::Vtl;
use hvdef::hypercall::HvInputVtl;
#[cfg(target_arch = "x86_64")]
use hvdef::hypercall::StartVirtualProcessorX64;
use memory_range::MemoryRange;
use minimal_rt::arch::hypercall::invoke_hypercall;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;

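/// Page-aligned, page-sized buffer for use with hypercalls.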
#[repr(C, align(4096))]
struct HvcallPage {
    buffer: [u8; HV_PAGE_SIZE as usize],
}

impl HvcallPage {
    pub const fn new() -> Self {
        HvcallPage {
            buffer: [0; HV_PAGE_SIZE as usize],
        }
    }

    /// Address of the page's buffer.
    fn address(&self) -> u64 {
        let addr = self.buffer.as_ptr() as u64;

        // Page alignment is guaranteed by the `repr(align)` attribute.
        assert!(addr % HV_PAGE_SIZE == 0);

        addr
    }
}

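/// Static, reusable page for hypercall input.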
static HVCALL_INPUT: SingleThreaded<UnsafeCell<HvcallPage>> =
    SingleThreaded(UnsafeCell::new(HvcallPage::new()));

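/// Static, reusable page for hypercall output.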
static HVCALL_OUTPUT: SingleThreaded<UnsafeCell<HvcallPage>> =
    SingleThreaded(UnsafeCell::new(HvcallPage::new()));

static HVCALL: SingleThreaded<RefCell<HvCall>> = SingleThreaded(RefCell::new(HvCall {
    initialized: false,
    vtl: Vtl::Vtl0,
    #[cfg(target_arch = "x86_64")]
    tdx_io_page: None,
}));

/// Provides mechanisms to invoke hypercalls within the boot shim.
///
/// This uses static pages for hypercall input and output, so it must only be
/// used from a single thread (which the boot shim is).
pub struct HvCall {
    initialized: bool,
    vtl: Vtl,
    #[cfg(target_arch = "x86_64")]
    tdx_io_page: Option<TdxHypercallPage>,
}

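/// Returns an exclusive handle to the global [`HvCall`] instance.
///
/// Panics if the instance is already borrowed.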
#[track_caller]
pub fn hvcall() -> core::cell::RefMut<'static, HvCall> {
    HVCALL.borrow_mut()
}

impl HvCall {
    fn input_page() -> &'static mut HvcallPage {
        // SAFETY: the boot shim is single-threaded, and this static page is
        // only ever accessed through this function.
        unsafe { &mut *HVCALL_INPUT.get() }
    }

    fn output_page() -> &'static mut HvcallPage {
        // SAFETY: the boot shim is single-threaded, and this static page is
        // only ever accessed through this function.
        unsafe { &mut *HVCALL_OUTPUT.get() }
    }

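    /// Returns the address of the hypercall page, initializing hypercall
    /// support first if necessary.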
    #[cfg(target_arch = "x86_64")]
    pub fn hypercall_page(&mut self) -> u64 {
        self.init_if_needed();
        core::ptr::addr_of!(minimal_rt::arch::hypercall::HYPERCALL_PAGE) as u64
    }

    #[cfg(target_arch = "x86_64")]
    fn init_if_needed(&mut self) {
        if !self.initialized {
            self.initialize();
        }
    }

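    /// Initializes hypercall support and caches the VTL the shim is running
    /// in.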
    pub fn initialize(&mut self) {
        assert!(!self.initialized);

        // The hypervisor requires a nonzero guest OS ID to be registered
        // before it will accept hypercalls.
        let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1);
        crate::arch::hypercall::initialize(guest_os_id);
        self.initialized = true;

        self.vtl = self
            .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into())
            .map_or(Vtl::Vtl0, |status| {
                hvdef::HvRegisterVsmVpStatus::from(status.as_u64())
                    .active_vtl()
                    .try_into()
                    .unwrap()
            });
    }

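    /// Tears down hypercall support, releasing the TDX I/O page if one is in
    /// use.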
    pub fn uninitialize(&mut self) {
        if self.initialized {
            self.initialized = false;

            cfg_if! {
                if #[cfg(target_arch = "x86_64")] {
                    if self.tdx_io_page.is_some() {
                        self.uninitialize_tdx()
                    } else {
                        crate::arch::hypercall::uninitialize();
                    }
                } else {
                    crate::arch::hypercall::uninitialize();
                }
            }
        }
    }

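    /// Returns the VTL the shim is running in, as cached at initialization.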
    pub fn vtl(&mut self) -> Vtl {
        assert!(self.initialized);
        self.vtl
    }

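    /// Makes a hypercall.
    ///
    /// `rep_count` is `Some` for rep hypercalls.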
    fn dispatch_hvcall(
        &mut self,
        code: hvdef::HypercallCode,
        rep_count: Option<usize>,
    ) -> hvdef::hypercall::HypercallOutput {
        assert!(self.initialized);

        let control = hvdef::hypercall::Control::new()
            .with_code(code.0)
            .with_rep_count(rep_count.unwrap_or_default());

        #[cfg(target_arch = "x86_64")]
        if self.tdx_io_page.is_some() {
            return invoke_tdcall_hypercall(control, self.tdx_io_page.as_ref().unwrap());
        }
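        // SAFETY: the input and output pages are page-aligned, properly
        // sized, and reserved exclusively for hypercall use.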
        unsafe {
            invoke_hypercall(
                control,
                Self::input_page().address(),
                Self::output_page().address(),
            )
        }
    }

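    /// Hypercall to set a single VP register on the current VP and VTL.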
    pub fn set_register(
        &mut self,
        name: hvdef::HvRegisterName,
        value: hvdef::HvRegisterValue,
    ) -> Result<(), hvdef::HvError> {
        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::GetSetVpRegisters>();

        let header = hvdef::hypercall::GetSetVpRegisters {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index: hvdef::HV_VP_INDEX_SELF,
            target_vtl: HvInputVtl::CURRENT_VTL,
            rsvd: [0; 3],
        };

        header
            .write_to_prefix(Self::input_page().buffer.as_mut_slice())
            .unwrap();

        let reg = hvdef::hypercall::HvRegisterAssoc {
            name,
            pad: Default::default(),
            value,
        };

        reg.write_to_prefix(&mut Self::input_page().buffer[HEADER_SIZE..])
            .unwrap();

        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1));

        output.result()
    }

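    /// Hypercall to read a single VP register on the current VP and VTL.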
    pub fn get_register(
        &mut self,
        name: hvdef::HvRegisterName,
    ) -> Result<hvdef::HvRegisterValue, hvdef::HvError> {
        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::GetSetVpRegisters>();

        let header = hvdef::hypercall::GetSetVpRegisters {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index: hvdef::HV_VP_INDEX_SELF,
            target_vtl: HvInputVtl::CURRENT_VTL,
            rsvd: [0; 3],
        };

        header
            .write_to_prefix(Self::input_page().buffer.as_mut_slice())
            .unwrap();
        name.write_to_prefix(&mut Self::input_page().buffer[HEADER_SIZE..])
            .unwrap();

        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1));
        output.result()?;
        let value = hvdef::HvRegisterValue::read_from_prefix(&Self::output_page().buffer)
            .unwrap()
            .0;
        Ok(value)
    }

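    /// Hypercall to apply VTL2 protections to the pages in `range`.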
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    pub fn apply_vtl2_protections(&mut self, range: MemoryRange) -> Result<(), hvdef::HvError> {
        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::ModifyVtlProtectionMask>();
        const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::<u64>();

        let header = hvdef::hypercall::ModifyVtlProtectionMask {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE,
            target_vtl: HvInputVtl::CURRENT_VTL,
            reserved: [0; 3],
        };

        let mut current_page = range.start_4k_gpn();
        while current_page < range.end_4k_gpn() {
            let remaining_pages = range.end_4k_gpn() - current_page;
            let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64) as usize;

            header
                .write_to_prefix(Self::input_page().buffer.as_mut_slice())
                .unwrap();

            let mut input_offset = HEADER_SIZE;
            for i in 0..count {
                let page_num = current_page + i as u64;
                page_num
                    .write_to_prefix(&mut Self::input_page().buffer[input_offset..])
                    .unwrap();
                input_offset += size_of::<u64>();
            }

            let output = self.dispatch_hvcall(
                hvdef::HypercallCode::HvCallModifyVtlProtectionMask,
                Some(count),
            );

            output.result()?;

            current_page += count as u64;
        }

        Ok(())
    }

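    /// Hypercall to enable VTL2 on the given VP.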
    #[cfg(target_arch = "aarch64")]
    pub fn enable_vp_vtl(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> {
        let header = hvdef::hypercall::EnableVpVtlArm64 {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index,
            target_vtl: Vtl::Vtl2.into(),
            reserved: [0; 3],
            vp_vtl_context: zerocopy::FromZeros::new_zeroed(),
        };

        header
            .write_to_prefix(Self::input_page().buffer.as_mut_slice())
            .unwrap();

        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None);
        match output.result() {
            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
            err => err,
        }
    }

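    /// Hypercall to accept VTL2 pages in `range` with no host visibility.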
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    pub fn accept_vtl2_pages(
        &mut self,
        range: MemoryRange,
        memory_type: hvdef::hypercall::AcceptMemoryType,
    ) -> Result<(), hvdef::HvError> {
        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::AcceptGpaPages>();
        const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::<u64>();

        let mut current_page = range.start_4k_gpn();
        while current_page < range.end_4k_gpn() {
            let header = hvdef::hypercall::AcceptGpaPages {
                partition_id: hvdef::HV_PARTITION_ID_SELF,
                page_attributes: hvdef::hypercall::AcceptPagesAttributes::new()
                    .with_memory_type(memory_type.0)
                    .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE)
                    .with_vtl_set(1 << 2), // VTL 2
                vtl_permission_set: hvdef::hypercall::VtlPermissionSet {
                    vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE],
                },
                gpa_page_base: current_page,
            };

            let remaining_pages = range.end_4k_gpn() - current_page;
            let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64) as usize;

            header
                .write_to_prefix(Self::input_page().buffer.as_mut_slice())
                .unwrap();

            let output =
                self.dispatch_hvcall(hvdef::HypercallCode::HvCallAcceptGpaPages, Some(count));

            output.result()?;

            current_page += count as u64;
        }

        Ok(())
    }

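    /// Gets the corresponding VP indices for a list of VP hardware IDs (APIC
    /// IDs on x64, MPIDRs on ARM64).
    ///
    /// This always queries VTL0, since the hardware IDs are the same across
    /// the VTLs.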
    pub fn get_vp_index_from_hw_id<const N: usize>(
        &mut self,
        hw_ids: &[HwId],
        output: &mut ArrayVec<u32, N>,
    ) -> Result<(), hvdef::HvError> {
        let header = hvdef::hypercall::GetVpIndexFromApicId {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            target_vtl: 0,
            reserved: [0; 7],
        };

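        // Split the query up to avoid exceeding the hypercall input size.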
        const MAX_PER_CALL: usize = 512;

        for hw_ids in hw_ids.chunks(MAX_PER_CALL) {
            header
                .write_to_prefix(Self::input_page().buffer.as_mut_slice())
                .unwrap();
            hw_ids
                .write_to_prefix(&mut Self::input_page().buffer[header.as_bytes().len()..])
                .unwrap();

            let r = self.dispatch_hvcall(
                hvdef::HypercallCode::HvCallGetVpIndexFromApicId,
                Some(hw_ids.len()),
            );

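            // Copy out however many results were written before checking the
            // status, since the hypercall may partially succeed.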
            let n = r.elements_processed();
            output.extend(
                <[u32]>::ref_from_bytes(&Self::output_page().buffer[..n * 4])
                    .unwrap()
                    .iter()
                    .copied(),
            );
            r.result()?;
            assert_eq!(n, hw_ids.len());
        }

        Ok(())
    }
}

#[cfg(target_arch = "x86_64")]
impl HvCall {
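    /// Initializes hypercall support on a TDX platform, using the provided
    /// pre-mapped page for hypercall input and output.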
    pub fn initialize_tdx(&mut self, tdx_io_page: TdxHypercallPage) {
        assert!(!self.initialized);
        self.initialized = true;
        self.vtl = Vtl::Vtl2;
        self.tdx_io_page = Some(tdx_io_page);

        let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1);
        crate::arch::tdx::initialize_hypercalls(
            guest_os_id.into(),
            self.tdx_io_page.as_ref().unwrap(),
        );
    }

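    /// Tears down TDX hypercall support, consuming the I/O page.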
    pub fn uninitialize_tdx(&mut self) {
        crate::arch::tdx::uninitialize_hypercalls(
            self.tdx_io_page
                .take()
                .expect("an initialized instance of HvCall on TDX must have an io page"),
        );
    }

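    /// Hypercall to enable VTL2 on the given VP, using the TDX I/O page for
    /// hypercall input.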
    pub fn tdx_enable_vp_vtl2(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> {
        let header = hvdef::hypercall::EnableVpVtlX64 {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index,
            target_vtl: Vtl::Vtl2.into(),
            reserved: [0; 3],
            vp_vtl_context: zerocopy::FromZeros::new_zeroed(),
        };
        assert!(self.initialized);

        let input_page_addr = self
            .tdx_io_page
            .as_ref()
            .expect("an initialized instance of HvCall on TDX must have an io page")
            .input();
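        // SAFETY: the TDX I/O input page is a valid, exclusively owned,
        // page-sized buffer.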
        let input_page = unsafe { &mut *(input_page_addr as *mut [u8; 4096]) };
        header
            .write_to_prefix(input_page)
            .expect("unable to write to hypercall page");

        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None);
        match output.result() {
            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
            err => err,
        }
    }

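    /// Hypercall to start the given VP in VTL2, using the TDX I/O page for
    /// hypercall input.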
    pub fn tdx_start_vp(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> {
        let header = StartVirtualProcessorX64 {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index,
            target_vtl: Vtl::Vtl2.into(),
            rsvd0: 0,
            rsvd1: 0,
            vp_context: zerocopy::FromZeros::new_zeroed(),
        };
        assert!(self.initialized);

        let input_page_addr = self
            .tdx_io_page
            .as_ref()
            .expect("an initialized instance of HvCall on TDX must have an io page")
            .input();
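        // SAFETY: the TDX I/O input page is a valid, exclusively owned,
        // page-sized buffer.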
        let input_page = unsafe { &mut *(input_page_addr as *mut [u8; 4096]) };
        header
            .write_to_prefix(input_page)
            .expect("unable to write to hypercall page");

        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None);
        match output.result() {
            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
            err => err,
        }
    }
}

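/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is
/// the APIC ID on x64.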
#[cfg(target_arch = "x86_64")]
pub type HwId = u32;

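/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is
/// the MPIDR on ARM64.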
#[cfg(target_arch = "aarch64")]
pub type HwId = u64;