sidecar/arch/x86_64/mod.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! x86_64-specific sidecar code.

#![cfg(target_arch = "x86_64")]
// UNSAFETY: Interacting with low level hardware and memory primitives.
#![expect(unsafe_code)]

mod init;
mod temporary_map;
mod vp;

use core::fmt::Write;
use core::sync::atomic::AtomicBool;
use core::sync::atomic::Ordering::Acquire;
use hvdef::HvError;
use hvdef::HypercallCode;
use minimal_rt::arch::Serial;
use minimal_rt::arch::msr::write_msr;
use x86defs::Exception;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;

mod addr_space {
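    //! Accessors for the per-VP virtual address space.
    //!
    //! Each VP has a 2MiB region starting 2MiB past the sidecar image base,
    //! described by a single 512-entry page table. The table maps itself at
    //! slot `PTE_SELF`, so the VP can read and update its own PTEs. The other
    //! `PTE_*` constants below name the remaining slots; the accessors in this
    //! module return the virtual or physical addresses of the pages mapped
    //! there.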
    use super::VpGlobals;
    use memory_range::MemoryRange;

    // These must match their use in entry.S.
    const PTE_SELF: usize = 0;
    const PTE_HYPERCALL_INPUT: usize = 1;
    const PTE_HYPERCALL_OUTPUT: usize = 2;
    const PTE_COMMAND_PAGE: usize = 3;
    const PTE_GLOBALS: usize = 4;
    const PTE_ASSIST_PAGE: usize = 5;
    const PTE_CONTROL_PAGE: usize = 6;
    const PTE_REGISTER_PAGE: usize = 7;
    const PTE_TEMPORARY_MAP: usize = 256;
    const PTE_STACK: usize = PTE_STACK_END - sidecar_defs::STACK_PAGES;
    const PTE_STACK_END: usize = 512;

    const PAGE_SIZE: u64 = 0x1000;

    unsafe extern "C" {
        static __ehdr_start: u8;
    }

    fn pte_data(addr: u64) -> x86defs::Pte {
        x86defs::Pte::new()
            .with_address(addr)
            .with_read_write(true)
            .with_present(true)
            .with_no_execute(true)
    }

    /// Initializes an AP's page table: maps the stack, globals, assist page,
    /// and hypercall input/output pages from `memory`, plus the provided
    /// control, command, and register pages.
    ///
    /// Returns the physical address of the globals page.
    pub fn init_ap(
        pt: &mut [x86defs::Pte; 512],
        pt_pa: u64,
        control_page_pa: u64,
        command_page_pa: u64,
        reg_page_pa: u64,
        memory: &mut impl Iterator<Item = u64>,
    ) -> u64 {
        pt.fill(x86defs::Pte::new());
        for i in 0..sidecar_defs::STACK_PAGES {
            pt[PTE_STACK + i] = pte_data(memory.next().unwrap());
        }
        pt[PTE_SELF] = pte_data(pt_pa);
        pt[PTE_COMMAND_PAGE] = pte_data(command_page_pa);
        let globals_pa = memory.next().unwrap();
        pt[PTE_GLOBALS] = pte_data(globals_pa);
        pt[PTE_ASSIST_PAGE] = pte_data(memory.next().unwrap());
        pt[PTE_HYPERCALL_INPUT] = pte_data(memory.next().unwrap());
        pt[PTE_HYPERCALL_OUTPUT] = pte_data(memory.next().unwrap());
        pt[PTE_CONTROL_PAGE] = pte_data(control_page_pa);
        pt[PTE_REGISTER_PAGE] = pte_data(reg_page_pa);
        globals_pa
    }

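    /// Returns the virtual address of the start of the sidecar image (the ELF
    /// header).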
    fn base_address() -> usize {
        core::ptr::addr_of!(__ehdr_start) as usize
    }

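    /// Returns the virtual address of per-VP page `page`; the per-VP region
    /// starts 2MiB past the image base.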
    fn per_vp(page: usize) -> usize {
        base_address() + 0x200000 + page * PAGE_SIZE as usize
    }

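    /// Returns a pointer to the PTE for per-VP page `page`, accessed through
    /// the page table's self-mapping at `PTE_SELF`.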
    fn pte(page: usize) -> *mut x86defs::Pte {
        (per_vp(PTE_SELF) as *mut x86defs::Pte).wrapping_add(page)
    }

    pub fn temporary_map() -> usize {
        per_vp(PTE_TEMPORARY_MAP)
    }

    pub fn temp_ptes() -> *mut x86defs::Pte {
        pte(PTE_TEMPORARY_MAP)
    }

    pub fn stack() -> MemoryRange {
        MemoryRange::new(per_vp(PTE_STACK) as u64..per_vp(PTE_STACK_END) as u64)
    }

    pub fn stack_base_pa() -> usize {
        // SAFETY: the stack PTE is not changing concurrently.
        unsafe { pte(PTE_STACK).read() }.address() as usize
    }

    pub fn command_page() -> *mut sidecar_defs::CommandPage {
        per_vp(PTE_COMMAND_PAGE) as *mut _
    }

    pub fn globals() -> *mut VpGlobals {
        (per_vp(PTE_GLOBALS)) as *mut _
    }

    pub fn assist_page() -> *mut hvdef::HvVpAssistPage {
        (per_vp(PTE_ASSIST_PAGE)) as *mut _
    }

    pub fn assist_page_pa() -> u64 {
        // SAFETY: the assist page PTE is not changing concurrently.
        unsafe { pte(PTE_ASSIST_PAGE).read() }.address()
    }

    pub fn register_page() -> *mut hvdef::HvX64RegisterPage {
        (per_vp(PTE_REGISTER_PAGE)) as *mut _
    }

    pub fn register_page_pa() -> u64 {
        // SAFETY: the register page PTE is not changing concurrently.
        unsafe { pte(PTE_REGISTER_PAGE).read() }.address()
    }

    pub fn hypercall_input() -> *mut [u8; 4096] {
        (per_vp(PTE_HYPERCALL_INPUT)) as *mut [u8; 4096]
    }

    pub fn hypercall_input_pa() -> u64 {
        // SAFETY: the hypercall input PTE is not changing concurrently.
        unsafe { pte(PTE_HYPERCALL_INPUT).read() }.address()
    }

    pub fn hypercall_output() -> *mut [u8; 4096] {
        (per_vp(PTE_HYPERCALL_OUTPUT)) as *mut [u8; 4096]
    }

    pub fn hypercall_output_pa() -> u64 {
        // SAFETY: the hypercall output PTE is not changing concurrently.
        unsafe { pte(PTE_HYPERCALL_OUTPUT).read() }.address()
    }

    pub fn control_page() -> *const sidecar_defs::ControlPage {
        (per_vp(PTE_CONTROL_PAGE)) as *const _
    }
}

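/// Per-VP globals, stored in the page mapped at `PTE_GLOBALS`.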
struct VpGlobals {
    hv_vp_index: u32,
    node_cpu_index: u32,
    overlays_mapped: bool,
    register_page_mapped: bool,
}

const _: () = assert!(size_of::<VpGlobals>() <= 0x1000);

static mut VTL_RETURN_OFFSET: u16 = 0;
static mut VSM_CAPABILITIES: hvdef::HvRegisterVsmCapabilities =
    hvdef::HvRegisterVsmCapabilities::new();
static AFTER_INIT: AtomicBool = AtomicBool::new(false);
static ENABLE_LOG: AtomicBool = AtomicBool::new(false);

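/// Logs a formatted message to the serial port if logging is enabled.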
macro_rules! log {
    () => {};
    ($($arg:tt)*) => {
        if $crate::arch::x86_64::ENABLE_LOG.load(core::sync::atomic::Ordering::Relaxed) {
            $crate::arch::x86_64::log_fmt(format_args!($($arg)*));
        }
    };
}
use core::mem::size_of;
use hvdef::HvRegisterName;
use hvdef::HvRegisterValue;
use hvdef::hypercall::HvInputVtl;
pub(crate) use log;
use minimal_rt::arch::InstrIoAccess;

fn log_fmt(args: core::fmt::Arguments<'_>) {
    if ENABLE_LOG.load(Acquire) {
        if AFTER_INIT.load(Acquire) {
            // SAFETY: `hv_vp_index` is not being concurrently modified.
            // TODO: improve how per-VP globals work.
            let vp_index = unsafe { &*addr_space::globals() }.hv_vp_index;
            let _ = writeln!(Serial::new(InstrIoAccess), "sidecar#{vp_index}: {}", args);
        } else {
            let _ = writeln!(Serial::new(InstrIoAccess), "sidecar: {}", args);
        }
    }
}

#[cfg_attr(minimal_rt, panic_handler)]
#[cfg_attr(not(minimal_rt), expect(dead_code))]
fn panic(panic: &core::panic::PanicInfo<'_>) -> ! {
    let stack_va_to_pa = |ptr| {
        addr_space::stack()
            .offset_of(ptr as u64)
            .map(|offset| addr_space::stack_base_pa() + offset as usize)
    };
    minimal_rt::enlightened_panic::report(*b"SIDECARK", panic, stack_va_to_pa);
    if !AFTER_INIT.load(Acquire) {
        let _ = writeln!(Serial::new(InstrIoAccess), "{panic}");
    }
    minimal_rt::arch::fault();
}

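/// A `core::fmt::Write` implementation that appends to the fixed-size buffer
/// of a `sidecar_defs::CommandError`, truncating once the buffer is full.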
struct CommandErrorWriter<'a>(&'a mut sidecar_defs::CommandError);

impl Write for CommandErrorWriter<'_> {
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        let s = s.as_bytes();
        let buf = &mut self.0.buf[self.0.len as usize..];
        let n = buf.len().min(s.len());
        buf[..n].copy_from_slice(&s[..n]);
        self.0.len += n as u8;
        Ok(())
    }
}

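/// Issues a hypercall with the given code and rep count, using this VP's
/// hypercall input and output pages.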
fn hypercall(code: HypercallCode, rep_count: usize) -> Result<(), HvError> {
    let control = hvdef::hypercall::Control::new()
        .with_code(code.0)
        .with_rep_count(rep_count);

    // SAFETY: the caller guarantees the safety of the hypercall, including that
    // the input and output pages are not concurrently accessed.
    unsafe {
        minimal_rt::arch::hypercall::invoke_hypercall(
            control,
            addr_space::hypercall_input_pa(),
            addr_space::hypercall_output_pa(),
        )
        .result()
    }
}

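/// Gets a register for the current VP on the given target VTL via the
/// `HvCallGetVpRegisters` hypercall.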
fn get_hv_vp_register(
    target_vtl: HvInputVtl,
    name: HvRegisterName,
) -> Result<HvRegisterValue, HvError> {
    {
        // SAFETY: the input page is not concurrently accessed.
        let input = unsafe { &mut *addr_space::hypercall_input() };

        hvdef::hypercall::GetSetVpRegisters {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index: hvdef::HV_VP_INDEX_SELF,
            target_vtl,
            rsvd: [0; 3],
        }
        .write_to_prefix(input)
        .unwrap();

        name.write_to_prefix(&mut input[size_of::<hvdef::hypercall::GetSetVpRegisters>()..])
            .unwrap();
    }

    hypercall(HypercallCode::HvCallGetVpRegisters, 1)?;
    // SAFETY: the output is not concurrently accessed.
    let output = unsafe { &*addr_space::hypercall_output() };
    Ok(HvRegisterValue::read_from_prefix(output).unwrap().0) // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
}

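/// Sets a register for the current VP on the given target VTL via the
/// `HvCallSetVpRegisters` hypercall.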
fn set_hv_vp_register(
    target_vtl: HvInputVtl,
    name: HvRegisterName,
    value: HvRegisterValue,
) -> Result<(), HvError> {
    {
        // SAFETY: the input page is not concurrently accessed.
        let input = unsafe { &mut *addr_space::hypercall_input() };

        hvdef::hypercall::GetSetVpRegisters {
            partition_id: hvdef::HV_PARTITION_ID_SELF,
            vp_index: hvdef::HV_VP_INDEX_SELF,
            target_vtl,
            rsvd: [0; 3],
        }
        .write_to_prefix(input)
        .unwrap();

        hvdef::hypercall::HvRegisterAssoc {
            name,
            pad: [0; 3],
            value,
        }
        .write_to_prefix(&mut input[size_of::<hvdef::hypercall::GetSetVpRegisters>()..])
        .unwrap();
    }

    hypercall(HypercallCode::HvCallSetVpRegisters, 1)?;
    Ok(())
}

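/// Signals end of interrupt to the local APIC via the x2APIC EOI MSR.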
fn eoi() {
    // SAFETY: no safety requirements for EOI.
    unsafe {
        write_msr(x86defs::apic::ApicRegister::EOI.x2apic_msr(), 0);
    }
}

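/// Handles an interrupt (called from entry.S) by acknowledging it with an EOI
/// and logging it.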
#[cfg_attr(not(minimal_rt), expect(dead_code))]
extern "C" fn irq_handler() {
    eoi();
    log!("irq");
}

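/// Handles an unexpected exception (called from entry.S) by panicking with the
/// exception, faulting address, and stack pointer.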
#[cfg_attr(not(minimal_rt), expect(dead_code))]
extern "C" fn exception_handler(exception: Exception, rsp: u64) -> ! {
    // SAFETY: reading cr2 has no safety requirements.
    let cr2 = unsafe {
        let cr2: u64;
        core::arch::asm!("mov {}, cr2", out(reg) cr2);
        cr2
    };
    panic!("unexpected exception {exception:?} cr2 = {cr2:#x} rsp = {rsp:#x}");
}

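// The entry point, relocation, and the interrupt/exception stubs live in
// entry.S; bind them to the Rust routines above.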
#[cfg(minimal_rt)]
core::arch::global_asm! {
    include_str!("entry.S"),
    start = sym init::start,
    relocate = sym minimal_rt::reloc::relocate,
    irq_handler = sym irq_handler,
    exception_handler = sym exception_handler,
}