sidecar/arch/x86_64/
vp.rs

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Sidecar code that runs on the APs, after initialization. This code all runs
//! with per-AP page tables, concurrently with the main kernel.

use super::CommandErrorWriter;
use super::VSM_CAPABILITIES;
use super::VTL_RETURN_OFFSET;
use super::VpGlobals;
use super::addr_space;
use super::get_hv_vp_register;
use super::hypercall;
use super::log;
use super::set_hv_vp_register;
use core::fmt::Write;
use core::mem::size_of;
use core::ptr::addr_of;
use core::sync::atomic::AtomicU8;
use core::sync::atomic::Ordering::Acquire;
use core::sync::atomic::Ordering::Relaxed;
use core::sync::atomic::Ordering::Release;
use hvdef::HV_PAGE_SHIFT;
use hvdef::HV_PARTITION_ID_SELF;
use hvdef::HV_VP_INDEX_SELF;
use hvdef::HvStatus;
use hvdef::HvVtlEntryReason;
use hvdef::HvX64RegisterName;
use hvdef::HypercallCode;
use hvdef::hypercall::HvInputVtl;
use hvdef::hypercall::HvRegisterAssoc;
use hvdef::hypercall::TranslateVirtualAddressX64;
use minimal_rt::arch::hypercall::HYPERCALL_PAGE;
use minimal_rt::arch::msr::read_msr;
use minimal_rt::arch::msr::write_msr;
use sidecar_defs::CommandPage;
use sidecar_defs::ControlPage;
use sidecar_defs::CpuContextX64;
use sidecar_defs::CpuStatus;
use sidecar_defs::GetSetVpRegisterRequest;
use sidecar_defs::RunVpResponse;
use sidecar_defs::SidecarCommand;
use sidecar_defs::TranslateGvaRequest;
use sidecar_defs::TranslateGvaResponse;
use x86defs::apic::ApicBase;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

/// Entry point for an AP. Called with per-VP state (page tables, stack,
/// globals) already initialized, and IDT and GDT set appropriately.
///
/// Initializes the fs base and APIC, reports ready to the BSP, runs the
/// command dispatch loop until removal, then tears down the hypervisor
/// overlays and APIC state and parks forever.
///
/// # Safety
/// Must be called as an AP entry point.
pub unsafe fn ap_entry() -> ! {
    // SAFETY: the globals are only accessed by this CPU, and so there are no
    // concurrent accesses.
    let globals = unsafe { &mut *addr_space::globals() };

    // Set fs base to point to the CPU context, for use in `run_vp`.
    //
    // SAFETY: just getting the address.
    let fs_base = unsafe { addr_of!((*addr_space::command_page()).cpu_context) as u64 };
    // SAFETY: no safety requirements.
    unsafe { write_msr(x86defs::X64_MSR_FS_BASE, fs_base) };

    // Enable the X2APIC.
    //
    // SAFETY: no safety requirements.
    let apic_base = ApicBase::from(unsafe { read_msr(x86defs::X86X_MSR_APIC_BASE) });
    // SAFETY: no safety requirements.
    unsafe {
        write_msr(
            x86defs::X86X_MSR_APIC_BASE,
            apic_base.with_enable(true).with_x2apic(true).into(),
        )
    }

    // Software enable the APIC (the spurious vector is set to all ones).
    //
    // SAFETY: the IDT is initialized appropriately.
    unsafe {
        write_msr(
            x86defs::apic::ApicRegister::SVR.x2apic_msr(),
            u32::from(x86defs::apic::Svr::new().with_enable(true).with_vector(!0)).into(),
        )
    }

    // Notify the BSP that we are ready. The BSP must have left this CPU in
    // the RUN state; transition to IDLE with release ordering so the BSP
    // observes this CPU's initialization.
    let old_state = globals.cpu_status().swap(CpuStatus::IDLE.0, Release);
    assert_eq!(old_state, CpuStatus::RUN.0);

    // Run the AP command dispatch loop until we receive a remove request.
    ap_run(globals);

    log!("removing");

    // Disable the VP assist page.
    //
    // SAFETY: no safety requirements.
    unsafe { write_msr(hvdef::HV_X64_MSR_VP_ASSIST_PAGE, 0) };

    // Disable the register page, but only if it was successfully mapped by
    // `map_overlays`.
    if globals.register_page_mapped {
        set_hv_vp_register(
            HvInputVtl::CURRENT_VTL,
            HvX64RegisterName::RegisterPage.into(),
            0u64.into(),
        )
        .unwrap();
    }

    // Software disable the APIC. Leave the hardware enabled so that we can send
    // the response IPI.
    log!("disabling apic");
    // SAFETY: no safety requirements.
    unsafe {
        write_msr(
            x86defs::apic::ApicRegister::SVR.x2apic_msr(),
            u32::from(x86defs::apic::Svr::new().with_enable(false).with_vector(!0)).into(),
        );
    }
    globals.cpu_status().store(CpuStatus::REMOVED.0, Release);
    raise_attention();
    // Park forever; the predicate never produces a value.
    park_until(|| None)
}

/// Maps the hypervisor overlay pages (the VP assist page and the register
/// page) for this VP. Sets `globals.register_page_mapped` on success; failure
/// to map the register page is tolerated and only logged.
fn map_overlays(globals: &mut VpGlobals) {
    // Enable the VP assist page via its MSR.
    //
    // SAFETY: the VP assist page is reserved for this use and will not alias
    // with other Rust memory.
    unsafe {
        let assist = hvdef::HvRegisterVpAssistPage::new()
            .with_enabled(true)
            .with_gpa_page_number(addr_space::assist_page_pa() >> HV_PAGE_SHIFT);
        write_msr(hvdef::HV_X64_MSR_VP_ASSIST_PAGE, assist.into());
    }

    // Map the register page. We don't currently use it directly, but it is
    // provided to the VMM.
    let reg_page = hvdef::HvSynicSimpSiefp::new()
        .with_base_gpn(globals.reg_page_pa >> HV_PAGE_SHIFT)
        .with_enabled(true);
    let result = set_hv_vp_register(
        HvInputVtl::CURRENT_VTL,
        HvX64RegisterName::RegisterPage.into(),
        u64::from(reg_page).into(),
    );
    if let Err(err) = result {
        // This may be an expected condition if the hypervisor does not support
        // the register page for VTL2.
        log!("failed to map register page: {err}");
    } else {
        globals.register_page_mapped = true;
    }
}

/// Runs the command dispatch loop for an AP until a remove request is received.
fn ap_run(globals: &mut VpGlobals) {
    let cpu_status = globals.cpu_status();

    loop {
        // Wait (halted) until the status leaves IDLE, i.e. until the kernel
        // posts a request. Acquire ordering pairs with the kernel's release
        // store so the command page contents are visible.
        let status = park_until(|| {
            let status = CpuStatus(cpu_status.load(Acquire));
            (status != CpuStatus::IDLE).then_some(status)
        });
        match status {
            CpuStatus::RUN | CpuStatus::STOP => {
                // Still run the request if a stop is requested, since there
                // is no generic way to report that the request was
                // cancelled before it ran.
            }
            CpuStatus::REMOVE => return,
            status => panic!("unexpected cpu request {status:?}"),
        }

        // Dispatch on the command page.
        {
            // SAFETY: we now have exclusive access to the state.
            let command_page = unsafe { &mut *addr_space::command_page() };
            command_page.has_error = 0;
            // Consume the command so a stale value is never re-dispatched.
            let command = core::mem::replace(&mut command_page.command, SidecarCommand::NONE);
            log!("request {command:?}");
            match command {
                SidecarCommand::NONE => {}
                SidecarCommand::RUN_VP => run_vp(globals, command_page, cpu_status),
                SidecarCommand::GET_VP_REGISTERS => get_vp_registers(command_page),
                SidecarCommand::SET_VP_REGISTERS => set_vp_registers(command_page),
                SidecarCommand::TRANSLATE_GVA => translate_gva(command_page),
                command => set_error(command_page, format_args!("unknown command {command:?}")),
            }
        };

        // Return to IDLE (release ordering publishes the results written to
        // the command page) and notify the kernel that the request is done.
        log!("request done");
        cpu_status.store(CpuStatus::IDLE.0, Release);
        raise_attention();
    }
}

/// Returns a shared reference to the sidecar control page.
fn control() -> &'static ControlPage {
    let page = addr_space::control_page();
    // SAFETY: all mutable fields of the control page have interior mutability,
    // so dereferencing it as a shared reference is valid.
    unsafe { &*page }
}

impl VpGlobals {
    /// Returns this CPU's status word within the control page.
    fn cpu_status(&self) -> &'static AtomicU8 {
        let index = self.node_cpu_index as usize;
        &control().cpu_status[index]
    }
}

fn set_error(command_page: &mut CommandPage, err: impl core::fmt::Display) {
    command_page.has_error = 1;
    command_page.error.len = 0;
    let mut writer = CommandErrorWriter(&mut command_page.error);
    let _ = write!(writer, "{err}");
}

/// Handles `SidecarCommand::RUN_VP`: repeatedly enters the guest VP until an
/// intercept arrives, an error occurs, or the kernel requests a stop, then
/// writes a `RunVpResponse` into the command page.
fn run_vp(globals: &mut VpGlobals, command_page: &mut CommandPage, cpu_status: &AtomicU8) {
    // Map the register page and VP assist page now.
    //
    // The hypervisor has a concurrency bug if the pages are mapped while other
    // VPs are starting up, so work around this by delaying it until now.
    //
    // The VP assist page is only needed in this path. The register page is
    // technically used by the user-mode VMM earlier, but the hypervisor doesn't
    // mark it valid until the first time the VP is run anyway.
    if !globals.overlays_mapped {
        map_overlays(globals);
        globals.overlays_mapped = true;
    }

    // Keep re-entering the VP until the kernel asks us to stop or an
    // intercept needs to be reported. Relaxed is sufficient here: the load
    // only decides when to stop polling, and completion is published by the
    // caller's release store.
    let mut intercept = false;
    while cpu_status.load(Relaxed) != CpuStatus::STOP.0 {
        match run_vp_once(command_page) {
            Ok(true) => {
                intercept = true;
                break;
            }
            Ok(false) => {}
            // The error has already been recorded in the command page.
            Err(()) => return,
        }
    }

    RunVpResponse {
        intercept: intercept as u8,
    }
    .write_to_prefix(command_page.request_data.as_mut_bytes())
    .unwrap(); // PANIC: will not panic, since sizeof(RunVpResponse) is 1, whereas the buffer is statically declared as 16 bytes long.
}

/// Enters the guest VP once via the hypervisor's VTL return sequence.
///
/// Returns `Ok(true)` if the VP exited with an intercept (the intercept
/// message is copied into the command page), `Ok(false)` if it exited due to
/// an interrupt, and `Err(())` for an unexpected entry reason (which is
/// recorded via `set_error`).
fn run_vp_once(command_page: &mut CommandPage) -> Result<bool, ()> {
    let cpu_context = &mut command_page.cpu_context;
    // Write rax and rcx to the VP assist page.
    //
    // SAFETY: the assist page is not concurrently modified.
    unsafe {
        (*addr_space::assist_page()).vtl_control.registers = [
            cpu_context.gps[CpuContextX64::RAX],
            cpu_context.gps[CpuContextX64::RCX],
        ];
    }
    // Dispatch the VP by calling through the hypercall page at the VTL
    // return offset. Most GPRs are passed in/out via asm operands; rbx and
    // rbp (which the hypercall clobbers/uses) are spilled to and reloaded
    // from fs-relative slots, since fs base points at `cpu_context`.
    //
    // NOTE(review): the fs offsets 0x18/0x20/0x28 appear to be the rbx, cr2,
    // and rbp slots of `CpuContextX64`, and 0x80 its fxsave area — confirm
    // against the `CpuContextX64` layout.
    //
    // SAFETY: no safety requirements for this hypercall.
    unsafe {
        core::arch::asm! {
            "push rbp",
            "push rbx",
            "mov rbp, fs:[0x28]",
            "mov rbx, fs:[0x18]",
            "call rax",
            "mov fs:[0x18], rbx",
            "mov fs:[0x28], rbp",
            "mov rbx, cr2",
            "mov fs:[0x20], rbx",
            "pop rbx",
            "pop rbp",
            "fxsave fs:[0x80]",
            in("rax") addr_of!(HYPERCALL_PAGE) as usize + *addr_of!(VTL_RETURN_OFFSET) as usize,
            lateout("rax") cpu_context.gps[CpuContextX64::RAX],
            inout("rcx") 0u64 => cpu_context.gps[CpuContextX64::RCX], // normal return
            inout("rdx") cpu_context.gps[CpuContextX64::RDX],
            inout("rsi") cpu_context.gps[CpuContextX64::RSI],
            inout("rdi") cpu_context.gps[CpuContextX64::RDI],
            inout("r8") cpu_context.gps[CpuContextX64::R8],
            inout("r9") cpu_context.gps[CpuContextX64::R9],
            inout("r10") cpu_context.gps[CpuContextX64::R10],
            inout("r11") cpu_context.gps[CpuContextX64::R11],
            inout("r12") cpu_context.gps[CpuContextX64::R12],
            inout("r13") cpu_context.gps[CpuContextX64::R13],
            inout("r14") cpu_context.gps[CpuContextX64::R14],
            inout("r15") cpu_context.gps[CpuContextX64::R15],
        }
    }
    // SAFETY: the assist page is not concurrently modified.
    let entry_reason = unsafe { (*addr_space::assist_page()).vtl_control.entry_reason };
    match entry_reason {
        HvVtlEntryReason::INTERRUPT => Ok(false),
        HvVtlEntryReason::INTERCEPT => {
            // Copy the intercept message out of the assist page so the kernel
            // can read it from the command page.
            //
            // SAFETY: the assist page is not concurrently modified.
            let intercept_message =
                unsafe { &*addr_of!((*addr_space::assist_page()).intercept_message) };
            command_page.intercept_message = *intercept_message;
            Ok(true)
        }
        entry_reason => {
            set_error(
                command_page,
                format_args!("unexpected entry reason {entry_reason:?}"),
            );
            Err(())
        }
    }
}

fn shared_msr(name: HvX64RegisterName) -> Option<u32> {
    let msr = match name {
        HvX64RegisterName::MsrMtrrDefType => x86defs::X86X_MSR_MTRR_DEF_TYPE,
        HvX64RegisterName::MsrMtrrFix64k00000 => x86defs::X86X_MSR_MTRR_FIX64K_00000,
        HvX64RegisterName::MsrMtrrFix16k80000 => x86defs::X86X_MSR_MTRR_FIX16K_80000,
        HvX64RegisterName::MsrMtrrFix16kA0000 => x86defs::X86X_MSR_MTRR_FIX16K_A0000,
        HvX64RegisterName::MsrMtrrFix4kC0000 => x86defs::X86X_MSR_MTRR_FIX4K_C0000,
        HvX64RegisterName::MsrMtrrFix4kC8000 => x86defs::X86X_MSR_MTRR_FIX4K_C8000,
        HvX64RegisterName::MsrMtrrFix4kD0000 => x86defs::X86X_MSR_MTRR_FIX4K_D0000,
        HvX64RegisterName::MsrMtrrFix4kD8000 => x86defs::X86X_MSR_MTRR_FIX4K_D8000,
        HvX64RegisterName::MsrMtrrFix4kE0000 => x86defs::X86X_MSR_MTRR_FIX4K_E0000,
        HvX64RegisterName::MsrMtrrFix4kE8000 => x86defs::X86X_MSR_MTRR_FIX4K_E8000,
        HvX64RegisterName::MsrMtrrFix4kF0000 => x86defs::X86X_MSR_MTRR_FIX4K_F0000,
        HvX64RegisterName::MsrMtrrFix4kF8000 => x86defs::X86X_MSR_MTRR_FIX4K_F8000,
        HvX64RegisterName::MsrMtrrPhysBase0 => x86defs::X86X_MSR_MTRR_PHYSBASE0,
        HvX64RegisterName::MsrMtrrPhysMask0 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 1,
        HvX64RegisterName::MsrMtrrPhysBase1 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 2,
        HvX64RegisterName::MsrMtrrPhysMask1 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 3,
        HvX64RegisterName::MsrMtrrPhysBase2 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 4,
        HvX64RegisterName::MsrMtrrPhysMask2 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 5,
        HvX64RegisterName::MsrMtrrPhysBase3 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 6,
        HvX64RegisterName::MsrMtrrPhysMask3 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 7,
        HvX64RegisterName::MsrMtrrPhysBase4 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 8,
        HvX64RegisterName::MsrMtrrPhysMask4 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 9,
        HvX64RegisterName::MsrMtrrPhysBase5 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 10,
        HvX64RegisterName::MsrMtrrPhysMask5 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 11,
        HvX64RegisterName::MsrMtrrPhysBase6 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 12,
        HvX64RegisterName::MsrMtrrPhysMask6 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 13,
        HvX64RegisterName::MsrMtrrPhysBase7 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 14,
        HvX64RegisterName::MsrMtrrPhysMask7 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 15,
        _ => return None,
    };
    Some(msr)
}

/// Writes `value` directly to the hardware debug register named by `name`.
///
/// Returns `true` on success, `false` if `name` is not a debug register that
/// can be written directly. DR6 is only written when the hypervisor reports
/// it as shared between VTLs.
fn set_debug_register(name: HvX64RegisterName, value: u64) -> bool {
    // SAFETY: debug registers are unused by sidecar.
    unsafe {
        match name {
            HvX64RegisterName::Dr0 => core::arch::asm!("mov dr0, {}", in(reg) value),
            HvX64RegisterName::Dr1 => core::arch::asm!("mov dr1, {}", in(reg) value),
            HvX64RegisterName::Dr2 => core::arch::asm!("mov dr2, {}", in(reg) value),
            HvX64RegisterName::Dr3 => core::arch::asm!("mov dr3, {}", in(reg) value),
            HvX64RegisterName::Dr6 if (&raw const VSM_CAPABILITIES).read().dr6_shared() => {
                core::arch::asm!("mov dr6, {}", in(reg) value)
            }
            _ => return false,
        }
    }

    true
}

/// Reads the hardware debug register named by `name` directly.
///
/// Returns `None` if `name` is not a debug register that can be read
/// directly. DR6 is only read when the hypervisor reports it as shared
/// between VTLs.
fn get_debug_register(name: HvX64RegisterName) -> Option<u64> {
    let v: u64;
    // SAFETY: debug registers are unused by sidecar.
    unsafe {
        match name {
            HvX64RegisterName::Dr0 => core::arch::asm!("mov {}, dr0", lateout(reg) v),
            HvX64RegisterName::Dr1 => core::arch::asm!("mov {}, dr1", lateout(reg) v),
            HvX64RegisterName::Dr2 => core::arch::asm!("mov {}, dr2", lateout(reg) v),
            HvX64RegisterName::Dr3 => core::arch::asm!("mov {}, dr3", lateout(reg) v),
            HvX64RegisterName::Dr6 if (&raw const VSM_CAPABILITIES).read().dr6_shared() => {
                core::arch::asm!("mov {}, dr6", lateout(reg) v)
            }
            _ => return None,
        }
    }
    Some(v)
}

/// Handles `SidecarCommand::GET_VP_REGISTERS`: reads each requested register
/// and writes the values back into the command page, recording per-request
/// status in the request header.
///
/// Shared MSRs and debug registers are read directly from hardware; all other
/// registers are fetched via a hypercall per register.
fn get_vp_registers(command_page: &mut CommandPage) {
    // Split the request buffer into the fixed-size header and the trailing
    // array of register name/value pairs.
    let (request, regs) = command_page
        .request_data
        .as_mut_bytes()
        .split_at_mut(size_of::<GetSetVpRegisterRequest>());
    let &mut GetSetVpRegisterRequest {
        count,
        target_vtl,
        rsvd: _,
        ref mut status,
        rsvd2: _,
        regs: [],
    } = FromBytes::mut_from_bytes(request).unwrap();

    // Reinterpret the remaining bytes as `count` register associations; fail
    // the request if the count doesn't fit in the buffer.
    let Ok((regs, _)) = <[HvRegisterAssoc]>::mut_from_prefix_with_elems(regs, count.into()) else {
        // TODO: zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
        set_error(
            command_page,
            format_args!("invalid register name count: {count}"),
        );
        return;
    };

    *status = HvStatus::SUCCESS;
    for &mut HvRegisterAssoc {
        name,
        pad: _,
        ref mut value,
    } in regs
    {
        let r = if let Some(msr) = shared_msr(name.into()) {
            // SAFETY: the shared MSRs are not used by this kernel, so they cannot
            // affect this kernel's functioning.
            Ok(unsafe { read_msr(msr).into() })
        } else if let Some(value) = get_debug_register(name.into()) {
            Ok(value.into())
        } else {
            // FUTURE: consider batching these hypercalls if this becomes a bottleneck.
            get_hv_vp_register(target_vtl, name)
        };

        // Stop at the first failure; its status is reported for the whole
        // request.
        match r {
            Ok(v) => *value = v,
            Err(err) => {
                *status = Err(err).into();
                break;
            }
        };
    }
}

/// Handles `SidecarCommand::SET_VP_REGISTERS`: writes each requested register
/// value, recording per-request status in the request header.
///
/// Shared MSRs and debug registers are written directly to hardware; all
/// other registers are set via a hypercall per register.
fn set_vp_registers(command_page: &mut CommandPage) {
    // Split the request buffer into the fixed-size header and the trailing
    // array of register name/value pairs.
    let (request, regs) = command_page
        .request_data
        .as_mut_bytes()
        .split_at_mut(size_of::<GetSetVpRegisterRequest>());
    let &mut GetSetVpRegisterRequest {
        count,
        target_vtl,
        rsvd: _,
        ref mut status,
        rsvd2: _,
        regs: [],
    } = FromBytes::mut_from_bytes(request).unwrap();

    // Reinterpret the remaining bytes as `count` register associations; fail
    // the request if the count doesn't fit in the buffer.
    let Ok((assoc, _)) = <[HvRegisterAssoc]>::ref_from_prefix_with_elems(regs, count.into()) else {
        // TODO: zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
        set_error(
            command_page,
            format_args!("invalid register count: {count}"),
        );
        return;
    };

    *status = HvStatus::SUCCESS;
    for &HvRegisterAssoc {
        name,
        value,
        pad: _,
    } in assoc
    {
        let r = if let Some(msr) = shared_msr(name.into()) {
            // SAFETY: the shared MSRs are not used by this kernel, so they cannot
            // affect this kernel's functioning.
            unsafe { write_msr(msr, value.as_u64()) }
            Ok(())
        } else if set_debug_register(name.into(), value.as_u64()) {
            Ok(())
        } else {
            // FUTURE: consider batching these hypercalls if this becomes a bottleneck.
            set_hv_vp_register(target_vtl, name, value)
        };

        // Stop at the first failure; its status is reported for the whole
        // request.
        if r.is_err() {
            *status = r.into();
            break;
        }
    }
}

/// Handles `SidecarCommand::TRANSLATE_GVA`: issues
/// `HvCallTranslateVirtualAddressEx` for the current VP and writes a
/// `TranslateGvaResponse` into the command page.
fn translate_gva(command_page: &mut CommandPage) {
    let TranslateGvaRequest { gvn, control_flags } =
        FromBytes::read_from_prefix(command_page.request_data.as_bytes())
            .unwrap()
            .0; // TODO: zerocopy: use-rest-of-range, zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
    {
        // Build the hypercall input in the per-VP hypercall input page.
        //
        // SAFETY: the input page is not concurrently accessed.
        let input = unsafe { &mut *addr_space::hypercall_input() };

        TranslateVirtualAddressX64 {
            partition_id: HV_PARTITION_ID_SELF,
            vp_index: HV_VP_INDEX_SELF,
            reserved: 0,
            control_flags,
            gva_page: gvn,
        }
        .write_to_prefix(input)
        .unwrap();
    }

    let result = hypercall(HypercallCode::HvCallTranslateVirtualAddressEx, 0);
    let output = if result.is_ok() {
        // SAFETY: the output is not concurrently accessed
        let output = unsafe { &*addr_space::hypercall_output() };
        FromBytes::read_from_prefix(output).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    } else {
        // On failure, report a zeroed translation result alongside the
        // hypercall status.
        FromZeros::new_zeroed()
    };

    TranslateGvaResponse {
        status: result.into(),
        rsvd: [0; 7],
        output,
    }
    .write_to_prefix(command_page.request_data.as_mut_bytes())
    .unwrap();
}

/// Flags this AP as needing the kernel's attention and, if a response vector
/// has been configured, sends the response IPI to the kernel's CPU.
fn raise_attention() {
    let control = control();
    control.needs_attention.store(1, Release);
    let vector = control.response_vector.load(Relaxed);
    if vector == 0 {
        // No response IPI configured; the kernel polls instead.
        return;
    }
    log!("ipi vector {vector}");
    let icr = x86defs::apic::Icr::new()
        .with_x2apic_mda(control.response_cpu.load(Relaxed))
        .with_vector(vector as u8);
    // SAFETY: no safety requirements.
    unsafe {
        write_msr(x86defs::apic::ApicRegister::ICR0.x2apic_msr(), icr.into());
    }
}

/// Polls `f` until it produces a value, halting the processor between polls.
///
/// Interrupts are enabled only while halted and are disabled again before
/// `f` is re-evaluated.
fn park_until<F: FnMut() -> Option<R>, R>(mut f: F) -> R {
    loop {
        match f() {
            Some(r) => return r,
            None => {
                // Enable interrupts, halt until the next interrupt arrives,
                // then disable interrupts again before polling.
                //
                // SAFETY: no safety requirements.
                unsafe {
                    core::arch::asm!("sti; hlt; cli");
                }
            }
        }
    }
}