sidecar/arch/x86_64/vp.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Sidecar code that runs on the APs, after initialization. This code all runs
//! with per-AP page tables, concurrently with the main kernel.

use super::CommandErrorWriter;
use super::VSM_CAPABILITIES;
use super::VTL_RETURN_OFFSET;
use super::VpGlobals;
use super::addr_space;
use super::get_hv_vp_register;
use super::hypercall;
use super::log;
use super::set_hv_vp_register;
use core::fmt::Write;
use core::mem::size_of;
use core::ptr::addr_of;
use core::sync::atomic::AtomicU8;
use core::sync::atomic::Ordering::Acquire;
use core::sync::atomic::Ordering::Relaxed;
use core::sync::atomic::Ordering::Release;
use hvdef::HV_PAGE_SHIFT;
use hvdef::HV_PARTITION_ID_SELF;
use hvdef::HV_VP_INDEX_SELF;
use hvdef::HvStatus;
use hvdef::HvVtlEntryReason;
use hvdef::HvX64RegisterName;
use hvdef::HypercallCode;
use hvdef::hypercall::HvInputVtl;
use hvdef::hypercall::HvRegisterAssoc;
use hvdef::hypercall::TranslateVirtualAddressX64;
use minimal_rt::arch::hypercall::HYPERCALL_PAGE;
use minimal_rt::arch::msr::read_msr;
use minimal_rt::arch::msr::write_msr;
use sidecar_defs::CommandPage;
use sidecar_defs::ControlPage;
use sidecar_defs::CpuContextX64;
use sidecar_defs::CpuStatus;
use sidecar_defs::GetSetVpRegisterRequest;
use sidecar_defs::PAGE_SIZE;
use sidecar_defs::RunVpResponse;
use sidecar_defs::SidecarCommand;
use sidecar_defs::TranslateGvaRequest;
use sidecar_defs::TranslateGvaResponse;
use x86defs::apic::ApicBase;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

/// Entry point for an AP. Called with per-VP state (page tables, stack,
/// globals) already initialized, and IDT and GDT set appropriately.
///
/// # Safety
/// Must be called as an AP entry point.
pub unsafe fn ap_entry() -> ! {
    // SAFETY: the globals are only accessed by this CPU, and so there are no
    // concurrent accesses.
    let globals = unsafe { &mut *addr_space::globals() };

    // Set fs base to point to the CPU context, for use in `run_vp`.
    //
    // SAFETY: just getting the address.
    let fs_base = unsafe { addr_of!((*addr_space::command_page()).cpu_context) as u64 };
    // SAFETY: no safety requirements.
    unsafe { write_msr(x86defs::X64_MSR_FS_BASE, fs_base) };

    // Enable the X2APIC.
    //
    // SAFETY: no safety requirements.
    let apic_base = ApicBase::from(unsafe { read_msr(x86defs::X86X_MSR_APIC_BASE) });
    // SAFETY: no safety requirements.
    unsafe {
        write_msr(
            x86defs::X86X_MSR_APIC_BASE,
            apic_base.with_enable(true).with_x2apic(true).into(),
        )
    }

    // Software enable the APIC, with the spurious interrupt vector set to 0xff.
    //
    // SAFETY: the IDT is initialized appropriately.
    unsafe {
        write_msr(
            x86defs::apic::ApicRegister::SVR.x2apic_msr(),
            u32::from(x86defs::apic::Svr::new().with_enable(true).with_vector(!0)).into(),
        )
    }

    // Zero the register page, since it has not yet been mapped (and may never
    // be mapped if the hypervisor does not support it).
    //
    // Note that it is not safe to access the register page after this point,
    // since it is owned by the VMM.
    //
    // SAFETY: we are still booting, so the VMM is not using this yet.
    unsafe {
        (*addr_space::register_page().cast::<[u8; PAGE_SIZE]>()).fill(0);
    }

    // Zero the command page, too, so that it doesn't leak data to the VMM from
    // a previous boot.
    //
    // SAFETY: we are still booting, so the VMM is not using this yet.
    unsafe {
        (*addr_space::command_page().cast::<[u8; PAGE_SIZE]>()).fill(0);
    }

    // Notify the BSP that we are ready.
    let old_state = globals.cpu_status().swap(CpuStatus::IDLE.0, Release);
    assert_eq!(old_state, CpuStatus::RUN.0);

    // Run the AP command dispatch loop until we receive a remove request.
    ap_run(globals);

    log!("removing");

    // Disable the VP assist page.
    //
    // SAFETY: no safety requirements.
    unsafe { write_msr(hvdef::HV_X64_MSR_VP_ASSIST_PAGE, 0) };

    // Disable the register page.
    if globals.register_page_mapped {
        set_hv_vp_register(
            HvInputVtl::CURRENT_VTL,
            HvX64RegisterName::RegisterPage.into(),
            0u64.into(),
        )
        .unwrap();
    }

    // Software disable the APIC. Leave it hardware enabled so that we can send
    // the response IPI.
    log!("disabling apic");
    // SAFETY: no safety requirements.
    unsafe {
        write_msr(
            x86defs::apic::ApicRegister::SVR.x2apic_msr(),
            u32::from(x86defs::apic::Svr::new().with_enable(false).with_vector(!0)).into(),
        );
    }
    globals.cpu_status().store(CpuStatus::REMOVED.0, Release);
    raise_attention();
    park_until(|| None)
}

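/// Maps the hypervisor overlay pages (the VP assist page and, if the
/// hypervisor supports it, the register page) for this VP.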
fn map_overlays(globals: &mut VpGlobals) {
    // Enable the VP assist page.
    //
    // SAFETY: the VP assist page is reserved for this use and will not alias
    // with other Rust memory.
    unsafe {
        write_msr(
            hvdef::HV_X64_MSR_VP_ASSIST_PAGE,
            hvdef::HvRegisterVpAssistPage::new()
                .with_enabled(true)
                .with_gpa_page_number(addr_space::assist_page_pa() >> HV_PAGE_SHIFT)
                .into(),
        );
    }

    // Map the register page. We don't currently use it directly, but it is
    // provided to the VMM.
    match set_hv_vp_register(
        HvInputVtl::CURRENT_VTL,
        HvX64RegisterName::RegisterPage.into(),
        u64::from(
            hvdef::HvSynicSimpSiefp::new()
                .with_base_gpn(addr_space::register_page_pa() >> HV_PAGE_SHIFT)
                .with_enabled(true),
        )
        .into(),
    ) {
        Ok(()) => globals.register_page_mapped = true,
        Err(err) => {
            // This may be an expected condition if the hypervisor does not support
            // the register page for VTL2.
            log!("failed to map register page: {err}");
        }
    }
}

/// Runs the command dispatch loop for an AP until a remove request is received.
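///
/// Requests arrive through this CPU's status byte in the control page: the
/// loop parks until the status leaves `IDLE`, dispatches on the command page,
/// then stores `IDLE` and raises attention to signal completion.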
fn ap_run(globals: &mut VpGlobals) {
    let cpu_status = globals.cpu_status();

    loop {
        // Wait for a run request.
        let status = park_until(|| {
            let status = CpuStatus(cpu_status.load(Acquire));
            (status != CpuStatus::IDLE).then_some(status)
        });
        match status {
            CpuStatus::RUN | CpuStatus::STOP => {
                // Still run the request if a stop is requested, since there
                // is no generic way to report that the request was
                // cancelled before it ran.
            }
            CpuStatus::REMOVE => return,
            status => panic!("unexpected cpu request {status:?}"),
        }

        // Dispatch on the command page.
        {
            // SAFETY: we now have exclusive access to the state.
            let command_page = unsafe { &mut *addr_space::command_page() };
            command_page.has_error = 0;
            let command = core::mem::replace(&mut command_page.command, SidecarCommand::NONE);
            log!("request {command:?}");
            match command {
                SidecarCommand::NONE => {}
                SidecarCommand::RUN_VP => run_vp(globals, command_page, cpu_status),
                SidecarCommand::GET_VP_REGISTERS => get_vp_registers(command_page),
                SidecarCommand::SET_VP_REGISTERS => set_vp_registers(command_page),
                SidecarCommand::TRANSLATE_GVA => translate_gva(command_page),
                command => set_error(command_page, format_args!("unknown command {command:?}")),
            }
        }

        log!("request done");
        cpu_status.store(CpuStatus::IDLE.0, Release);
        raise_attention();
    }
}

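/// Returns a reference to the shared control page.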
fn control() -> &'static ControlPage {
    // SAFETY: all mutable fields of the control page have interior mutability,
    // so this is a valid dereference.
    unsafe { &*addr_space::control_page() }
}

impl VpGlobals {
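    /// Returns this CPU's status byte within the control page.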
    fn cpu_status(&self) -> &'static AtomicU8 {
        &control().cpu_status[self.node_cpu_index as usize]
    }
}

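/// Reports a command failure by writing the formatted error message to the
/// command page.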
fn set_error(command_page: &mut CommandPage, err: impl core::fmt::Display) {
    command_page.has_error = 1;
    command_page.error.len = 0;
    let mut writer = CommandErrorWriter(&mut command_page.error);
    let _ = write!(writer, "{err}");
}

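/// Handles `RUN_VP`: runs the VP repeatedly until an intercept arrives or a
/// stop is requested, then writes a `RunVpResponse` indicating whether an
/// intercept message is pending.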
fn run_vp(globals: &mut VpGlobals, command_page: &mut CommandPage, cpu_status: &AtomicU8) {
    // Map the register page and VP assist page now.
    //
    // The hypervisor has a concurrency bug if the pages are mapped while other
    // VPs are starting up, so work around this by delaying it until now.
    //
    // The VP assist page is only needed in this path. The register page is
    // technically used by the user-mode VMM earlier, but the hypervisor doesn't
    // mark it valid until the first time the VP is run anyway.
    if !globals.overlays_mapped {
        map_overlays(globals);
        globals.overlays_mapped = true;
    }

    let mut intercept = false;
    while cpu_status.load(Relaxed) != CpuStatus::STOP.0 {
        match run_vp_once(command_page) {
            Ok(true) => {
                intercept = true;
                break;
            }
            Ok(false) => {}
            Err(()) => return,
        }
    }

    RunVpResponse {
        intercept: intercept as u8,
    }
    .write_to_prefix(command_page.request_data.as_mut_bytes())
    .unwrap(); // PANIC: will not panic, since size_of::<RunVpResponse>() is 1, whereas the buffer is statically declared as 16 bytes long.
}

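/// Runs the VP once. Returns `Ok(true)` if the VP hit an intercept,
/// `Ok(false)` if VTL2 was reentered because of an interrupt, or `Err(())`
/// (with the error recorded on the command page) for an unexpected entry
/// reason.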
fn run_vp_once(command_page: &mut CommandPage) -> Result<bool, ()> {
    let cpu_context = &mut command_page.cpu_context;
    // Write rax and rcx to the VP assist page.
    //
    // SAFETY: the assist page is not concurrently modified.
    unsafe {
        (*addr_space::assist_page()).vtl_control.registers = [
            cpu_context.gps[CpuContextX64::RAX],
            cpu_context.gps[CpuContextX64::RCX],
        ];
    }
    // Dispatch the VP by calling the VTL return sequence in the hypercall
    // page. rbx and rbp cannot be used as `asm!` operands, so they are saved
    // and restored manually via fs-relative accesses (fs base was set to the
    // address of `cpu_context` in `ap_entry`): 0x18 and 0x28 hold the VP's
    // rbx and rbp, 0x20 receives cr2, and 0x80 is the fxsave area within
    // `cpu_context`.
    //
    // SAFETY: no safety requirements for this hypercall.
    unsafe {
        core::arch::asm! {
            "push rbp",
            "push rbx",
            "mov rbp, fs:[0x28]",   // load the VP's rbp from cpu_context
            "mov rbx, fs:[0x18]",   // load the VP's rbx from cpu_context
            "call rax",             // enter the VP via the VTL return sequence
            "mov fs:[0x18], rbx",   // save the VP's rbx back to cpu_context
            "mov fs:[0x28], rbp",   // save the VP's rbp back to cpu_context
            "mov rbx, cr2",
            "mov fs:[0x20], rbx",   // save the VP's cr2 to cpu_context
            "pop rbx",
            "pop rbp",
            "fxsave fs:[0x80]",     // save the VP's fpu/sse state to cpu_context
            in("rax") addr_of!(HYPERCALL_PAGE) as usize + *addr_of!(VTL_RETURN_OFFSET) as usize,
            lateout("rax") cpu_context.gps[CpuContextX64::RAX],
            inout("rcx") 0u64 => cpu_context.gps[CpuContextX64::RCX], // normal return
            inout("rdx") cpu_context.gps[CpuContextX64::RDX],
            inout("rsi") cpu_context.gps[CpuContextX64::RSI],
            inout("rdi") cpu_context.gps[CpuContextX64::RDI],
            inout("r8") cpu_context.gps[CpuContextX64::R8],
            inout("r9") cpu_context.gps[CpuContextX64::R9],
            inout("r10") cpu_context.gps[CpuContextX64::R10],
            inout("r11") cpu_context.gps[CpuContextX64::R11],
            inout("r12") cpu_context.gps[CpuContextX64::R12],
            inout("r13") cpu_context.gps[CpuContextX64::R13],
            inout("r14") cpu_context.gps[CpuContextX64::R14],
            inout("r15") cpu_context.gps[CpuContextX64::R15],
        }
    }
    // SAFETY: the assist page is not concurrently modified.
    let entry_reason = unsafe { (*addr_space::assist_page()).vtl_control.entry_reason };
    match entry_reason {
        HvVtlEntryReason::INTERRUPT => Ok(false),
        HvVtlEntryReason::INTERCEPT => {
            // SAFETY: the assist page is not concurrently modified.
            let intercept_message =
                unsafe { &*addr_of!((*addr_space::assist_page()).intercept_message) };
            command_page.intercept_message = *intercept_message;
            Ok(true)
        }
        entry_reason => {
            set_error(
                command_page,
                format_args!("unexpected entry reason {entry_reason:?}"),
            );
            Err(())
        }
    }
}

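/// Returns the architectural MSR number for a register that is shared across
/// VTLs (currently just the MTRR MSRs), and so can be accessed directly
/// instead of via a hypercall.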
fn shared_msr(name: HvX64RegisterName) -> Option<u32> {
    let msr = match name {
        HvX64RegisterName::MsrMtrrDefType => x86defs::X86X_MSR_MTRR_DEF_TYPE,
        HvX64RegisterName::MsrMtrrFix64k00000 => x86defs::X86X_MSR_MTRR_FIX64K_00000,
        HvX64RegisterName::MsrMtrrFix16k80000 => x86defs::X86X_MSR_MTRR_FIX16K_80000,
        HvX64RegisterName::MsrMtrrFix16kA0000 => x86defs::X86X_MSR_MTRR_FIX16K_A0000,
        HvX64RegisterName::MsrMtrrFix4kC0000 => x86defs::X86X_MSR_MTRR_FIX4K_C0000,
        HvX64RegisterName::MsrMtrrFix4kC8000 => x86defs::X86X_MSR_MTRR_FIX4K_C8000,
        HvX64RegisterName::MsrMtrrFix4kD0000 => x86defs::X86X_MSR_MTRR_FIX4K_D0000,
        HvX64RegisterName::MsrMtrrFix4kD8000 => x86defs::X86X_MSR_MTRR_FIX4K_D8000,
        HvX64RegisterName::MsrMtrrFix4kE0000 => x86defs::X86X_MSR_MTRR_FIX4K_E0000,
        HvX64RegisterName::MsrMtrrFix4kE8000 => x86defs::X86X_MSR_MTRR_FIX4K_E8000,
        HvX64RegisterName::MsrMtrrFix4kF0000 => x86defs::X86X_MSR_MTRR_FIX4K_F0000,
        HvX64RegisterName::MsrMtrrFix4kF8000 => x86defs::X86X_MSR_MTRR_FIX4K_F8000,
        // The variable-range MTRR MSRs are interleaved: PHYSBASE0 + 2n is
        // PhysBaseN and PHYSBASE0 + 2n + 1 is PhysMaskN.
        HvX64RegisterName::MsrMtrrPhysBase0 => x86defs::X86X_MSR_MTRR_PHYSBASE0,
        HvX64RegisterName::MsrMtrrPhysMask0 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 1,
        HvX64RegisterName::MsrMtrrPhysBase1 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 2,
        HvX64RegisterName::MsrMtrrPhysMask1 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 3,
        HvX64RegisterName::MsrMtrrPhysBase2 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 4,
        HvX64RegisterName::MsrMtrrPhysMask2 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 5,
        HvX64RegisterName::MsrMtrrPhysBase3 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 6,
        HvX64RegisterName::MsrMtrrPhysMask3 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 7,
        HvX64RegisterName::MsrMtrrPhysBase4 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 8,
        HvX64RegisterName::MsrMtrrPhysMask4 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 9,
        HvX64RegisterName::MsrMtrrPhysBase5 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 10,
        HvX64RegisterName::MsrMtrrPhysMask5 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 11,
        HvX64RegisterName::MsrMtrrPhysBase6 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 12,
        HvX64RegisterName::MsrMtrrPhysMask6 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 13,
        HvX64RegisterName::MsrMtrrPhysBase7 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 14,
        HvX64RegisterName::MsrMtrrPhysMask7 => x86defs::X86X_MSR_MTRR_PHYSBASE0 + 15,
        _ => return None,
    };
    Some(msr)
}

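/// Writes a debug register directly, returning `false` if the register must
/// instead be set via hypercall. DR6 is only written directly when the VSM
/// capabilities report it as shared.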
fn set_debug_register(name: HvX64RegisterName, value: u64) -> bool {
    // SAFETY: debug registers are unused by sidecar.
    unsafe {
        match name {
            HvX64RegisterName::Dr0 => core::arch::asm!("mov dr0, {}", in(reg) value),
            HvX64RegisterName::Dr1 => core::arch::asm!("mov dr1, {}", in(reg) value),
            HvX64RegisterName::Dr2 => core::arch::asm!("mov dr2, {}", in(reg) value),
            HvX64RegisterName::Dr3 => core::arch::asm!("mov dr3, {}", in(reg) value),
            HvX64RegisterName::Dr6 if (&raw const VSM_CAPABILITIES).read().dr6_shared() => {
                core::arch::asm!("mov dr6, {}", in(reg) value)
            }
            _ => return false,
        }
    }

    true
}

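/// Reads a debug register directly, returning `None` if the register must
/// instead be read via hypercall.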
fn get_debug_register(name: HvX64RegisterName) -> Option<u64> {
    let v: u64;
    // SAFETY: debug registers are unused by sidecar.
    unsafe {
        match name {
            HvX64RegisterName::Dr0 => core::arch::asm!("mov {}, dr0", lateout(reg) v),
            HvX64RegisterName::Dr1 => core::arch::asm!("mov {}, dr1", lateout(reg) v),
            HvX64RegisterName::Dr2 => core::arch::asm!("mov {}, dr2", lateout(reg) v),
            HvX64RegisterName::Dr3 => core::arch::asm!("mov {}, dr3", lateout(reg) v),
            HvX64RegisterName::Dr6 if (&raw const VSM_CAPABILITIES).read().dr6_shared() => {
                core::arch::asm!("mov {}, dr6", lateout(reg) v)
            }
            _ => return None,
        }
    }
    Some(v)
}

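/// Handles `GET_VP_REGISTERS`: reads each requested register directly where
/// possible (shared MSRs and debug registers), falling back to a hypercall
/// per register otherwise. Stops at the first failure.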
fn get_vp_registers(command_page: &mut CommandPage) {
    let (request, regs) = command_page
        .request_data
        .as_mut_bytes()
        .split_at_mut(size_of::<GetSetVpRegisterRequest>());
    let &mut GetSetVpRegisterRequest {
        count,
        target_vtl,
        rsvd: _,
        ref mut status,
        rsvd2: _,
        regs: [],
    } = FromBytes::mut_from_bytes(request).unwrap();

    let Ok((regs, _)) = <[HvRegisterAssoc]>::mut_from_prefix_with_elems(regs, count.into()) else {
        // TODO: zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
        set_error(
            command_page,
            format_args!("invalid register name count: {count}"),
        );
        return;
    };

    *status = HvStatus::SUCCESS;
    for &mut HvRegisterAssoc {
        name,
        pad: _,
        ref mut value,
    } in regs
    {
        let r = if let Some(msr) = shared_msr(name.into()) {
            // SAFETY: the shared MSRs are not used by this kernel, so they cannot
            // affect this kernel's functioning.
            Ok(unsafe { read_msr(msr).into() })
        } else if let Some(value) = get_debug_register(name.into()) {
            Ok(value.into())
        } else {
            // FUTURE: consider batching these hypercalls if this becomes a bottleneck.
            get_hv_vp_register(target_vtl, name)
        };

        match r {
            Ok(v) => *value = v,
            Err(err) => {
                *status = Err(err).into();
                break;
            }
        }
    }
}

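/// Handles `SET_VP_REGISTERS`: writes each register directly where possible
/// (shared MSRs and debug registers), falling back to a hypercall per
/// register otherwise. Stops at the first failure.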
fn set_vp_registers(command_page: &mut CommandPage) {
    let (request, regs) = command_page
        .request_data
        .as_mut_bytes()
        .split_at_mut(size_of::<GetSetVpRegisterRequest>());
    let &mut GetSetVpRegisterRequest {
        count,
        target_vtl,
        rsvd: _,
        ref mut status,
        rsvd2: _,
        regs: [],
    } = FromBytes::mut_from_bytes(request).unwrap();

    let Ok((assoc, _)) = <[HvRegisterAssoc]>::ref_from_prefix_with_elems(regs, count.into()) else {
        // TODO: zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
        set_error(
            command_page,
            format_args!("invalid register count: {count}"),
        );
        return;
    };

    *status = HvStatus::SUCCESS;
    for &HvRegisterAssoc {
        name,
        value,
        pad: _,
    } in assoc
    {
        let r = if let Some(msr) = shared_msr(name.into()) {
            // SAFETY: the shared MSRs are not used by this kernel, so they cannot
            // affect this kernel's functioning.
            unsafe { write_msr(msr, value.as_u64()) };
            Ok(())
        } else if set_debug_register(name.into(), value.as_u64()) {
            Ok(())
        } else {
            // FUTURE: consider batching these hypercalls if this becomes a bottleneck.
            set_hv_vp_register(target_vtl, name, value)
        };

        if r.is_err() {
            *status = r.into();
            break;
        }
    }
}

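/// Handles `TRANSLATE_GVA` by issuing `HvCallTranslateVirtualAddressEx` for
/// the current VP and writing the result back to the command page.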
fn translate_gva(command_page: &mut CommandPage) {
    let TranslateGvaRequest { gvn, control_flags } =
        FromBytes::read_from_prefix(command_page.request_data.as_bytes())
            .unwrap()
            .0; // TODO: zerocopy: use-rest-of-range, zerocopy: err (https://github.com/microsoft/openvmm/issues/759)
    {
        // SAFETY: the input page is not concurrently accessed.
        let input = unsafe { &mut *addr_space::hypercall_input() };

        TranslateVirtualAddressX64 {
            partition_id: HV_PARTITION_ID_SELF,
            vp_index: HV_VP_INDEX_SELF,
            reserved: 0,
            control_flags,
            gva_page: gvn,
        }
        .write_to_prefix(input)
        .unwrap();
    }

    let result = hypercall(HypercallCode::HvCallTranslateVirtualAddressEx, 0);
    let output = if result.is_ok() {
        // SAFETY: the output is not concurrently accessed.
        let output = unsafe { &*addr_space::hypercall_output() };
        FromBytes::read_from_prefix(output).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    } else {
        FromZeros::new_zeroed()
    };

    TranslateGvaResponse {
        status: result.into(),
        rsvd: [0; 7],
        output,
    }
    .write_to_prefix(command_page.request_data.as_mut_bytes())
    .unwrap();
}

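/// Signals that this CPU needs attention, sending an IPI to the configured
/// response CPU if a response vector has been registered.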
fn raise_attention() {
    let control = control();
    control.needs_attention.store(1, Release);
    let vector = control.response_vector.load(Relaxed);
    if vector != 0 {
        log!("ipi vector {vector}");
        // SAFETY: no safety requirements.
        unsafe {
            write_msr(
                x86defs::apic::ApicRegister::ICR0.x2apic_msr(),
                x86defs::apic::Icr::new()
                    .with_x2apic_mda(control.response_cpu.load(Relaxed))
                    .with_vector(vector as u8)
                    .into(),
            );
        }
    }
}

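/// Halts the processor until `f` returns `Some(_)`, with interrupts enabled
/// only while halted.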
fn park_until<F: FnMut() -> Option<R>, R>(mut f: F) -> R {
    loop {
        if let Some(r) = f() {
            break r;
        } else {
            // Enable interrupts and halt the processor. Disable interrupts
            // after waking up.
            //
            // SAFETY: no safety requirements.
            unsafe {
                core::arch::asm!("sti; hlt; cli");
            }
        }
    }
}