loader/
common.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Common helper routines for all loaders.
5
6use crate::importer::BootPageAcceptance;
7use crate::importer::ImageLoad;
8use crate::importer::SegmentRegister;
9use crate::importer::TableRegister;
10use crate::importer::X86Register;
11use hvdef::HV_PAGE_SIZE;
12use thiserror::Error;
13use vm_topology::memory::MemoryLayout;
14use x86defs::GdtEntry;
15use x86defs::X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES;
16use x86defs::X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES;
17use zerocopy::FromZeros;
18use zerocopy::IntoBytes;
19
/// Number of entries in the default GDT (null, code, data, null).
const DEFAULT_GDT_COUNT: usize = 4;
/// The size of the default GDT table, in bytes.
pub const DEFAULT_GDT_SIZE: u64 = HV_PAGE_SIZE;
23
24/// Import a default GDT at the given address, with one page imported.
25/// The GDT is used with cs as entry 1, and data segments (ds, es, fs, gs, ss) as entry 2.
26/// Registers using the GDT are imported with vtl 0 only.
27pub fn import_default_gdt(
28    importer: &mut dyn ImageLoad<X86Register>,
29    gdt_page_base: u64,
30) -> anyhow::Result<()> {
31    // Create a default GDT consisting of two entries.
32    // ds, es, fs, gs, ss are entry 2 (linear_selector)
33    // cs is entry 1 (linear_code64_selector)
34    let default_data_attributes: u16 = X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES.into();
35    let default_code_attributes: u16 = X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES.into();
36    let gdt: [GdtEntry; DEFAULT_GDT_COUNT] = [
37        GdtEntry::new_zeroed(),
38        GdtEntry {
39            limit_low: 0xffff,
40            attr_low: default_code_attributes as u8,
41            attr_high: (default_code_attributes >> 8) as u8,
42            ..GdtEntry::new_zeroed()
43        },
44        GdtEntry {
45            limit_low: 0xffff,
46            attr_low: default_data_attributes as u8,
47            attr_high: (default_data_attributes >> 8) as u8,
48            ..GdtEntry::new_zeroed()
49        },
50        GdtEntry::new_zeroed(),
51    ];
52    let gdt_entry_size = size_of::<GdtEntry>();
53    let linear_selector_offset = 2 * gdt_entry_size;
54    let linear_code64_selector_offset = gdt_entry_size;
55
56    // Import the GDT into the specified base page.
57    importer.import_pages(
58        gdt_page_base,
59        DEFAULT_GDT_SIZE / HV_PAGE_SIZE,
60        "default-gdt",
61        BootPageAcceptance::Exclusive,
62        gdt.as_bytes(),
63    )?;
64
65    // Import GDTR and selectors.
66    let mut import_reg = |register| importer.import_vp_register(register);
67    import_reg(X86Register::Gdtr(TableRegister {
68        base: gdt_page_base * HV_PAGE_SIZE,
69        limit: (size_of::<GdtEntry>() * DEFAULT_GDT_COUNT - 1) as u16,
70    }))?;
71
72    let ds = SegmentRegister {
73        selector: linear_selector_offset as u16,
74        base: 0,
75        limit: 0xffffffff,
76        attributes: default_data_attributes,
77    };
78    import_reg(X86Register::Ds(ds))?;
79    import_reg(X86Register::Es(ds))?;
80    import_reg(X86Register::Fs(ds))?;
81    import_reg(X86Register::Gs(ds))?;
82    import_reg(X86Register::Ss(ds))?;
83
84    let cs = SegmentRegister {
85        selector: linear_code64_selector_offset as u16,
86        base: 0,
87        limit: 0xffffffff,
88        attributes: default_code_attributes,
89    };
90    import_reg(X86Register::Cs(cs))?;
91
92    Ok(())
93}
94
/// Error returned when the memory layout does not contain exactly two MMIO
/// gaps, which is required to compute the variable MTRRs.
#[derive(Debug, Error)]
#[error("exactly two MMIO gaps are required")]
pub struct UnsupportedMmio;
99
100/// Computes the x86 variable MTRRs that describe the given memory layout. This
101/// is intended to be used to setup MTRRs for booting a guest with two mmio
102/// gaps, such as booting Linux, UEFI, or PCAT.
103///
104/// N.B. Currently this panics if there are not exactly two MMIO ranges.
105pub fn compute_variable_mtrrs(
106    memory: &MemoryLayout,
107    physical_address_width: u8,
108) -> Result<Vec<X86Register>, UnsupportedMmio> {
109    const WRITEBACK: u64 = 0x6;
110
111    let &[mmio_gap_low, mmio_gap_high] = memory.mmio().try_into().map_err(|_| UnsupportedMmio)?;
112
113    // Clamp the width to something reasonable.
114    let gpa_space_size = physical_address_width.clamp(36, 52);
115
116    // The MMIO limits will be the basis of the MTRR calculations
117    // as page count doesn't work when there may be gaps between memory blocks.
118
119    let mut result = Vec::with_capacity(8);
120
121    // Our PCAT firmware sets MTRR 200 and MTRR Mask 201 to 128 MB during boot, so we
122    // mimic that here.
123    let pcat_mtrr_size = 128 * 1024 * 1024;
124
125    result.push(X86Register::MtrrPhysBase0(WRITEBACK));
126    result.push(X86Register::MtrrPhysMask0(mtrr_mask(
127        gpa_space_size,
128        pcat_mtrr_size - 1,
129    )));
130
131    // If there is more than 128 MB, use MTRR 202 and MTRR Mask 203 to cover the
132    // amount of memory below the 3.8GB memory gap.
133    if memory.end_of_ram() > pcat_mtrr_size {
134        result.push(X86Register::MtrrPhysBase1(pcat_mtrr_size | WRITEBACK));
135        result.push(X86Register::MtrrPhysMask1(mtrr_mask(
136            gpa_space_size,
137            mmio_gap_low.start() - 1,
138        )));
139    }
140
141    // If there is more than ~3.8GB of memory, use MTRR 204 and MTRR Mask 205 to cover
142    // the amount of memory above 4GB.
143    if memory.end_of_ram() > mmio_gap_low.end() {
144        result.push(X86Register::MtrrPhysBase2(mmio_gap_low.end() | WRITEBACK));
145        result.push(X86Register::MtrrPhysMask2(mtrr_mask(
146            gpa_space_size,
147            mmio_gap_high.start() - 1,
148        )));
149    }
150
151    // If there is more memory than 64GB then use MTRR 206 and MTRR Mask 207 and possibly
152    // MTRR 208 and MTRR Mask 209 depending on maximum address width. Both MTRR pairs are
153    // used with the magic 8TB boundary to work around a bug in older Linux kernels
154    // (e.g. RHEL 6.x, etc.)
155    if memory.end_of_ram() > mmio_gap_high.end() {
156        result.push(X86Register::MtrrPhysBase3(mmio_gap_high.end() | WRITEBACK));
157        result.push(X86Register::MtrrPhysMask3(mtrr_mask(
158            gpa_space_size,
159            (1 << std::cmp::min(gpa_space_size, 43)) - 1,
160        )));
161        if gpa_space_size > 43 {
162            result.push(X86Register::MtrrPhysBase4((1 << 43) | WRITEBACK));
163            result.push(X86Register::MtrrPhysMask4(mtrr_mask(
164                gpa_space_size,
165                (1 << gpa_space_size) - 1,
166            )));
167        }
168    }
169
170    Ok(result)
171}
172
/// Builds an MTRR physical mask for a region reaching up to `maximum_address`,
/// given a physical address space of `gpa_space_size` bits.
fn mtrr_mask(gpa_space_size: u8, maximum_address: u64) -> u64 {
    // Bit 11 of a PhysMask MSR is the valid/enable bit.
    const ENABLED: u64 = 1 << 11;

    // Start with the enable bit plus every physical-address bit above bit 11
    // set, covering the full gpa_space_size.
    let mut mask = (12..gpa_space_size).fold(ENABLED, |m, bit| m | 1u64 << bit);

    // Clear the low span of bits covered by maximum_address, stopping at the
    // first bit whose weight exceeds it.
    for bit in 12..gpa_space_size {
        if maximum_address < 1u64 << bit {
            break;
        }
        mask &= !(1u64 << bit);
    }

    mask
}