minimal_rt/reloc.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Code to apply relocations in an environment without a runtime.
//!
//! Do not reference global variables or function pointers (in particular, the
//! Rust formatting facilities or panic processing) from this code, as doing so
//! generates relocation records.

/// Stores the error code, line number, and a pointer to the file name in registers.
/// Cannot call into the panic facilities before relocation, as that would not be
/// debuggable at all.
macro_rules! panic_no_relocs {
    ($code:expr) => {{
        let _code = $code;
        crate::arch::dead_loop(_code as u64, line!() as u64, file!().as_ptr() as u64);
    }};
}

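/// Tags of `.dynamic` section entries (the ELF `DT_*` values) that are relevant
/// for locating the relocation tables; all other tags are ignored below.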
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(u64)]
#[expect(dead_code)]
enum ElfDynTag {
    Null = 0,
    RelA = 7,
    RelASz = 8,
    RelAEnt = 9,
    Rel = 17,
    RelSz = 18,
    RelEnt = 19,
    RelACount = 0x6ffffff9,
    RelCount = 0x6ffffffa,
}

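// Error codes passed to `panic_no_relocs!` so the failure reason is visible in
// a register when the dead loop is reached.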
const R_ERROR_RELA: u64 = 1;
const R_ERROR_RELASZ: u64 = 2;
const R_ERROR_REL: u64 = 3;
const R_ERROR_RELSZ: u64 = 4;

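/// Returns the architecture-specific `R_*_RELATIVE` relocation type, the only
/// relocation kind this code knows how to apply.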
const fn r_relative() -> u32 {
    // `cfg_if::cfg_if` and `cfg_if!` would not work in the const context.
    #[cfg(target_arch = "x86_64")]
    {
        const R_X64_RELATIVE: u32 = 8;
        R_X64_RELATIVE
    }

    #[cfg(target_arch = "aarch64")]
    {
        const R_AARCH64_RELATIVE: u32 = 0x403;
        R_AARCH64_RELATIVE
    }

    #[cfg(not(any(target_arch = "aarch64", target_arch = "x86_64")))]
    {
        compile_error!("Unsupported architecture")
    }
}

const R_RELATIVE: u32 = r_relative();

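/// An entry of the `.dynamic` section (ELF `Elf64_Dyn`): a tag and its value.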
#[derive(Clone, Copy)]
#[repr(C)]
struct Elf64Dyn {
    tag: ElfDynTag,
    val: usize,
}

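/// A relocation record with an explicit addend (ELF `Elf64_Rela`).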
#[derive(Clone, Copy)]
#[repr(C)]
struct Elf64Rela {
    offset: u64,
    info: u64,
    addend: u64,
}

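/// A relocation record without an explicit addend (ELF `Elf64_Rel`); the addend
/// is taken from the value already stored at the relocation target.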
#[derive(Clone, Copy)]
#[repr(C)]
struct Elf64Rel {
    offset: u64,
    info: u64,
}

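/// Extracts the relocation type from the `info` field (its low 32 bits; the
/// high bits hold the symbol index, which is unused for relative relocations).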
fn rela_type(rela: &Elf64Rela) -> u32 {
    rela.info as u32
}

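/// Extracts the relocation type from the `info` field of a `Rel` record.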
fn rel_type(rel: &Elf64Rel) -> u32 {
    rel.info as u32
}

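/// Applies the `Elf64Rel` records in `[begin, end)`: for each relative
/// relocation, adds `vaddr` to the value already stored at `mapped_addr +
/// offset`. Any other relocation type sends execution to the dead loop via
/// `panic_no_relocs!`.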
fn apply_rel(mapped_addr: u64, vaddr: u64, begin: usize, end: usize) {
    // SAFETY: constructing a slice of relocation records from
    // the pointer and the size coming from the `.dynamic` ELF section.
    let rel = unsafe {
        core::slice::from_raw_parts_mut(
            begin as *mut Elf64Rel,
            (end - begin) / size_of::<Elf64Rel>(),
        )
    };
    for rel in rel {
        if rel_type(rel) != R_RELATIVE {
            panic_no_relocs!(R_ERROR_REL)
        }

        let rel_addr = rel.offset.wrapping_add(mapped_addr) as *mut u64;

        // SAFETY: updating the address as prescribed by the ELF
        // ABI.
        unsafe {
            let rel = core::ptr::read_unaligned(rel_addr);
            core::ptr::write_unaligned(rel_addr, rel.wrapping_add(vaddr));
        }
    }
}

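/// Applies the `Elf64Rela` records in `[begin, end)`: for each relative
/// relocation, stores `addend + vaddr` at `mapped_addr + offset`. Any other
/// relocation type sends execution to the dead loop via `panic_no_relocs!`.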
fn apply_rela(mapped_addr: u64, vaddr: u64, begin: usize, end: usize) {
    // SAFETY: constructing a slice of relocation records from
    // the pointer and the size coming from the `.dynamic` ELF section.
    let rela = unsafe {
        core::slice::from_raw_parts_mut(
            begin as *mut Elf64Rela,
            (end - begin) / size_of::<Elf64Rela>(),
        )
    };
    for rel in rela {
        if rela_type(rel) != R_RELATIVE {
            panic_no_relocs!(R_ERROR_RELA);
        }

        // SAFETY: updating the address as prescribed by the ELF
        // ABI.
        unsafe {
            core::ptr::write_unaligned(
                rel.offset.wrapping_add(mapped_addr) as *mut u64,
                rel.addend.wrapping_add(vaddr),
            );
        }
    }
}

/// Apply relocations to the image mapped at `mapped_addr` so that it can be run
/// at `vaddr`, using the _DYNAMIC section at `dynamic_addr`.
///
/// # Safety
/// The caller must ensure that this is called only during startup, with the
/// appropriate arguments, since this updates code and data across the binary.
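///
/// A minimal sketch of a call site (illustrative only: the variable names and
/// how the three addresses are obtained are assumptions, not part of this
/// module):
///
/// ```ignore
/// // `image_base`: where the image is currently mapped in memory.
/// // `runtime_vaddr`: the address the image will run at.
/// // `dynamic_ptr`: the address of the image's `.dynamic` section.
/// unsafe { relocate(image_base, runtime_vaddr, dynamic_ptr) };
/// ```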
pub unsafe extern "C" fn relocate(mapped_addr: usize, vaddr: usize, dynamic_addr: usize) {
    if mapped_addr == dynamic_addr {
        // Empty dynamic section or wrong linker flags (no PIE?);
        // nothing to relocate.
        return;
    }

    let mut rela_offset = None;
    let mut rela_entry_size = 0;
    let mut rela_count = 0;

    let mut rel_offset = None;
    let mut rel_entry_size = 0;
    let mut rel_count = 0;

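    // Walk the `.dynamic` entries until the terminating `Null` tag, recording
    // where the REL/RELA tables live, their entry sizes, and how many relative
    // relocations they contain.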
    let mut dynamic = dynamic_addr as *mut Elf64Dyn;
    // SAFETY: Following the ELF specification. Not creating data races,
    // invalid values, dangling references, or modifying immutables.
    while unsafe { dynamic.read_unaligned().tag } != ElfDynTag::Null {
        // SAFETY: Following the ELF specification. Not creating data races,
        // invalid values, dangling references, or modifying immutables.
        let Elf64Dyn { tag, val } = unsafe { *dynamic };
        match tag {
            ElfDynTag::RelA => {
                rela_offset = Some(val);
            }
            ElfDynTag::RelAEnt => {
                rela_entry_size = val;
            }
            ElfDynTag::Rel => {
                rel_offset = Some(val);
            }
            ElfDynTag::RelEnt => {
                rel_entry_size = val;
            }
            ElfDynTag::RelACount => {
                rela_count = val;
            }
            ElfDynTag::RelCount => {
                rel_count = val;
            }
            _ => {}
        }

        dynamic = dynamic.wrapping_add(1);
    }

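    // Validate the entry size reported by the dynamic section, then apply each
    // relocation table that was found.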
    if let Some(rela_offset) = rela_offset {
        const RELA_ENTRY_SIZE: usize = size_of::<Elf64Rela>();
        if rela_entry_size != RELA_ENTRY_SIZE {
            panic_no_relocs!(R_ERROR_RELASZ);
        }

        let begin = mapped_addr + rela_offset;
        let end = begin + rela_count * RELA_ENTRY_SIZE;
        apply_rela(mapped_addr as u64, vaddr as u64, begin, end);
    }

    if let Some(rel_offset) = rel_offset {
        const REL_ENTRY_SIZE: usize = size_of::<Elf64Rel>();
        if rel_entry_size != REL_ENTRY_SIZE {
            panic_no_relocs!(R_ERROR_RELSZ);
        }

        let begin = mapped_addr + rel_offset;
        let end = begin + rel_count * REL_ENTRY_SIZE;
        apply_rel(mapped_addr as u64, vaddr as u64, begin, end);
    }
}