membacking/mapping_manager/manager.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Implements the mapping manager, which keeps track of the VA mappers and
//! their currently active mappings. It is responsible for invalidating mappings
//! in each VA range when they are torn down by the region manager.
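//!
//! Each VA mapper registers with the manager via [`MappingRequest::AddMapper`]
//! and then receives [`MapperRequest`]s telling it which ranges to map or
//! unmap.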

use super::mappable::Mappable;
use super::object_cache::ObjectCache;
use super::object_cache::ObjectId;
use super::va_mapper::VaMapper;
use super::va_mapper::VaMapperError;
use crate::RemoteProcess;
use futures::StreamExt;
use futures::future::join_all;
use inspect::Inspect;
use inspect::InspectMut;
use memory_range::MemoryRange;
use mesh::MeshPayload;
use mesh::rpc::Rpc;
use mesh::rpc::RpcSend;
use pal_async::task::Spawn;
use slab::Slab;
use std::sync::Arc;

/// The mapping manager.
#[derive(Debug)]
pub struct MappingManager {
    client: MappingManagerClient,
}

impl Inspect for MappingManager {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.client
            .req_send
            .send(MappingRequest::Inspect(req.defer()));
    }
}

impl MappingManager {
    /// Returns a new mapping manager that can map addresses up to `max_addr`.
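    ///
    /// A minimal usage sketch (the async `spawner` here is assumed, and error
    /// handling is elided):
    ///
    /// ```ignore
    /// // Manage mappings for up to 4 GiB of guest address space.
    /// let manager = MappingManager::new(spawner, 4 << 30);
    /// let mapper = manager.client().new_mapper().await?;
    /// ```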
    pub fn new(spawn: impl Spawn, max_addr: u64) -> Self {
        let (req_send, mut req_recv) = mesh::mpsc_channel();
        spawn
            .spawn("mapping_manager", {
                let mut task = MappingManagerTask::new();
                async move {
                    task.run(&mut req_recv).await;
                }
            })
            .detach();
        Self {
            client: MappingManagerClient {
                id: ObjectId::new(),
                req_send,
                max_addr,
            },
        }
    }

    /// Returns an object used to access the mapping manager, potentially from a
    /// remote process.
    pub fn client(&self) -> &MappingManagerClient {
        &self.client
    }
}

/// Provides access to the mapping manager.
#[derive(Debug, MeshPayload, Clone)]
pub struct MappingManagerClient {
    req_send: mesh::Sender<MappingRequest>,
    id: ObjectId,
    max_addr: u64,
}

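/// Per-process cache of VA mappers, keyed by the mapping manager's object ID.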
static MAPPER_CACHE: ObjectCache<VaMapper> = ObjectCache::new();

impl MappingManagerClient {
    /// Returns a VA mapper for this guest memory.
    ///
    /// Only one mapper instance is created per process for this memory, so
    /// this is safe to call multiple times.
    pub async fn new_mapper(&self) -> Result<Arc<VaMapper>, VaMapperError> {
        // Get the VA mapper from the mapper cache if possible to avoid keeping
        // multiple VA ranges for this memory per process.
        MAPPER_CACHE
            .get_or_insert_with(&self.id, async {
                VaMapper::new(self.req_send.clone(), self.max_addr, None).await
            })
            .await
    }

    /// Returns a VA mapper for this guest memory, mapping everything into the
    /// address space of `process`.
    ///
    /// Each call allocates a new, unique mapper.
    pub async fn new_remote_mapper(
        &self,
        process: RemoteProcess,
    ) -> Result<Arc<VaMapper>, VaMapperError> {
        Ok(Arc::new(
            VaMapper::new(self.req_send.clone(), self.max_addr, Some(process)).await?,
        ))
    }

    /// Adds an active mapping.
    ///
    /// TODO: currently this will panic if the mapping overlaps an existing
    /// mapping. This should be fixed to allow overlapping an existing mapping,
    /// in which case the old mappings will be split and replaced.
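    ///
    /// A hedged usage sketch (the `client` and `mappable` values here are
    /// assumed):
    ///
    /// ```ignore
    /// let range = MemoryRange::new(0x1000_0000..0x2000_0000);
    /// client.add_mapping(range, mappable, 0, true).await;
    /// // Later, tear the mapping down again.
    /// client.remove_mappings(range).await;
    /// ```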
    pub async fn add_mapping(
        &self,
        range: MemoryRange,
        mappable: Mappable,
        file_offset: u64,
        writable: bool,
    ) {
        let params = MappingParams {
            range,
            mappable,
            file_offset,
            writable,
        };

        self.req_send
            .call(MappingRequest::AddMapping, params)
            .await
            .unwrap();
    }

    /// Removes all mappings in `range`.
    ///
    /// TODO: allow this to split existing mappings.
    pub async fn remove_mappings(&self, range: MemoryRange) {
        self.req_send
            .call(MappingRequest::RemoveMappings, range)
            .await
            .unwrap();
    }
}

/// A mapping request message.
#[derive(MeshPayload)]
pub enum MappingRequest {
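    /// Registers a new mapper and returns its ID. The provided sender will
    /// receive the mapper's [`MapperRequest`]s.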
    AddMapper(Rpc<mesh::Sender<MapperRequest>, MapperId>),
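    /// Unregisters the mapper with the given ID.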
    RemoveMapper(MapperId),
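    /// Sends the mappings that overlap the given range to the specified
    /// mapper.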
    SendMappings(MapperId, MemoryRange),
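    /// Adds an active mapping.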
    AddMapping(Rpc<MappingParams, ()>),
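    /// Removes all mappings in the given range, invalidating them in any
    /// active mappers.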
    RemoveMappings(Rpc<MemoryRange, ()>),
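    /// Inspects the mapping manager's state.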
    Inspect(inspect::Deferred),
}

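/// The mapping manager task's state: the active mappings and the registered
/// mappers.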
#[derive(InspectMut)]
struct MappingManagerTask {
    #[inspect(with = "inspect_mappings")]
    mappings: Vec<Mapping>,
    #[inspect(skip)]
    mappers: Mappers,
}

fn inspect_mappings(mappings: &Vec<Mapping>) -> impl '_ + Inspect {
    inspect::adhoc(move |req| {
        let mut resp = req.respond();
        for mapping in mappings {
            resp.field(
                &mapping.params.range.to_string(),
                inspect::adhoc(|req| {
                    req.respond()
                        .field("writable", mapping.params.writable)
                        .hex("file_offset", mapping.params.file_offset);
                }),
            );
        }
    })
}

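/// An active mapping and the IDs of the mappers it has been sent to.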
struct Mapping {
    params: MappingParams,
    active_mappers: Vec<MapperId>,
}

/// The mapping parameters.
#[derive(MeshPayload, Clone)]
pub struct MappingParams {
    /// The memory range for the mapping.
    pub range: MemoryRange,
    /// The OS object to map.
    pub mappable: Mappable,
    /// The file offset into `mappable`.
    pub file_offset: u64,
    /// Whether to map the memory as writable.
    pub writable: bool,
}

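/// The set of registered VA mappers.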
struct Mappers {
    mappers: Slab<MapperComm>,
}

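/// The channel used to send requests to a single VA mapper.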
struct MapperComm {
    req_send: mesh::Sender<MapperRequest>,
}

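/// An identifier for a registered VA mapper.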
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, MeshPayload)]
pub struct MapperId(usize);

/// A request to a VA mapper.
#[derive(MeshPayload)]
pub enum MapperRequest {
    /// Map the specified mapping.
    Map(MappingParams),
    /// There is no mapping for the specified range, so release anything waiting
    /// on such a mapping to arrive.
    NoMapping(MemoryRange),
    /// Unmap the specified range and send a response when it's done.
    Unmap(Rpc<MemoryRange, ()>),
}

impl MappingManagerTask {
    fn new() -> Self {
        Self {
            mappers: Mappers {
                mappers: Slab::new(),
            },
            mappings: Vec::new(),
        }
    }

    async fn run(&mut self, req_recv: &mut mesh::Receiver<MappingRequest>) {
        while let Some(req) = req_recv.next().await {
            match req {
                MappingRequest::AddMapper(rpc) => rpc.handle_sync(|send| self.add_mapper(send)),
                MappingRequest::RemoveMapper(id) => {
                    self.remove_mapper(id);
                }
                MappingRequest::SendMappings(id, range) => {
                    self.send_mappings(id, range);
                }
                MappingRequest::AddMapping(rpc) => {
                    rpc.handle_sync(|params| self.add_mapping(params))
                }
                MappingRequest::RemoveMappings(rpc) => {
                    rpc.handle(async |range| self.remove_mappings(range).await)
                        .await
                }
                MappingRequest::Inspect(deferred) => deferred.inspect(&mut *self),
            }
        }
    }

    fn add_mapper(&mut self, req_send: mesh::Sender<MapperRequest>) -> MapperId {
        let id = self.mappers.mappers.insert(MapperComm { req_send });
        tracing::debug!(?id, "adding mapper");
        MapperId(id)
    }

    fn remove_mapper(&mut self, id: MapperId) {
        tracing::debug!(?id, "removing mapper");
        self.mappers.mappers.remove(id.0);
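        // Forget this mapper on any mappings that listed it as active.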
        for mapping in &mut self.mappings {
            mapping.active_mappers.retain(|m| m != &id);
        }
    }

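    /// Sends the mappings that overlap `range` to mapper `id`, marking it as
    /// active on each one. Gaps with no mapping are reported via
    /// [`MapperRequest::NoMapping`].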
    fn send_mappings(&mut self, id: MapperId, mut range: MemoryRange) {
        while !range.is_empty() {
            // Find the next mapping that overlaps range.
            let (this_end, params) = if let Some(mapping) = self
                .mappings
                .iter_mut()
                .filter(|mapping| mapping.params.range.overlaps(&range))
                .min_by_key(|mapping| mapping.params.range.start())
            {
                if mapping.params.range.start() <= range.start() {
                    if !mapping.active_mappers.contains(&id) {
                        mapping.active_mappers.push(id);
                    }
                    // The next mapping overlaps with the start of our range.
                    (
                        mapping.params.range.end().min(range.end()),
                        Some(mapping.params.clone()),
                    )
                } else {
                    // There's a gap before the next mapping.
                    (mapping.params.range.start(), None)
                }
            } else {
                // No matching mappings, consume the rest of the range.
                (range.end(), None)
            };
            let this_range = MemoryRange::new(range.start()..this_end);
            let req = if let Some(params) = params {
                tracing::debug!(range = %this_range, full_range = %params.range, "sending mapping for range");
                MapperRequest::Map(params)
            } else {
                tracing::debug!(range = %this_range, "no mapping for range");
                MapperRequest::NoMapping(this_range)
            };
            self.mappers.mappers[id.0].req_send.send(req);
            range = MemoryRange::new(this_end..range.end());
        }
    }

    fn add_mapping(&mut self, params: MappingParams) {
        tracing::debug!(range = %params.range, "adding mapping");

        assert!(!self.mappings.iter().any(|m| m.params.range == params.range));

        self.mappings.push(Mapping {
            params,
            active_mappers: Vec::new(),
        });
    }

    async fn remove_mappings(&mut self, range: MemoryRange) {
        let mut mappers = Vec::new();
        self.mappings.retain_mut(|mapping| {
            if !range.contains(&mapping.params.range) {
                assert!(
                    !range.overlaps(&mapping.params.range),
                    "no partial unmappings allowed"
                );
                return true;
            }
            tracing::debug!(range = %mapping.params.range, "removing mapping");
            mappers.append(&mut mapping.active_mappers);
            false
        });
        mappers.sort();
        mappers.dedup();
        self.mappers.invalidate(&mappers, range).await;
    }
}

impl Mappers {
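    /// Sends an unmap request for `range` to each mapper in `ids` and waits
    /// for all of them to complete.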
    async fn invalidate(&self, ids: &[MapperId], range: MemoryRange) {
        tracing::debug!(mapper_count = ids.len(), %range, "sending invalidations");
        join_all(ids.iter().map(async |&MapperId(i)| {
            if let Err(err) = self.mappers[i]
                .req_send
                .call(MapperRequest::Unmap, range)
                .await
            {
                tracing::warn!(
                    error = &err as &dyn std::error::Error,
                    "mapper dropped invalidate request"
                );
            }
        }))
        .await;
    }
}