disklayer_sqlite/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! SQLite-backed disk layer implementation.
//!
//! At this time, **this layer is only designed for use in dev/test scenarios!**
//!
//! # DISCLAIMER: Stability
//!
//! There are no stability guarantees around the on-disk data format! The schema
//! can and will change without warning!
//!
//! # DISCLAIMER: Performance
//!
//! This implementation has only been minimally optimized! Don't expect to get
//! incredible perf from this disk backend!
//!
//! Notably:
//!
//! - Data is stored within a single `sectors` table as tuples of `(sector:
//!   INTEGER, data: BLOB(sector_size))`. All data is accessed in
//!   `sector_size` chunks (i.e: without performing any kind of adjacent-sector
//!   coalescing).
//! - Reads and writes currently allocate many temporary `Vec<u8>` buffers per
//!   operation, without any buffer reuse.
//!
//! These design choices were made with simplicity and expediency in mind, given
//! that the primary use-case for this backend is for dev/test scenarios. If
//! performance ever becomes a concern, there are various optimizations that
//! should be possible to implement here, though quite frankly, investing in a
//! cross-platform QCOW2 or VHDX disk backend is likely a far more worthwhile
//! endeavor.
//!
//! # Context
//!
//! In late 2024, OpenVMM was missing a _cross-platform_ disk backend that
//! supported the following key features:
//!
//! - Used a dynamically-sized file as the disk's backing store
//! - Supported snapshots / differencing disks
//!
//! While OpenVMM will eventually need to support one or more of the current
//! "industry standard" virtual disk formats that support these features (e.g:
//! QCOW2, VHDX), we really wanted some sort of "stop-gap" solution to unblock
//! various dev/test use-cases.
//!
//! And thus, `disklayer_sqlite` was born!
//!
//! The initial implementation took less than a day to get up and running, and
//! worked "well enough" to support the dev/test scenarios we were interested
//! in, such as:
//!
//! - Having a cross-platform _sparsely allocated_ virtual disk file.
//! - Having a _persistent_ diff-disk on-top of an existing disk (as opposed to
//!   `ramdiff`, which is in-memory and _ephemeral_)
//! - Having a "cache" layer for JIT-accessed disks, such as `disk_blob`
//!
//! The idea of using SQLite as a backing store - while wacky - proved to be an
//! excellent way to quickly bring up a dynamically-sized, sparsely-allocated
//! disk format for testing in OpenVMM.
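//!
//! # Example
//!
//! A minimal sketch of formatting a fresh, standalone layer (the path and
//! sizes here are illustrative):
//!
//! ```no_run
//! use disklayer_sqlite::{FormatParams, SqliteDiskLayer};
//! use std::path::Path;
//!
//! fn open_scratch_layer() -> anyhow::Result<SqliteDiskLayer> {
//!     SqliteDiskLayer::new(
//!         Path::new("scratch.dbhd"),
//!         false, // read_only
//!         Some(FormatParams {
//!             logically_read_only: false,
//!             len: 4 * 1024 * 1024, // must be a multiple of sector_size
//!             sector_size: 512,
//!         }),
//!     )
//! }
//! ```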

#![forbid(unsafe_code)]

mod auto_cache;
pub mod resolver;

use anyhow::Context;
use blocking::unblock;
use disk_backend::DiskError;
use disk_backend::UnmapBehavior;
use disk_layered::LayerAttach;
use disk_layered::LayerIo;
use disk_layered::SectorMarker;
use disk_layered::WriteNoOverwrite;
use futures::lock::Mutex;
use futures::lock::OwnedMutexGuard;
use guestmem::MemoryRead;
use guestmem::MemoryWrite;
use inspect::Inspect;
use rusqlite::Connection;
use scsi_buffers::RequestBuffers;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

/// Formatting parameters provided to [`FormatOnAttachSqliteDiskLayer::new`].
///
/// Optional parameters which are not provided will be determined by reading the
/// metadata of the layer being attached to.
#[derive(Inspect, Copy, Clone)]
pub struct IncompleteFormatParams {
    /// Should the layer be considered logically read only (i.e: a cache layer)
    pub logically_read_only: bool,
    /// The size of the layer in bytes.
    pub len: Option<u64>,
}

/// Formatting parameters provided to [`SqliteDiskLayer::new`].
#[derive(Inspect, Copy, Clone)]
pub struct FormatParams {
    /// Should the layer be considered logically read only (i.e: a cache layer)
    pub logically_read_only: bool,
    /// The size of the layer in bytes. Must be divisible by `sector_size`.
    pub len: u64,
    /// The size of each sector.
    pub sector_size: u32,
}

/// A disk layer backed by sqlite, which lazily infers its topology from the
/// layer it is being stacked on-top of.
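///
/// A minimal sketch of creating a cache layer whose size is inferred from the
/// layer below at attach time (the path is illustrative):
///
/// ```no_run
/// use disklayer_sqlite::{FormatOnAttachSqliteDiskLayer, IncompleteFormatParams};
///
/// let layer = FormatOnAttachSqliteDiskLayer::new(
///     "cache.dbhd".into(),
///     false, // read_only
///     IncompleteFormatParams {
///         logically_read_only: true, // i.e: a cache layer
///         len: None,                 // inferred from the lower layer on attach
///     },
/// );
/// ```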
pub struct FormatOnAttachSqliteDiskLayer {
    dbhd_path: PathBuf,
    read_only: bool,
    format_dbhd: IncompleteFormatParams,
}

impl FormatOnAttachSqliteDiskLayer {
    /// Create a new sqlite-backed disk layer, which is formatted when it is
    /// attached.
    pub fn new(dbhd_path: PathBuf, read_only: bool, format_dbhd: IncompleteFormatParams) -> Self {
        Self {
            dbhd_path,
            read_only,
            format_dbhd,
        }
    }
}

/// A disk layer backed entirely by sqlite.
#[derive(Inspect)]
pub struct SqliteDiskLayer {
    #[inspect(skip)]
    conn: Arc<Mutex<Connection>>, // FUTURE: switch to connection-pool instead
    meta: schema::DiskMeta,
}

impl SqliteDiskLayer {
    /// Create a new sqlite-backed disk layer.
    pub fn new(
        dbhd_path: &Path,
        read_only: bool,
        format_dbhd: Option<FormatParams>,
    ) -> anyhow::Result<Self> {
        // DEVNOTE: sqlite _really_ wants to be in control of opening the file,
        // since it also wants to read/write the runtime "sidecar" files that
        // get created when accessing the DB (i.e: the `*-shm` and `*-wal`
        // files)
        //
        // This will make it tricky to sandbox SQLite in the future...
        //
        // One idea: maybe we could implement a small SQLite `vfs` shim that
        // lets us pre-open those particular files on the caller side, and hand
        // them to sqlite when requested (vs. having it `open()` them itself?)
        let conn = Connection::open_with_flags(dbhd_path, {
            use rusqlite::OpenFlags;

            let mut flags = OpenFlags::SQLITE_OPEN_NO_MUTEX;

            if read_only {
                flags |= OpenFlags::SQLITE_OPEN_READ_ONLY;
            } else {
                flags |= OpenFlags::SQLITE_OPEN_READ_WRITE;
            }

            // FUTURE: if/when the VFS layer is implemented, it _may_ be worth
            // removing this flag entirely, and relying on the VFS to ensure
            // that the (possibly blank) db file has been created. Emphasis on
            // the word "may", as it's unclear what the best approach will be
            // until if/when we have more of the VFS infrastructure in place.
            if format_dbhd.is_some() {
                flags |= OpenFlags::SQLITE_OPEN_CREATE;
            }

            flags
        })?;

        let meta = if let Some(FormatParams {
            logically_read_only,
            len,
            sector_size,
        }) = format_dbhd
        {
            use rusqlite::config::DbConfig;

            // Wipe any existing contents.
            //
            // see https://www.sqlite.org/c3ref/c_dbconfig_defensive.html#sqlitedbconfigresetdatabase
            conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)?;
            conn.execute("VACUUM", ())?;
            conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)?;

            // Set core database config, and initialize table structure
            conn.pragma_update(None, "journal_mode", "WAL")?;
            conn.execute(schema::DEFINE_TABLE_SECTORS, [])?;
            conn.execute(schema::DEFINE_TABLE_METADATA, [])?;

            if len % sector_size as u64 != 0 {
                anyhow::bail!(
                    "failed to format: len={len} must be multiple of sector_size={sector_size}"
                );
            }
            let sector_count = len / sector_size as u64;

            let meta = schema::DiskMeta {
                logically_read_only,
                sector_count,
                sector_size,
            };

            conn.execute(
                "INSERT INTO meta VALUES (json(?))",
                [serde_json::to_string(&meta).unwrap()],
            )?;

            meta
        } else {
            use rusqlite::OptionalExtension;
            let data: String = conn
                .query_row("SELECT json_extract(metadata, '$') FROM meta", [], |row| {
                    row.get(0)
                })
                .optional()?
                .context("missing `meta` table")?;
            serde_json::from_str(&data)?
        };

        Ok(SqliteDiskLayer {
            conn: Arc::new(Mutex::new(conn)),
            meta,
        })
    }

    async fn write_maybe_overwrite(
        &self,
        buffers: &RequestBuffers<'_>,
        sector: u64,
        overwrite: bool,
    ) -> Result<(), DiskError> {
        assert!(!(overwrite && self.meta.logically_read_only));

        let count = buffers.len() / self.meta.sector_size as usize;
        tracing::trace!(sector, count, "write");

        let buf = buffers.reader().read_all()?;
        unblock({
            let conn = self.conn.clone().lock_owned().await;
            let sector_size = self.meta.sector_size;
            move || write_sectors(conn, sector_size, sector, buf, overwrite)
        })
        .await
        .map_err(|e| DiskError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?;

        Ok(())
    }
}

impl LayerAttach for FormatOnAttachSqliteDiskLayer {
    type Error = anyhow::Error;
    type Layer = SqliteDiskLayer;

    async fn attach(
        self,
        lower_layer_metadata: Option<disk_layered::DiskLayerMetadata>,
    ) -> Result<Self::Layer, Self::Error> {
        let len = {
            let lower_len = lower_layer_metadata
                .as_ref()
                .map(|m| m.sector_count * m.sector_size as u64);
            self.format_dbhd
                .len
                .or(lower_len)
                .context("no base layer to infer len from")?
        };
        // FUTURE: make sector-size configurable
        let sector_size = lower_layer_metadata.map(|x| x.sector_size).unwrap_or(512);

        SqliteDiskLayer::new(
            &self.dbhd_path,
            self.read_only,
            Some(FormatParams {
                logically_read_only: self.format_dbhd.logically_read_only,
                len,
                sector_size,
            }),
        )
    }
}

impl LayerIo for SqliteDiskLayer {
    fn layer_type(&self) -> &str {
        "sqlite"
    }

    fn sector_count(&self) -> u64 {
        self.meta.sector_count
    }

    fn sector_size(&self) -> u32 {
        self.meta.sector_size
    }

    fn is_logically_read_only(&self) -> bool {
        self.meta.logically_read_only
    }

    fn disk_id(&self) -> Option<[u8; 16]> {
        None
    }

    fn physical_sector_size(&self) -> u32 {
        self.meta.sector_size
    }

    fn is_fua_respected(&self) -> bool {
        false
    }

    async fn read(
        &self,
        buffers: &RequestBuffers<'_>,
        sector: u64,
        mut marker: SectorMarker<'_>,
    ) -> Result<(), DiskError> {
        let sector_count = (buffers.len() / self.meta.sector_size as usize) as u64;
        let end_sector = sector + sector_count;
        tracing::trace!(sector, sector_count, "read");
        if end_sector > self.meta.sector_count {
            return Err(DiskError::IllegalBlock);
        }

        let valid_sectors = unblock({
            let conn = self.conn.clone().lock_owned().await;
            let end_sector = sector + sector_count;
            let sector_size = self.meta.sector_size;
            move || read_sectors(conn, sector_size, sector, end_sector)
        })
        .await
        .map_err(|e| DiskError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?;

        for (s, data) in valid_sectors {
            let offset = (s - sector) as usize * self.meta.sector_size as usize;
            let subrange = buffers.subrange(offset, self.meta.sector_size as usize);
            let mut writer = subrange.writer();
            match data {
                SectorKind::AllZero => writer.zero(self.meta.sector_size as usize)?,
                SectorKind::Data(data) => writer.write(&data)?,
            };

            marker.set(s);
        }

        Ok(())
    }

    async fn write(
        &self,
        buffers: &RequestBuffers<'_>,
        sector: u64,
        _fua: bool,
    ) -> Result<(), DiskError> {
        self.write_maybe_overwrite(buffers, sector, true).await
    }

    fn write_no_overwrite(&self) -> Option<impl WriteNoOverwrite> {
        Some(self)
    }

    async fn sync_cache(&self) -> Result<(), DiskError> {
        tracing::trace!("sync_cache");

        unblock({
            let mut conn = self.conn.clone().lock_owned().await;
            move || -> rusqlite::Result<()> {
                // https://sqlite-users.sqlite.narkive.com/LX75NOma/forcing-a-manual-fsync-in-wal-normal-mode
                conn.pragma_update(None, "synchronous", "FULL")?;
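                // Committing a (logically no-op) write transaction while
                // `synchronous=FULL` forces an fsync of the WAL, flushing all
                // previously-written data to stable storage. Updating
                // `user_version` is a cheap way to create such a write.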
                {
                    let tx = conn.transaction()?;
                    tx.pragma_update(None, "user_version", "0")?;
                    tx.commit()?;
                }
                conn.pragma_update(None, "synchronous", "NORMAL")?;
                Ok(())
            }
        })
        .await
        .map_err(|e| DiskError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))
    }

    async fn unmap(
        &self,
        sector_offset: u64,
        sector_count: u64,
        _block_level_only: bool,
        next_is_zero: bool,
    ) -> Result<(), DiskError> {
        tracing::trace!(sector_offset, sector_count, "unmap");
        if sector_offset + sector_count > self.meta.sector_count {
            return Err(DiskError::IllegalBlock);
        }

        unblock({
            let conn = self.conn.clone().lock_owned().await;
            move || unmap_sectors(conn, sector_offset, sector_count, next_is_zero)
        })
        .await
        .map_err(|e| DiskError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?;

        Ok(())
    }

    fn unmap_behavior(&self) -> UnmapBehavior {
        UnmapBehavior::Zeroes
    }

    fn optimal_unmap_sectors(&self) -> u32 {
        1
    }
}

impl WriteNoOverwrite for SqliteDiskLayer {
    async fn write_no_overwrite(
        &self,
        buffers: &RequestBuffers<'_>,
        sector: u64,
    ) -> Result<(), DiskError> {
        self.write_maybe_overwrite(buffers, sector, false).await
    }
}

enum SectorKind {
    AllZero,
    Data(Vec<u8>),
}

// FUTURE: read from sqlite directly into `RequestBuffers`.
fn read_sectors(
    conn: OwnedMutexGuard<Connection>,
    sector_size: u32,
    start_sector: u64,
    end_sector: u64,
) -> anyhow::Result<Vec<(u64, SectorKind)>> {
    let mut select_stmt = conn.prepare_cached(
        "SELECT sector, data
        FROM sectors
        WHERE sector >= ? AND sector < ?
        ORDER BY sector ASC",
    )?;
    let mut rows = select_stmt.query(rusqlite::params![start_sector, end_sector])?;

    let mut res = Vec::new();
    while let Some(row) = rows.next()? {
        let sector: u64 = row.get(0)?;
        let data: Option<&[u8]> = row.get_ref(1)?.as_blob_or_null()?;
        let data = if let Some(data) = data {
            if data.len() != sector_size as usize {
                anyhow::bail!(
                    "db contained sector with unexpected size (expected={}, found={}, sector={:#x})",
                    sector_size,
                    data.len(),
                    sector
                )
            }
            SectorKind::Data(data.into())
        } else {
            SectorKind::AllZero
        };
        res.push((sector, data));
    }

    Ok(res)
}

// FUTURE: write into sqlite directly from `RequestBuffers`.
fn write_sectors(
    mut conn: OwnedMutexGuard<Connection>,
    sector_size: u32,
    mut sector: u64,
    buf: Vec<u8>,
    overwrite: bool,
) -> Result<(), rusqlite::Error> {
    let tx = conn.transaction()?;
    {
        let mut stmt = if overwrite {
            tx.prepare_cached("INSERT OR REPLACE INTO sectors (sector, data) VALUES (?, ?)")?
        } else {
            tx.prepare_cached("INSERT OR IGNORE INTO sectors (sector, data) VALUES (?, ?)")?
        };

        let chunks = buf.chunks_exact(sector_size as usize);
        assert!(chunks.remainder().is_empty());
        for chunk in chunks {
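            // All-zero sectors are stored as NULL rather than as a blob of
            // zeroes, keeping the backing file compact (see the schema
            // comments on `DEFINE_TABLE_SECTORS`).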
            if chunk.iter().all(|x| *x == 0) {
                stmt.execute(rusqlite::params![sector, rusqlite::types::Null])?;
            } else {
                stmt.execute(rusqlite::params![sector, chunk])?;
            };

            sector += 1;
        }
    }
    tx.commit()?;

    Ok(())
}

fn unmap_sectors(
    mut conn: OwnedMutexGuard<Connection>,
    sector_offset: u64,
    sector_count: u64,
    next_is_zero: bool,
) -> Result<(), rusqlite::Error> {
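    // If the layers below this one already read as zero, the rows can simply
    // be deleted: absent rows are "unset", so reads fall through to the
    // (zero) layers below. Otherwise, explicit all-zero rows (data = NULL)
    // must be written to mask whatever data the lower layers contain.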
    if next_is_zero {
        let mut clear_stmt =
            conn.prepare_cached("DELETE FROM sectors WHERE sector BETWEEN ? AND ?")?;
        clear_stmt.execute(rusqlite::params![
            sector_offset,
            sector_offset + sector_count - 1
        ])?;
    } else {
        let tx = conn.transaction()?;
        {
            let mut stmt =
                tx.prepare_cached("INSERT OR REPLACE INTO sectors (sector, data) VALUES (?, ?)")?;

            for sector in sector_offset..(sector_offset + sector_count) {
                stmt.execute(rusqlite::params![sector, rusqlite::types::Null])?;
            }
        }
        tx.commit()?;
    }

    Ok(())
}

mod schema {
    use inspect::Inspect;
    use serde::Deserialize;
    use serde::Serialize;

    // DEVNOTE: SQLite actually saves the _plaintext_ of CREATE TABLE
    // statements in its file format, which makes it a pretty good place to
    // stash inline comments about the schema being used.
    //
    // DEVNOTE: the choice to use the len of the blob as a marker for
    // all-zero / all-one sectors has not been profiled relative to other
    // implementations (e.g: having a third "kind" column).
    pub const DEFINE_TABLE_SECTORS: &str = r#"
CREATE TABLE sectors (
    -- if data is NULL, that indicates an all-zero sector.
    -- otherwise, data has len == SECTOR_SIZE, containing the sector data.
    sector INTEGER NOT NULL,
    data   BLOB,
    PRIMARY KEY (sector)
)
"#; // TODO?: enforce sqlite >3.37.0 so we can use STRICT

    // DEVNOTE: Given that this is a singleton table, we might as well use JSON
    // + serde to store whatever metadata we want here, vs. trying to bend our
    // metadata structure to sqlite's native data types.
    //
    // Using JSON (vs, say, protobuf) has the added benefit of allowing existing
    // external sqlite tooling to more easily read and manipulate the metadata
    // using sqlite's built-in JSON handling functions.
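    //
    // For example, the sector size can be queried from the sqlite3 CLI
    // (illustrative invocation):
    //
    //     sqlite3 disk.dbhd "SELECT json_extract(metadata, '$.sector_size') FROM meta"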
    pub const DEFINE_TABLE_METADATA: &str = r#"
CREATE TABLE meta (
    metadata TEXT NOT NULL -- stored as JSON
)
"#;

    #[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize, Inspect)]
    pub struct DiskMeta {
        pub logically_read_only: bool,
        pub sector_count: u64,
        pub sector_size: u32,
    }
}