nvme_driver/
queues.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Implementation of submission and completion queues.
5
6use super::spec;
7use crate::driver::save_restore::CompletionQueueSavedState;
8use crate::driver::save_restore::SubmissionQueueSavedState;
9use crate::registers::DeviceRegisters;
10use inspect::Inspect;
11use safeatomic::AtomicSliceOps;
12use std::sync::atomic::AtomicU64;
13use std::sync::atomic::Ordering::Acquire;
14use std::sync::atomic::Ordering::Relaxed;
15use user_driver::DeviceBacking;
16use user_driver::memory::MemoryBlock;
17
/// Driver-side state of an NVMe submission queue.
#[derive(Inspect)]
pub(crate) struct SubmissionQueue {
    /// Submission queue ID, also used to select the doorbell register.
    sqid: u16,
    /// Index of the oldest entry not yet consumed by the controller;
    /// advanced externally via [`SubmissionQueue::update_head`].
    head: u32,
    /// Index of the next entry the driver will write.
    tail: u32,
    /// The tail value most recently written to the doorbell.
    committed_tail: u32,
    /// Queue size in entries.
    len: u32,
    /// Backing memory for the queue entries (not inspected).
    #[inspect(skip)]
    mem: MemoryBlock,
}
28
/// Error returned by [`SubmissionQueue::write`] when the queue has no free
/// slots (one slot is always left unused to distinguish full from empty).
#[derive(Debug)]
pub(crate) struct QueueFull;
31
32impl SubmissionQueue {
33    pub fn new(sqid: u16, len: u16, mem: MemoryBlock) -> Self {
34        Self {
35            sqid,
36            head: 0,
37            tail: 0,
38            committed_tail: 0,
39            len: len.into(),
40            mem,
41        }
42    }
43
44    pub fn id(&self) -> u16 {
45        self.sqid
46    }
47
48    pub fn update_head(&mut self, head: u16) {
49        let head = head as u32;
50        assert!(head < self.len);
51        self.head = head;
52    }
53
54    pub fn is_full(&self) -> bool {
55        advance(self.tail, self.len) == self.head
56    }
57
58    pub fn write(&mut self, command: spec::Command) -> Result<(), QueueFull> {
59        let next_tail = advance(self.tail, self.len);
60        if next_tail == self.head {
61            return Err(QueueFull);
62        }
63        self.mem
64            .write_obj(self.tail as usize * size_of_val(&command), &command);
65        self.tail = next_tail;
66        Ok(())
67    }
68
69    pub fn commit<T: DeviceBacking>(&mut self, region: &DeviceRegisters<T>) {
70        if self.tail != self.committed_tail {
71            safe_intrinsics::store_fence();
72            region.doorbell(self.sqid, false, self.tail);
73            self.committed_tail = self.tail;
74        }
75    }
76
77    /// Saves queue data for servicing.
78    pub fn save(&self) -> SubmissionQueueSavedState {
79        SubmissionQueueSavedState {
80            sqid: self.sqid,
81            head: self.head,
82            tail: self.tail,
83            committed_tail: self.committed_tail,
84            len: self.len,
85        }
86    }
87
88    /// Restores queue data after servicing.
89    pub fn restore(
90        mem: MemoryBlock,
91        saved_state: &SubmissionQueueSavedState,
92    ) -> anyhow::Result<Self> {
93        let SubmissionQueueSavedState {
94            sqid,
95            head,
96            tail,
97            committed_tail,
98            len,
99        } = saved_state;
100        Ok(Self {
101            sqid: *sqid,
102            head: *head,
103            tail: *tail,
104            committed_tail: *committed_tail,
105            len: *len,
106            mem,
107        })
108    }
109}
110
/// Driver-side state of an NVMe completion queue.
#[derive(Inspect)]
pub(crate) struct CompletionQueue {
    /// Completion queue ID, also used to select the doorbell register.
    cqid: u16,
    /// Index of the next entry to read.
    head: u32,
    /// The head value most recently written to the doorbell.
    committed_head: u32,
    /// Queue size in entries.
    len: u32,
    /// Expected phase tag of the next valid entry; flips on each
    /// wraparound of `head`.
    phase: bool,
    /// Backing memory for the queue entries (not inspected).
    #[inspect(skip)]
    mem: MemoryBlock,
}
122
123impl CompletionQueue {
124    pub fn new(cqid: u16, len: u16, mem: MemoryBlock) -> CompletionQueue {
125        Self {
126            cqid,
127            head: 0,
128            committed_head: 0,
129            len: len.into(),
130            phase: true,
131            mem,
132        }
133    }
134
135    pub fn _id(&self) -> u16 {
136        self.cqid
137    }
138
139    pub fn read(&mut self) -> Option<spec::Completion> {
140        let completion_mem = self.mem.as_slice()
141            [self.head as usize * size_of::<spec::Completion>()..]
142            [..size_of::<spec::Completion>() * 2]
143            .as_atomic_slice::<AtomicU64>()
144            .unwrap();
145
146        // Check the phase bit, using an acquire read to ensure the rest of the
147        // completion is read with or after the phase bit.
148        let high = completion_mem[1].load(Acquire);
149        let status = spec::CompletionStatus::from((high >> 48) as u16);
150        if status.phase() != self.phase {
151            return None;
152        }
153        let low = completion_mem[0].load(Relaxed);
154        let completion: spec::Completion = zerocopy::transmute!([low, high]);
155        self.head += 1;
156        if self.head == self.len {
157            self.head = 0;
158            self.phase = !self.phase;
159        }
160        Some(completion)
161    }
162
163    pub fn commit<T: DeviceBacking>(&mut self, registers: &DeviceRegisters<T>) {
164        if self.head != self.committed_head {
165            safe_intrinsics::store_fence();
166            registers.doorbell(self.cqid, true, self.head);
167            self.committed_head = self.head;
168        }
169    }
170
171    /// Saves queue data for servicing.
172    pub fn save(&self) -> CompletionQueueSavedState {
173        CompletionQueueSavedState {
174            cqid: self.cqid,
175            head: self.head,
176            committed_head: self.committed_head,
177            len: self.len,
178            phase: self.phase,
179        }
180    }
181
182    /// Restores queue data after servicing.
183    pub fn restore(
184        mem: MemoryBlock,
185        saved_state: &CompletionQueueSavedState,
186    ) -> anyhow::Result<Self> {
187        let CompletionQueueSavedState {
188            cqid,
189            head,
190            committed_head,
191            len,
192            phase,
193        } = saved_state;
194
195        Ok(Self {
196            cqid: *cqid,
197            head: *head,
198            committed_head: *committed_head,
199            len: *len,
200            phase: *phase,
201            mem,
202        })
203    }
204}
205
/// Returns the ring index following `n` in a queue of `l` entries,
/// wrapping back to 0 at the end.
fn advance(n: u32, l: u32) -> u32 {
    let next = n + 1;
    if next < l { next } else { 0 }
}