1use super::spec;
7use crate::driver::save_restore::CompletionQueueSavedState;
8use crate::driver::save_restore::SubmissionQueueSavedState;
9use crate::registers::DeviceRegisters;
10use inspect::Inspect;
11use safeatomic::AtomicSliceOps;
12use std::sync::atomic::AtomicU64;
13use std::sync::atomic::Ordering::Acquire;
14use std::sync::atomic::Ordering::Relaxed;
15use user_driver::DeviceBacking;
16use user_driver::memory::MemoryBlock;
17
/// Driver-side state of an NVMe submission queue: the queue pointers plus the
/// memory block backing the queue entries.
#[derive(Inspect)]
pub(crate) struct SubmissionQueue {
    /// Queue ID, used when ringing the doorbell.
    sqid: u16,
    /// Driver's view of the controller's head pointer (entries consumed by
    /// the controller), updated via `update_head`.
    head: u32,
    /// Next free slot to write a command into.
    tail: u32,
    /// Last tail value written to the doorbell register.
    committed_tail: u32,
    /// Number of entries in the queue.
    len: u32,
    #[inspect(skip)]
    mem: MemoryBlock,
}
28
/// Error returned by [`SubmissionQueue::write`] when there is no free slot
/// for another command.
#[derive(Debug)]
pub(crate) struct QueueFull;
31
32impl SubmissionQueue {
33 pub fn new(sqid: u16, len: u16, mem: MemoryBlock) -> Self {
34 tracing::debug!(sqid, len, pfns = ?mem.pfns(), "new submission queue");
35
36 Self {
37 sqid,
38 head: 0,
39 tail: 0,
40 committed_tail: 0,
41 len: len.into(),
42 mem,
43 }
44 }
45
    /// Returns the queue ID.
    pub fn id(&self) -> u16 {
        self.sqid
    }
49
50 pub fn update_head(&mut self, head: u16) {
51 let head = head as u32;
52 assert!(head < self.len);
53 self.head = head;
54 }
55
56 pub fn is_full(&self) -> bool {
57 advance(self.tail, self.len) == self.head
58 }
59
60 pub fn write(&mut self, command: spec::Command) -> Result<(), QueueFull> {
61 let next_tail = advance(self.tail, self.len);
62 if next_tail == self.head {
63 return Err(QueueFull);
64 }
65 self.mem
66 .write_obj(self.tail as usize * size_of_val(&command), &command);
67 self.tail = next_tail;
68 Ok(())
69 }
70
    /// Rings the tail doorbell to publish any commands written since the
    /// last commit. No-op if nothing new has been written.
    pub fn commit<T: DeviceBacking>(&mut self, region: &DeviceRegisters<T>) {
        if self.tail != self.committed_tail {
            // Make the queue-entry writes visible to the device before the
            // doorbell write that publishes them.
            safe_intrinsics::store_fence();
            region.doorbell(self.sqid, false, self.tail);
            self.committed_tail = self.tail;
        }
    }
78
79 pub fn save(&self) -> SubmissionQueueSavedState {
81 SubmissionQueueSavedState {
82 sqid: self.sqid,
83 head: self.head,
84 tail: self.tail,
85 committed_tail: self.committed_tail,
86 len: self.len,
87 }
88 }
89
90 pub fn restore(
92 mem: MemoryBlock,
93 saved_state: &SubmissionQueueSavedState,
94 ) -> anyhow::Result<Self> {
95 let SubmissionQueueSavedState {
96 sqid,
97 head,
98 tail,
99 committed_tail,
100 len,
101 } = saved_state;
102 Ok(Self {
103 sqid: *sqid,
104 head: *head,
105 tail: *tail,
106 committed_tail: *committed_tail,
107 len: *len,
108 mem,
109 })
110 }
111}
112
/// Driver-side state of an NVMe completion queue: the head pointer, the
/// expected phase bit, and the memory block backing the completion entries.
#[derive(Inspect)]
pub(crate) struct CompletionQueue {
    /// Queue ID, used when ringing the doorbell.
    cqid: u16,
    /// Next entry to read from the queue.
    head: u32,
    /// Last head value written to the doorbell register.
    committed_head: u32,
    /// Number of entries in the queue.
    len: u32,
    /// Phase value expected for a newly posted completion; flips each time
    /// the head wraps around (see `read`).
    phase: bool,
    #[inspect(skip)]
    mem: MemoryBlock,
}
124
125impl CompletionQueue {
126 pub fn new(cqid: u16, len: u16, mem: MemoryBlock) -> CompletionQueue {
127 tracing::debug!(cqid, len, pfns = ?mem.pfns(), "new completion queue");
128 Self {
129 cqid,
130 head: 0,
131 committed_head: 0,
132 len: len.into(),
133 phase: true,
134 mem,
135 }
136 }
137
    /// Returns the queue ID. (Underscore-prefixed as it currently has no
    /// callers.)
    pub fn _id(&self) -> u16 {
        self.cqid
    }
141
    /// Returns the completion entry at the current head without consuming
    /// it, along with whether its phase bit matches the expected phase
    /// (i.e. whether the controller has newly posted it).
    pub fn peek(&self) -> CqPeekResult {
        // View the completion entry at `head` as two atomic u64s so it can
        // be read while the device may be concurrently writing it.
        let completion_mem = self.mem.as_slice()
            [self.head as usize * size_of::<spec::Completion>()..][..size_of::<spec::Completion>()]
            .as_atomic_slice::<AtomicU64>()
            .unwrap();

        // The high half carries the status (bits 48..64), which includes
        // the phase bit. Load it with acquire ordering so that the low
        // half read below cannot be observed from before the phase bit was
        // published; the low half itself can then be relaxed.
        let high = completion_mem[1].load(Acquire);
        let status = spec::CompletionStatus::from((high >> 48) as u16);
        let low = completion_mem[0].load(Relaxed);
        let completion: spec::Completion = zerocopy::transmute!([low, high]);
        CqPeekResult {
            completion,
            phase_match: status.phase() == self.phase,
            head: self.head,
            expected_phase: self.phase,
        }
    }
162
    /// Consumes and returns the completion entry at the head, or `None` if
    /// the controller has not posted a new entry there yet.
    pub fn read(&mut self) -> Option<spec::Completion> {
        // View the completion entry at `head` as two atomic u64s so it can
        // be read while the device may be concurrently writing it.
        let completion_mem = self.mem.as_slice()
            [self.head as usize * size_of::<spec::Completion>()..][..size_of::<spec::Completion>()]
            .as_atomic_slice::<AtomicU64>()
            .unwrap();

        // The high half carries the status (bits 48..64), including the
        // phase bit. Acquire ordering ensures the rest of the entry read
        // below is not observed from before the phase bit was published.
        let high = completion_mem[1].load(Acquire);
        let status = spec::CompletionStatus::from((high >> 48) as u16);
        if status.phase() != self.phase {
            // Entry still has the stale phase; nothing new posted.
            return None;
        }
        let low = completion_mem[0].load(Relaxed);
        let completion: spec::Completion = zerocopy::transmute!([low, high]);
        // Advance the head, flipping the expected phase on wraparound.
        self.head += 1;
        if self.head == self.len {
            self.head = 0;
            self.phase = !self.phase;
        }
        Some(completion)
    }
185
    /// Rings the head doorbell to release consumed completion entries back
    /// to the controller. No-op if nothing new has been read.
    pub fn commit<T: DeviceBacking>(&mut self, registers: &DeviceRegisters<T>) {
        if self.head != self.committed_head {
            // Order the preceding queue accesses before the doorbell write.
            safe_intrinsics::store_fence();
            registers.doorbell(self.cqid, true, self.head);
            self.committed_head = self.head;
        }
    }
193
194 pub fn save(&self) -> CompletionQueueSavedState {
196 CompletionQueueSavedState {
197 cqid: self.cqid,
198 head: self.head,
199 committed_head: self.committed_head,
200 len: self.len,
201 phase: self.phase,
202 }
203 }
204
205 pub fn restore(
207 mem: MemoryBlock,
208 saved_state: &CompletionQueueSavedState,
209 ) -> anyhow::Result<Self> {
210 let CompletionQueueSavedState {
211 cqid,
212 head,
213 committed_head,
214 len,
215 phase,
216 } = saved_state;
217
218 Ok(Self {
219 cqid: *cqid,
220 head: *head,
221 committed_head: *committed_head,
222 len: *len,
223 phase: *phase,
224 mem,
225 })
226 }
227}
228
/// Result of [`CompletionQueue::peek`].
pub(crate) struct CqPeekResult {
    /// The raw completion entry at the head. Only meaningful as a new
    /// completion when `phase_match` is true.
    pub completion: spec::Completion,
    /// Whether the entry's phase bit matches the queue's expected phase,
    /// i.e. whether the entry was newly posted by the controller.
    pub phase_match: bool,
    /// The head index that was peeked.
    pub head: u32,
    /// The phase value the queue currently expects.
    pub expected_phase: bool,
}
241
/// Returns the queue index following `n`, wrapping back to 0 for a queue of
/// `l` entries.
fn advance(n: u32, l: u32) -> u32 {
    let next = n + 1;
    if next >= l { 0 } else { next }
}