use gdma_defs::CLIENT_OOB_8;
use gdma_defs::CLIENT_OOB_24;
use gdma_defs::CLIENT_OOB_32;
use gdma_defs::CqEqDoorbellValue;
use gdma_defs::Cqe;
use gdma_defs::DB_CQ;
use gdma_defs::DB_EQ;
use gdma_defs::DB_RQ;
use gdma_defs::DB_SQ;
use gdma_defs::Eqe;
use gdma_defs::GdmaQueueType;
use gdma_defs::OWNER_BITS;
use gdma_defs::OWNER_MASK;
use gdma_defs::Sge;
use gdma_defs::WQE_ALIGNMENT;
use gdma_defs::WqDoorbellValue;
use gdma_defs::WqeHeader;
use gdma_defs::WqeParams;
use inspect::Inspect;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::Ordering::Acquire;
use user_driver::memory::MemoryBlock;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// A doorbell interface used to notify the device of new queue state.
pub trait Doorbell: Send + Sync {
    /// Returns the number of doorbell pages available.
    fn page_count(&self) -> u32;
    /// Writes a doorbell value at `address` within doorbell page `page`.
    fn write(&self, page: u32, address: u32, value: u64);
}

/// A doorbell that discards all writes, used as a placeholder.
struct NullDoorbell;

impl Doorbell for NullDoorbell {
    fn page_count(&self) -> u32 {
        0
    }

    fn write(&self, _page: u32, _address: u32, _value: u64) {}
}

/// A single page of a doorbell region, targeting a specific doorbell ID.
#[derive(Clone)]
pub struct DoorbellPage {
    doorbell: Arc<dyn Doorbell>,
    doorbell_id: u32,
}

impl DoorbellPage {
    /// Returns a doorbell page whose writes are discarded.
    pub(crate) fn null() -> Self {
        Self {
            doorbell: Arc::new(NullDoorbell),
            doorbell_id: 0,
        }
    }

    /// Creates a doorbell page for `doorbell_id` within the doorbell region
    /// `doorbell`, failing if the ID is out of range.
    pub fn new(doorbell: Arc<dyn Doorbell>, doorbell_id: u32) -> anyhow::Result<Self> {
        let page_count = doorbell.page_count();
        if doorbell_id >= page_count {
            anyhow::bail!(
                "doorbell id {} exceeds page count {}",
                doorbell_id,
                page_count
            );
        }
        Ok(Self {
            doorbell,
            doorbell_id,
        })
    }

    /// Writes a doorbell value at `address` within this page.
    pub fn write(&self, address: u32, value: u64) {
        assert!(address < 4096);
        self.doorbell.write(self.doorbell_id, address, value);
    }
}
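
// For illustration only (sketch; `doorbell`, `doorbell_id`, `sq_id`, and
// `tail` are hypothetical values supplied by the caller): a client typically
// wraps one page of the device's doorbell region and writes queue-specific
// doorbell values through it:
//
//     let page = DoorbellPage::new(doorbell.clone(), doorbell_id)?;
//     page.write(DB_SQ, WqDoorbellValue::new().with_id(sq_id).with_tail(tail).into());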

/// A completion queue (CQ) or event queue (EQ).
pub struct CqEq<T> {
    doorbell: DoorbellPage,
    doorbell_addr: u32,
    queue_type: GdmaQueueType,
    mem: MemoryBlock,
    id: u32,
    next: u32,
    size: u32,
    shift: u32,
    _phantom: PhantomData<fn() -> T>,
}

impl<T> Inspect for CqEq<T> {
    fn inspect(&self, req: inspect::Request<'_>) {
        req.respond()
            .field("id", self.id)
            .hex("size", self.size)
            .hex("next", self.next);
    }
}

impl CqEq<Cqe> {
    /// Creates a new completion queue.
    pub fn new_cq(mem: MemoryBlock, doorbell: DoorbellPage, id: u32) -> Self {
        Self::new(GdmaQueueType::GDMA_CQ, DB_CQ, mem, doorbell, id)
    }
}

impl CqEq<Eqe> {
    /// Creates a new event queue.
    pub fn new_eq(mem: MemoryBlock, doorbell: DoorbellPage, id: u32) -> Self {
        Self::new(GdmaQueueType::GDMA_EQ, DB_EQ, mem, doorbell, id)
    }
}

impl<T: IntoBytes + FromBytes + Immutable + KnownLayout> CqEq<T> {
    fn new(
        queue_type: GdmaQueueType,
        doorbell_addr: u32,
        mem: MemoryBlock,
        doorbell: DoorbellPage,
        id: u32,
    ) -> Self {
        let size = mem.len();
        assert!(size.is_power_of_two());
        Self {
            doorbell,
            doorbell_addr,
            queue_type,
            mem,
            id,
            next: size as u32,
            size: size as u32,
            shift: size.trailing_zeros(),
            _phantom: PhantomData,
        }
    }

    /// Sets the queue ID.
    pub(crate) fn set_id(&mut self, id: u32) {
        self.id = id;
    }

    /// Sets the doorbell page used to notify the device.
    pub(crate) fn set_doorbell(&mut self, page: DoorbellPage) {
        self.doorbell = page;
    }

    /// Returns the queue ID.
    pub fn id(&self) -> u32 {
        self.id
    }

    fn read_next<U: FromBytes + Immutable + KnownLayout>(&self, offset: u32) -> U {
        assert!((offset as usize & (size_of::<T>() - 1)) + size_of::<U>() <= size_of::<T>());
        self.mem
            .read_obj((self.next.wrapping_add(offset) & (self.size - 1)) as usize)
    }

    /// Pops the next queue entry, if the device has written one.
    pub fn pop(&mut self) -> Option<T> {
        // The device stamps an owner count into the high bits of the last
        // byte of each entry. Load it with acquire ordering so the entry
        // contents are not read before this ownership check.
        let b = self.mem.as_slice()
            [(self.next.wrapping_add(size_of::<T>() as u32 - 1) & (self.size - 1)) as usize]
            .load(Acquire);
        let owner_count = b >> 5;
        let cur_owner_count = (self.next >> self.shift) as u8;
        if owner_count == (cur_owner_count.wrapping_sub(1)) & OWNER_MASK as u8 {
            // The device has not written this entry yet.
            None
        } else if owner_count == cur_owner_count & OWNER_MASK as u8 {
            let qe = self.read_next::<T>(0);
            self.next = self.next.wrapping_add(size_of_val(&qe) as u32);
            Some(qe)
        } else {
            // The device got too far ahead and overwrote unconsumed entries.
            tracing::error!(next = self.next, owner_count, queue_type = ?self.queue_type, id = self.id, "eq/cq wrapped");
            None
        }
    }

    /// Writes the doorbell with the current consumer position, optionally
    /// arming the queue for notification.
    fn flush(&mut self, arm: bool) {
        let tail = self.next & ((self.size << OWNER_BITS) - 1);
        let value = CqEqDoorbellValue::new()
            .with_arm(arm)
            .with_id(self.id)
            .with_tail(tail / size_of::<T>() as u32);
        tracing::trace!(queue_type = ?self.queue_type, id = self.id, ?value, "cq/eq doorbell write");
        self.doorbell.write(self.doorbell_addr, value.into());
    }

    /// Updates the doorbell and arms the queue so the device notifies when
    /// the next entry arrives.
    pub fn arm(&mut self) {
        self.flush(true);
    }

    /// Updates the doorbell to acknowledge consumed entries without arming
    /// the queue.
    pub fn ack(&mut self) {
        self.flush(false);
    }

    /// Returns the current consumer offset, for diagnostics.
    pub fn get_next(&mut self) -> u32 {
        self.next
    }
}
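
// For illustration only (sketch; `eq` stands for an `Eq` built elsewhere with
// `CqEq::new_eq`): a typical processing pass drains the queue and then
// re-arms it so the device raises a notification for the next entry:
//
//     while let Some(eqe) = eq.pop() {
//         // ... handle the event queue entry ...
//     }
//     eq.arm();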

/// A completion queue.
pub type Cq = CqEq<Cqe>;

/// An event queue.
pub type Eq = CqEq<Eqe>;

/// A work queue (send or receive).
pub struct Wq {
    doorbell: DoorbellPage,
    queue_type: GdmaQueueType,
    doorbell_addr: u32,
    mem: MemoryBlock,
    id: u32,
    head: u32,
    tail: u32,
    mask: u32,
    uncommitted_count: u32,
}

impl Inspect for Wq {
    fn inspect(&self, req: inspect::Request<'_>) {
        req.respond()
            .field("id", self.id)
            .hex("size", self.mask + 1)
            .hex("head", self.head)
            .hex("tail", self.tail)
            .field("uncommitted", self.uncommitted_count);
    }
}

/// Error returned when the work queue has no room for a new entry.
#[derive(Debug)]
pub struct QueueFull;

impl Wq {
    /// Creates a new send work queue.
    pub fn new_sq(mem: MemoryBlock, doorbell: DoorbellPage, id: u32) -> Self {
        Self::new(GdmaQueueType::GDMA_SQ, DB_SQ, mem, doorbell, id)
    }

    /// Creates a new receive work queue.
    pub fn new_rq(mem: MemoryBlock, doorbell: DoorbellPage, id: u32) -> Self {
        Self::new(GdmaQueueType::GDMA_RQ, DB_RQ, mem, doorbell, id)
    }

    fn new(
        queue_type: GdmaQueueType,
        doorbell_addr: u32,
        mem: MemoryBlock,
        doorbell: DoorbellPage,
        id: u32,
    ) -> Self {
        let size = mem.len() as u32;
        assert!(size.is_power_of_two());
        Self {
            doorbell,
            queue_type,
            doorbell_addr,
            mem,
            id,
            head: size,
            tail: 0,
            mask: size - 1,
            uncommitted_count: 0,
        }
    }

    /// Returns the queue ID.
    pub fn id(&self) -> u32 {
        self.id
    }

    /// Advances the head by `n` bytes, making that space available for new
    /// entries after the device has finished the corresponding work.
    pub fn advance_head(&mut self, n: u32) {
        assert!(n % WQE_ALIGNMENT as u32 == 0);
        self.head = self.head.wrapping_add(n);
    }

    fn write_tail(&self, offset: u32, data: &[u8]) {
        assert!(
            offset as usize % WQE_ALIGNMENT + data.len() <= WQE_ALIGNMENT,
            "can't write more than one queue segment at a time to avoid wrapping"
        );
        self.mem
            .write_at((self.tail.wrapping_add(offset) & self.mask) as usize, data);
    }

    /// Returns the number of bytes available for new work queue entries.
    pub fn available(&self) -> u32 {
        self.head.wrapping_sub(self.tail)
    }

    /// Computes the size in bytes of a work queue entry with an out-of-band
    /// buffer of `oob_len` bytes and `sge_count` scatter/gather entries.
    pub const fn entry_size(oob_len: usize, sge_count: usize) -> u32 {
        let len = size_of::<WqeHeader>() + oob_len + size_of::<Sge>() * sge_count;
        // Round up to the WQE alignment.
        let len = (len + WQE_ALIGNMENT - 1) & !(WQE_ALIGNMENT - 1);
        len as u32
    }
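
    // Worked example (illustrative; assumes WQE_ALIGNMENT is 32, the GDMA WQE
    // basic unit): an 8-byte OOB with two 16-byte SGEs needs
    // 8 (header) + 8 (OOB) + 32 (SGL) = 48 bytes, which entry_size rounds up
    // to 64.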

    /// Writes a work queue entry consisting of the out-of-band buffer `oob`,
    /// the scatter/gather list `sgl`, and the associated WQE parameters.
    ///
    /// Returns the entry's length in bytes, or [`QueueFull`] if there is not
    /// enough space.
    pub fn push<I: IntoIterator<Item = Sge>>(
        &mut self,
        oob: &(impl IntoBytes + Immutable + KnownLayout),
        sgl: I,
        client_oob_in_sgl: Option<u8>,
        gd_client_unit_data: u16,
    ) -> Result<u32, QueueFull>
    where
        I::IntoIter: ExactSizeIterator,
    {
        let sgl = sgl.into_iter();
        let oob_size = match size_of_val(oob) {
            0 | 8 => CLIENT_OOB_8,
            24 => CLIENT_OOB_24,
            32 => CLIENT_OOB_32,
            _ => panic!("invalid oob size"),
        };
        let len = Self::entry_size(size_of_val(oob), sgl.len());
        if self.available() < len {
            return Err(QueueFull);
        }

        let hdr = WqeHeader {
            reserved: [0; 3],
            last_vbytes: client_oob_in_sgl.unwrap_or(0),
            params: WqeParams::new()
                .with_num_sgl_entries(sgl.len() as u8)
                .with_inline_client_oob_size(oob_size)
                .with_client_oob_in_sgl(client_oob_in_sgl.is_some())
                .with_gd_client_unit_data(gd_client_unit_data),
        };

        self.write_tail(0, hdr.as_bytes());

        // Write the inline OOB after the header and compute the offset of the
        // scatter/gather list.
        let offset = match size_of_val(oob) {
            0 => 16,
            8 => {
                self.write_tail(8, oob.as_bytes());
                16
            }
            24 => {
                self.write_tail(8, oob.as_bytes());
                32
            }
            32 => {
                self.write_tail(8, &oob.as_bytes()[..24]);
                self.write_tail(32, &oob.as_bytes()[24..]);
                48
            }
            _ => unreachable!(),
        };

        for (i, sge) in sgl.enumerate() {
            self.write_tail(offset + i as u32 * 16, sge.as_bytes());
        }

        self.tail = self.tail.wrapping_add(len);
        self.uncommitted_count += 1;
        Ok(len)
    }

    /// Writes the doorbell to notify the device of entries pushed since the
    /// last commit.
    pub fn commit(&mut self) {
        let mut value = WqDoorbellValue::new().with_id(self.id).with_tail(self.tail);
        if self.queue_type == GdmaQueueType::GDMA_RQ {
            // Receive queues also report how many new receive WQEs were
            // posted.
            value.set_num_rwqe(self.uncommitted_count as u8);
        }
        tracing::trace!(queue_type = ?self.queue_type, id = self.id, ?value, "wq doorbell write");
        self.doorbell.write(self.doorbell_addr, value.into());
        self.uncommitted_count = 0;
    }

    /// Returns the current queue tail, for diagnostics.
    pub fn get_tail(&mut self) -> u32 {
        self.tail
    }
}
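
// For illustration only (sketch; `sq`, `oob`, and `sge` are placeholders
// supplied by the caller): a sender typically pushes one or more entries and
// rings the doorbell once via `commit`, then returns the space with
// `advance_head` after the corresponding completion arrives:
//
//     let len = sq.push(&oob, [sge], None, 0)?;
//     sq.commit();
//     // ... later, after the completion for this WQE is observed ...
//     sq.advance_head(len);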