1pub use crate::bnic_driver::RxConfig;
7pub use crate::resources::ResourceArena;
8
9use crate::bnic_driver::BnicDriver;
10use crate::bnic_driver::WqConfig;
11use crate::gdma_driver::GdmaDriver;
12use crate::queues;
13use crate::queues::Doorbell;
14use crate::queues::DoorbellPage;
15use anyhow::Context;
16use futures::StreamExt;
17use futures::lock::Mutex;
18use gdma_defs::GdmaDevId;
19use gdma_defs::GdmaDevType;
20use gdma_defs::GdmaQueueType;
21use gdma_defs::GdmaRegisterDeviceResp;
22use gdma_defs::bnic::ManaQueryDeviceCfgResp;
23use gdma_defs::bnic::ManaQueryFilterStateResponse;
24use gdma_defs::bnic::ManaQueryStatisticsResponse;
25use gdma_defs::bnic::ManaQueryVportCfgResp;
26use gdma_defs::bnic::STATISTICS_FLAGS_ALL;
27use inspect::Inspect;
28use net_backend_resources::mac_address::MacAddress;
29use pal_async::driver::SpawnDriver;
30use pal_async::task::Spawn;
31use pal_async::task::Task;
32use std::sync::Arc;
33use tracing::Instrument;
34use user_driver::DeviceBacking;
35use user_driver::DmaClient;
36use user_driver::interrupt::DeviceInterrupt;
37use user_driver::memory::MemoryBlock;
38use user_driver::memory::PAGE_SIZE;
39use vmcore::vm_task::VmTaskDriverSource;
40
/// Per-vport link-status notification state.
enum LinkStatus {
    /// No link event has been observed and no subscriber is registered.
    Default,
    /// A link toggle arrived before any subscriber registered; holds the
    /// latest connection state so it can be delivered at registration time.
    Pending(bool),
    /// A subscriber is registered for link-status changes.
    Active {
        /// Channel used to notify the subscriber of connection changes.
        sender: mesh::Sender<bool>,
        /// Last connection state recorded for this vport.
        connected: bool,
    },
}
49
/// An open handle to a MANA network device, owning the underlying GDMA
/// driver and the background service tasks that run on its behalf.
pub struct ManaDevice<T: DeviceBacking> {
    /// Shared state, also referenced by each vport and background task.
    inner: Arc<Inner<T>>,
    /// Task servicing deferred inspect requests.
    inspect_task: Task<()>,
    /// Task forwarding HWC events to link-status subscribers; spawned
    /// lazily by `start_notification_task`, so `None` until then.
    hwc_task: Option<Task<()>>,
    /// Channel used to hand deferred inspect requests to `inspect_task`.
    inspect_send: mesh::Sender<inspect::Deferred>,
}
57
impl<T: DeviceBacking> Inspect for ManaDevice<T> {
    fn inspect(&self, req: inspect::Request<'_>) {
        // Defer the request to the background inspect task, which can await
        // the async gdma lock without blocking the inspector.
        self.inspect_send.send(req.defer());
    }
}
63
/// State shared between the device handle, its vports, and the background
/// tasks.
struct Inner<T: DeviceBacking> {
    /// The GDMA driver; all hardware communication serializes on this lock.
    gdma: Mutex<GdmaDriver<T>>,
    /// Identity of the MANA device on the GDMA bus.
    dev_id: GdmaDevId,
    /// Device registration data (provides `gpa_mkey`, `pdid`, `db_id`).
    dev_data: GdmaRegisterDeviceResp,
    /// Basic NIC configuration reported by the device at startup.
    dev_config: ManaQueryDeviceCfgResp,
    /// Doorbell used to construct per-queue doorbell pages.
    doorbell: Arc<dyn Doorbell>,
    /// One [`LinkStatus`] entry per vport, indexed by vport id.
    vport_link_status: Arc<Mutex<Vec<LinkStatus>>>,
}
72
impl<T: DeviceBacking> ManaDevice<T> {
    /// Initializes the GDMA driver over `device`, locates and registers the
    /// MANA device on the bus, and spawns the background inspect task.
    ///
    /// `num_vps` is the number of virtual processors available for queue
    /// placement; `max_queues_per_vport` bounds the per-vport queue count
    /// used when validating VF resources.
    pub async fn new(
        driver: &impl SpawnDriver,
        device: T,
        num_vps: u32,
        max_queues_per_vport: u16,
    ) -> anyhow::Result<Self> {
        let mut gdma = GdmaDriver::new(driver, device, num_vps)
            .instrument(tracing::info_span!("new_gdma_driver"))
            .await?;
        // Sanity-check the event queue path and the driver/device protocol
        // version before doing any real work.
        gdma.test_eq().await?;

        gdma.verify_vf_driver_version().await?;

        // Find the MANA device among the devices the GDMA bus exposes.
        let dev_id = gdma
            .list_devices()
            .await?
            .iter()
            .copied()
            .find(|dev_id| dev_id.ty == GdmaDevType::GDMA_DEVICE_MANA)
            .context("no mana device found")?;

        let dev_data = gdma.register_device(dev_id).await?;

        let mut bnic = BnicDriver::new(&mut gdma, dev_id);
        let dev_config = bnic.query_dev_config().await?;
        tracing::info!(mana_dev_config = ?dev_config);
        // Worst-case queue demand across all vports, checked against the
        // resources the VF actually exposes.
        let num_queues_needed = dev_config.max_num_vports as u32 * max_queues_per_vport as u32;
        gdma.check_vf_resources(num_vps, num_queues_needed);

        let doorbell = gdma.doorbell();
        // One link-status slot per vport, all starting in the Default state.
        let vport_link_status = (0..dev_config.max_num_vports)
            .map(|_| LinkStatus::Default)
            .collect();
        let inner = Arc::new(Inner {
            gdma: Mutex::new(gdma),
            dev_id,
            dev_data,
            dev_config,
            doorbell,
            vport_link_status: Arc::new(Mutex::new(vport_link_status)),
        });

        // Service inspect requests on a dedicated task so inspection can
        // await the gdma lock without blocking the requester.
        let (inspect_send, mut inspect_recv) = mesh::channel::<inspect::Deferred>();
        let inspect_task = driver.spawn("mana-inspect", {
            let inner = inner.clone();
            async move {
                while let Some(deferred) = inspect_recv.next().await {
                    // Destructure so adding a field to `Inner` forces this
                    // site to be revisited.
                    let Inner {
                        gdma,
                        dev_id: _,
                        dev_data: _,
                        dev_config: _,
                        doorbell: _,
                        vport_link_status: _,
                    } = inner.as_ref();
                    let gdma = gdma.lock().await;
                    deferred.respond(|resp| {
                        resp.merge(&*gdma);
                    })
                }
            }
        });

        let device = Self {
            inner,
            inspect_send,
            inspect_task,
            hwc_task: None,
        };
        Ok(device)
    }

    /// Returns the number of vports supported by the device.
    pub fn num_vports(&self) -> u32 {
        self.inner.dev_config.max_num_vports.into()
    }

    /// Returns the device configuration reported by the basic NIC.
    pub fn dev_config(&self) -> &ManaQueryDeviceCfgResp {
        &self.inner.dev_config
    }

    /// Spawns the task that waits for HWC events and fans link up/down
    /// notifications out to per-vport subscribers.
    ///
    /// Idempotent: calls after the task is already running are no-ops.
    pub async fn start_notification_task(&mut self, driver_source: &VmTaskDriverSource) {
        if self.hwc_task.is_some() {
            return;
        }

        let inner = self.inner.clone();
        let hwc_task = driver_source.simple().spawn("mana-hwc", {
            // Subscribe before the task starts so no event is missed; the
            // gdma lock guard is dropped before the spawned future runs.
            let mut gdma = self.inner.gdma.lock().await;
            let mut hwc_event = gdma.hwc_subscribe();
            async move {
                loop {
                    hwc_event.wait().await;
                    let mut gdma = inner.gdma.lock().await;
                    if gdma.process_all_eqs() {
                        // Lock order: gdma first, then vport_link_status.
                        let mut vport_link_status = inner.vport_link_status.lock().await;
                        for (vport_index, current) in gdma.get_link_toggle_list() {
                            let vport_index = vport_index as usize;
                            if vport_index >= vport_link_status.len() {
                                tracing::error!(vport_index, "Invalid vport index");
                                continue;
                            }
                            if let LinkStatus::Active { sender, connected } =
                                &mut vport_link_status[vport_index]
                            {
                                // A subscriber is registered; record the new
                                // state and notify it.
                                *connected = current;
                                sender.send(*connected);
                            } else {
                                // No subscriber yet; stash the latest state so
                                // it can be delivered on registration.
                                let _ = std::mem::replace(
                                    &mut vport_link_status[vport_index],
                                    LinkStatus::Pending(current),
                                );
                            }
                        }
                    }
                }
            }
        });
        self.hwc_task = Some(hwc_task);
    }

    /// Creates an accessor for vport `index`, optionally seeding it with a
    /// previously saved `vport_state`.
    ///
    /// When the device supports filter-state queries, the cached filter
    /// direction is refreshed from the device (best effort).
    pub async fn new_vport(
        &self,
        index: u32,
        vport_state: Option<VportState>,
        dev_config: &ManaQueryDeviceCfgResp,
    ) -> anyhow::Result<Vport<T>> {
        let vport_config = self.query_vport_config(index).await?;

        let vport_state = vport_state.unwrap_or(VportState::new(None, None));

        let vport = Vport {
            inner: self.inner.clone(),
            config: vport_config,
            vport_state,
            id: index,
        };

        if dev_config.cap_filter_state_query() {
            // Best effort: a failed query leaves the cached state untouched.
            if let Ok(resp) = vport.query_filter_state(vport.id.into()).await {
                tracing::debug!(
                    mac_address = %vport.mac_address(),
                    direction_to_vtl0 = resp.direction_to_vtl0,
                    "query_filter_state"
                );
                vport
                    .vport_state
                    .set_direction_to_vtl0(resp.direction_to_vtl0 == 1);
            }
        }

        Ok(vport)
    }

    /// Stops the background tasks, deregisters the device, and returns the
    /// deregistration result together with the underlying device backing.
    ///
    /// # Panics
    /// Panics if any other clone of the inner state (e.g. a live [`Vport`])
    /// still exists, since the inner state must be uniquely owned here.
    pub async fn shutdown(self) -> (anyhow::Result<()>, T) {
        self.inspect_task.cancel().await;
        if let Some(hwc_task) = self.hwc_task {
            hwc_task.cancel().await;
        }
        let inner = Arc::into_inner(self.inner).unwrap();
        let mut driver = inner.gdma.into_inner();
        let result = driver.deregister_device(inner.dev_id).await;
        (result, driver.into_device())
    }

    /// Queries the device for the configuration of vport `vport`.
    pub async fn query_vport_config(&self, vport: u32) -> anyhow::Result<ManaQueryVportCfgResp> {
        let mut gdma = self.inner.gdma.lock().await;
        BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .query_vport_config(vport)
            .await
    }
}
252
/// Shared, cloneable tracker for a vport's packet-filter direction
/// (toward VTL0 or not).
#[derive(Clone)]
pub struct VportState {
    /// Last known filter direction; `None` until it is first set.
    direction_to_vtl0: Arc<parking_lot::Mutex<Option<bool>>>,
    /// Optional callback invoked whenever the direction is set.
    state_change_callback: Arc<Option<Box<dyn Fn(bool) + Send + Sync>>>,
}
259
260impl VportState {
261 pub fn new(
263 direction_to_vtl0: Option<bool>,
264 state_change_callback: Option<Box<dyn Fn(bool) + Send + Sync>>,
265 ) -> Self {
266 Self {
267 direction_to_vtl0: Arc::new(parking_lot::Mutex::new(direction_to_vtl0)),
268 state_change_callback: Arc::new(state_change_callback),
269 }
270 }
271
272 pub fn set_direction_to_vtl0(&self, direction_to_vtl0: bool) {
274 *self.direction_to_vtl0.lock() = Some(direction_to_vtl0);
275 if let Some(callback) = self.state_change_callback.as_ref() {
276 (callback)(direction_to_vtl0);
277 }
278 }
279
280 pub fn get_direction_to_vtl0(&self) -> Option<bool> {
282 let direction_to_vtl0 = *self.direction_to_vtl0.lock();
283 direction_to_vtl0
284 }
285}
286
/// An accessor for a single MANA vport, used to create queues and configure
/// the vport's transmit/receive/filter behavior.
pub struct Vport<T: DeviceBacking> {
    /// Shared device state.
    inner: Arc<Inner<T>>,
    /// Configuration returned by `query_vport_config` for this vport.
    config: ManaQueryVportCfgResp,
    /// Cached filter-direction state, shared with whoever supplied it.
    vport_state: VportState,
    /// Index of this vport on the device.
    id: u32,
}
294
impl<T: DeviceBacking> Vport<T> {
    /// Returns the maximum number of send (transmit) queues for this vport.
    pub fn max_tx_queues(&self) -> u32 {
        self.config.max_num_sq
    }

    /// Returns the maximum number of receive queues for this vport.
    pub fn max_rx_queues(&self) -> u32 {
        self.config.max_num_rq
    }

    /// Returns the vport's MAC address.
    pub fn mac_address(&self) -> MacAddress {
        self.config.mac_addr.into()
    }

    /// Returns the device's GPA memory key from registration data.
    pub fn gpa_mkey(&self) -> u32 {
        self.inner.dev_data.gpa_mkey
    }

    /// Returns the vport index.
    pub fn id(&self) -> u32 {
        self.id
    }

    /// Returns the number of indirection table entries for this vport.
    pub fn num_indirection_ent(&self) -> u32 {
        self.config.num_indirection_ent
    }

    /// Creates an event queue backed by a `size`-byte DMA buffer, with its
    /// interrupt targeted at `cpu`.
    ///
    /// Created hardware resources are tracked in `arena` so they can later
    /// be torn down via [`Vport::destroy`].
    pub async fn new_eq(
        &self,
        arena: &mut ResourceArena,
        size: u32,
        cpu: u32,
    ) -> anyhow::Result<BnicEq> {
        let mut gdma = self.inner.gdma.lock().await;
        let dma_client = gdma.device().dma_client();
        let mem = dma_client
            .allocate_dma_buffer(size as usize)
            .context("Failed to allocate DMA buffer")?;

        // Expose the buffer to the device first, then create the EQ over it.
        let gdma_region = gdma
            .create_dma_region(arena, self.inner.dev_id, mem.clone())
            .await
            .context("failed to create eq dma region")?;
        let (id, interrupt) = gdma
            .create_eq(
                arena,
                self.inner.dev_id,
                gdma_region,
                size,
                self.inner.dev_data.pdid,
                self.inner.dev_data.db_id,
                cpu,
            )
            .await
            .context("failed to create eq")?;
        Ok(BnicEq {
            doorbell: DoorbellPage::new(self.inner.doorbell.clone(), self.inner.dev_data.db_id)?,
            mem,
            id,
            interrupt,
        })
    }

    /// Creates a work queue/completion queue pair on this vport.
    ///
    /// `is_send` selects a send queue vs. a receive queue. `wq_size` and
    /// `cq_size` are byte sizes; each must be a power of two of at least one
    /// page. Completions are associated with event queue `eq_id`. Created
    /// resources are tracked in `arena`.
    pub async fn new_wq(
        &self,
        arena: &mut ResourceArena,
        is_send: bool,
        wq_size: u32,
        cq_size: u32,
        eq_id: u32,
    ) -> anyhow::Result<BnicWq> {
        assert!(wq_size >= PAGE_SIZE as u32 && wq_size.is_power_of_two());
        assert!(cq_size >= PAGE_SIZE as u32 && cq_size.is_power_of_two());
        let mut gdma = self.inner.gdma.lock().await;

        let dma_client = gdma.device().dma_client();

        // One allocation for both queues: WQ at the front, CQ at the back.
        let mem = dma_client
            .allocate_dma_buffer((wq_size + cq_size) as usize)
            .context("failed to allocate DMA buffer")?;

        let wq_mem = mem.subblock(0, wq_size as usize);
        let cq_mem = mem.subblock(wq_size as usize, cq_size as usize);

        let wq_gdma_region = gdma
            .create_dma_region(arena, self.inner.dev_id, wq_mem.clone())
            .await?;
        let cq_gdma_region = gdma
            .create_dma_region(arena, self.inner.dev_id, cq_mem.clone())
            .await?;
        let wq_type = if is_send {
            GdmaQueueType::GDMA_SQ
        } else {
            GdmaQueueType::GDMA_RQ
        };
        let doorbell = DoorbellPage::new(self.inner.doorbell.clone(), self.inner.dev_data.db_id)?;
        let resp = BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .create_wq_obj(
                arena,
                self.config.vport,
                wq_type,
                &WqConfig {
                    wq_gdma_region,
                    cq_gdma_region,
                    wq_size,
                    cq_size,
                    cq_moderation_ctx_id: 0,
                    eq_id,
                },
            )
            .await?;

        Ok(BnicWq {
            doorbell,
            wq_mem,
            cq_mem,
            wq_id: resp.wq_id,
            cq_id: resp.cq_id,
            is_send,
            wq_obj: resp.wq_obj,
        })
    }

    /// Configures the vport for transmit, returning the resulting
    /// [`TxConfig`].
    pub async fn config_tx(&self) -> anyhow::Result<TxConfig> {
        let mut gdma = self.inner.gdma.lock().await;
        let resp = BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .config_vport_tx(
                self.config.vport,
                self.inner.dev_data.pdid,
                self.inner.dev_data.db_id,
            )
            .await?;

        let config = TxConfig {
            tx_vport_offset: resp.tx_vport_offset,
        };
        Ok(config)
    }

    /// Applies the receive configuration `config` to the vport.
    pub async fn config_rx(&self, config: &RxConfig<'_>) -> anyhow::Result<()> {
        let mut gdma = self.inner.gdma.lock().await;
        BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .config_vport_rx(self.config.vport, config)
            .await?;

        Ok(())
    }

    /// Moves the vport's packet filter; `direction_to_vtl0` is treated as a
    /// boolean (1 = filter toward VTL0).
    ///
    /// No-op if the cached state already matches the requested direction.
    pub async fn move_filter(&self, direction_to_vtl0: u8) -> anyhow::Result<()> {
        if let Some(to_vtl0) = self.vport_state.get_direction_to_vtl0() {
            if to_vtl0 == (direction_to_vtl0 == 1) {
                return Ok(());
            }
        }
        let mut gdma = self.inner.gdma.lock().await;
        let hwc_activity_id = BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .move_vport_filter(self.config.vport, direction_to_vtl0)
            .await?;
        // Update the cache only after the device accepted the move.
        self.vport_state
            .set_direction_to_vtl0(direction_to_vtl0 == 1);
        tracing::info!(
            mac_address = %self.mac_address(),
            direction_to_vtl0,
            hwc_activity_id,
            "switch data path for mac",
        );
        Ok(())
    }

    /// Returns the cached filter direction, or `None` if it is unknown.
    pub async fn get_direction_to_vtl0(&self) -> Option<bool> {
        self.vport_state.get_direction_to_vtl0()
    }

    /// Sets the vport's serial number on the device.
    pub async fn set_serial_no(&self, serial_no: u32) -> anyhow::Result<()> {
        let mut gdma = self.inner.gdma.lock().await;
        BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .set_vport_serial_no(self.config.vport, serial_no)
            .await?;
        Ok(())
    }

    /// Queries the full set of device statistics (`STATISTICS_FLAGS_ALL`).
    pub async fn query_stats(&self) -> anyhow::Result<ManaQueryStatisticsResponse> {
        let mut gdma = self.inner.gdma.lock().await;
        BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .query_stats(STATISTICS_FLAGS_ALL)
            .await
    }

    /// Queries the device for the current filter state of `vport`.
    ///
    /// NOTE(review): callers in this file pass the vport index (`self.id`)
    /// here, while sibling calls pass `self.config.vport` — confirm which
    /// identifier the device expects.
    pub async fn query_filter_state(
        &self,
        vport: u64,
    ) -> anyhow::Result<ManaQueryFilterStateResponse> {
        let mut gdma = self.inner.gdma.lock().await;
        BnicDriver::new(&mut *gdma, self.inner.dev_id)
            .query_filter_state(vport)
            .await
    }

    /// Destroys all device resources tracked in `arena`.
    pub async fn destroy(&self, arena: ResourceArena) {
        let mut gdma = self.inner.gdma.lock().await;
        arena.destroy(&mut *gdma).await;
    }

    /// Retargets event queue `eq_id`'s interrupt to `cpu`, returning the new
    /// interrupt object if the driver produced one.
    pub async fn retarget_interrupt(
        &self,
        eq_id: u32,
        cpu: u32,
    ) -> anyhow::Result<Option<DeviceInterrupt>> {
        let mut gdma = self.inner.gdma.lock().await;
        gdma.retarget_eq(self.inner.dev_id, eq_id, cpu).await
    }

    /// Registers `sender` to receive link connect/disconnect notifications
    /// for this vport.
    ///
    /// A pending (pre-registration) link state is delivered immediately;
    /// replacing an existing subscriber resends the state only when the
    /// link is currently down.
    pub async fn register_link_status_notifier(&self, sender: mesh::Sender<bool>) {
        let mut vport_link_status = self.inner.vport_link_status.lock().await;
        let vport_index = self.id as usize;
        let (send, connected) = match vport_link_status[vport_index] {
            // An event arrived before anyone subscribed: deliver it now.
            LinkStatus::Pending(connected) => (true, connected),
            // Replacing a subscriber: notify only if currently disconnected.
            LinkStatus::Active { connected, .. } => (!connected, connected),
            // No prior event: assume connected without notifying.
            _ => (false, true),
        };
        if send {
            sender.send(connected);
        }
        vport_link_status[vport_index] = LinkStatus::Active { sender, connected };
    }

    /// Returns the DMA client of the underlying device.
    pub async fn dma_client(&self) -> Arc<dyn DmaClient> {
        self.inner.gdma.lock().await.device().dma_client()
    }
}
545
/// Transmit configuration returned by [`Vport::config_tx`].
pub struct TxConfig {
    /// Vport offset reported by the device for transmit configuration.
    pub tx_vport_offset: u16,
}
551
/// An event queue allocated via [`Vport::new_eq`].
pub struct BnicEq {
    /// Doorbell page used when ringing this queue's doorbell.
    doorbell: DoorbellPage,
    /// DMA memory backing the queue.
    mem: MemoryBlock,
    /// Hardware-assigned event queue id.
    id: u32,
    /// Interrupt associated with this event queue.
    interrupt: DeviceInterrupt,
}
559
impl BnicEq {
    /// Returns the hardware event queue id.
    pub fn id(&self) -> u32 {
        self.id
    }

    /// Returns a clone of the interrupt associated with this event queue.
    pub fn interrupt(&self) -> DeviceInterrupt {
        self.interrupt.clone()
    }

    /// Returns an accessor for operating on the event queue.
    pub fn queue(&self) -> queues::Eq {
        queues::Eq::new_eq(self.mem.clone(), self.doorbell.clone(), self.id)
    }
}
576
/// A work queue/completion queue pair allocated via [`Vport::new_wq`].
pub struct BnicWq {
    /// Doorbell page used when ringing the queues' doorbells.
    doorbell: DoorbellPage,
    /// DMA memory backing the work queue.
    wq_mem: MemoryBlock,
    /// DMA memory backing the completion queue.
    cq_mem: MemoryBlock,
    /// Hardware-assigned work queue id.
    wq_id: u32,
    /// Hardware-assigned completion queue id.
    cq_id: u32,
    /// Whether this is a send queue (`true`) or a receive queue (`false`).
    is_send: bool,
    /// Opaque work-queue object handle returned at creation.
    wq_obj: u64,
}
587
588impl BnicWq {
589 pub fn wq(&self) -> queues::Wq {
591 if self.is_send {
592 queues::Wq::new_sq(self.wq_mem.clone(), self.doorbell.clone(), self.wq_id)
593 } else {
594 queues::Wq::new_rq(self.wq_mem.clone(), self.doorbell.clone(), self.wq_id)
595 }
596 }
597
598 pub fn cq(&self) -> queues::Cq {
600 queues::Cq::new_cq(self.cq_mem.clone(), self.doorbell.clone(), self.cq_id)
601 }
602
603 pub fn wq_obj(&self) -> u64 {
605 self.wq_obj
606 }
607}