1#![forbid(unsafe_code)]
8#![expect(missing_docs)]
9
10pub mod access;
11pub mod bnic;
12
13use bitfield_struct::bitfield;
14use inspect::Inspect;
15use open_enum::open_enum;
16use std::fmt::Debug;
17use zerocopy::FromBytes;
18use zerocopy::Immutable;
19use zerocopy::IntoBytes;
20use zerocopy::KnownLayout;
21
/// PCI vendor ID for the device (0x1414 is Microsoft).
pub const VENDOR_ID: u16 = 0x1414;
/// PCI device ID for the device.
pub const DEVICE_ID: u16 = 0x00BA;

/// Page size used by the device, as a `u32`.
pub const PAGE_SIZE32: u32 = 4096;
/// Page size used by the device, as a `u64`.
pub const PAGE_SIZE64: u64 = 4096;
27
/// Layout of the device's memory-mapped register region.
///
/// NOTE(review): field meanings are inferred from the names; confirm against
/// the device specification.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes, Inspect)]
pub struct RegMap {
    /// Micro (third) component of the register-map version.
    #[inspect(hex)]
    pub micro_version_number: u16,
    /// Minor component of the register-map version.
    #[inspect(hex)]
    pub minor_version_number: u8,
    /// Major component of the register-map version.
    #[inspect(hex)]
    pub major_version_number: u8,
    #[inspect(hex)]
    pub reserved: u32,
    /// Offset of the VF doorbell pages zone.
    #[inspect(hex)]
    pub vf_db_pages_zone_offset: u64,
    /// Size of a VF doorbell page.
    #[inspect(hex)]
    pub vf_db_page_sz: u16,
    #[inspect(hex)]
    pub reserved2: u16,
    #[inspect(hex)]
    pub reserved3: u32,
    /// Start offset of the SR-IOV shared register region for VFs.
    #[inspect(hex)]
    pub vf_gdma_sriov_shared_reg_start: u64,
    /// Size of the SR-IOV shared register region.
    #[inspect(hex)]
    pub vf_gdma_sriov_shared_sz: u16,
    #[inspect(hex)]
    pub reserved4: u16,
    #[inspect(hex)]
    pub reserved5: u32,
}
56
/// Doorbell region offset for send work queues.
pub const DB_SQ: u32 = 0;
/// Doorbell region offset for receive work queues.
pub const DB_RQ: u32 = 0x400;
/// Doorbell region offset for receive work queue client data.
pub const DB_RQ_CLIENT_DATA: u32 = 0x408;
/// Doorbell region offset for completion queues.
pub const DB_CQ: u32 = 0x800;
/// Doorbell region offset for event queues.
pub const DB_EQ: u32 = 0xff8;
62
/// Value written to a completion-queue or event-queue doorbell.
#[bitfield(u64)]
pub struct CqEqDoorbellValue {
    /// Queue ID (24 bits).
    #[bits(24)]
    pub id: u32,
    pub reserved: u8,
    /// New tail index for the queue (31 bits).
    #[bits(31)]
    pub tail: u32,
    /// Arm bit — requests notification for the queue when set.
    pub arm: bool,
}
72
/// Value written to a work-queue (SQ/RQ) doorbell.
#[bitfield(u64)]
pub struct WqDoorbellValue {
    /// Work queue ID (24 bits).
    #[bits(24)]
    pub id: u32,
    /// Number of receive WQEs being posted.
    pub num_rwqe: u8,
    /// New tail offset for the queue.
    pub tail: u32,
}
80
/// Header for shared-memory channel (SMC) protocol messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct SmcProtoHdr {
    /// Message type (see [`SmcMessageType`]).
    #[bits(3)]
    pub msg_type: u8,
    /// Version of the message type's payload format.
    #[bits(3)]
    pub msg_version: u8,
    pub reserved_1: bool,
    /// Set when this message is a response rather than a request.
    pub is_response: bool,
    /// Completion status of the request (meaningful on responses).
    pub status: u8,
    pub reserved_2: u8,
    /// Set to request that the VF be reset.
    pub reset_vf: bool,
    #[bits(6)]
    pub reserved_3: u8,
    /// Ownership flag: set when the PF owns the shared-memory region.
    pub owner_is_pf: bool,
}
98
open_enum! {
    /// Message types carried in [`SmcProtoHdr::msg_type`].
    pub enum SmcMessageType: u8 {
        SMC_MSG_TYPE_ESTABLISH_HWC = 1,
        SMC_MSG_TYPE_DESTROY_HWC = 2,
        SMC_MSG_TYPE_REPORT_HWC_TIMEOUT = 4,
    }
}

/// Payload version for `SMC_MSG_TYPE_ESTABLISH_HWC`.
pub const SMC_MSG_TYPE_ESTABLISH_HWC_VERSION: u8 = 0;
/// Payload version for `SMC_MSG_TYPE_DESTROY_HWC`.
pub const SMC_MSG_TYPE_DESTROY_HWC_VERSION: u8 = 0;
/// Payload version for `SMC_MSG_TYPE_REPORT_HWC_TIMEOUT`.
pub const SMC_MSG_TYPE_REPORT_HWC_TIMEOUT_VERSION: u8 = 1;
110
/// Payload of the `SMC_MSG_TYPE_ESTABLISH_HWC` message, which sets up the
/// hardware communication channel queues.
///
/// NOTE(review): `eq`/`cq`/`rq`/`sq` appear to be 48-bit queue addresses with
/// `high` holding additional upper bits — confirm against the device spec.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EstablishHwc {
    /// Event queue address (low 48 bits).
    pub eq: [u8; 6],
    /// Completion queue address (low 48 bits).
    pub cq: [u8; 6],
    /// Receive queue address (low 48 bits).
    pub rq: [u8; 6],
    /// Send queue address (low 48 bits).
    pub sq: [u8; 6],
    /// High-order address bits.
    pub high: u16,
    /// MSI-X vector index for the HWC event queue.
    pub msix: u16,
    /// SMC protocol header; placed last in the shared-memory region.
    pub hdr: SmcProtoHdr,
}
122
/// A work queue entry (WQE): an 8-byte header followed by a data area holding
/// inline out-of-band (OOB) data and a scatter/gather list.
#[repr(C, align(8))]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Wqe {
    /// WQE header describing the layout of `data`.
    pub header: WqeHeader,
    /// Data area: inline client OOB followed by the SGL. Sized so the whole
    /// WQE is 512 bytes.
    pub data: [u8; 512 - 8],
}

/// WQEs are laid out in the queue at 32-byte granularity.
pub const WQE_ALIGNMENT: usize = 32;
132
133impl Wqe {
134 pub fn oob(&self) -> &[u8] {
135 &self.data[..self.header.oob_len()]
136 }
137
138 pub fn sgl(&self) -> &[Sge] {
139 <[Sge]>::ref_from_prefix_with_elems(
140 &self.data[self.header.sgl_offset()..],
141 self.header.params.num_sgl_entries() as usize,
142 )
143 .unwrap()
144 .0
145 }
146}
147
/// The fixed 8-byte header at the start of every WQE.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct WqeHeader {
    pub reserved: [u8; 3],
    /// Valid byte count in the last SGL entry's unit; used by
    /// [`Self::sgl_direct_len`].
    pub last_vbytes: u8,
    /// Bit-packed layout parameters for the WQE.
    pub params: WqeParams,
}
155
156impl WqeHeader {
157 pub fn total_len(&self) -> usize {
158 (self.data_len() + 8 + WQE_ALIGNMENT - 1) & !(WQE_ALIGNMENT - 1)
159 }
160
161 pub fn data_len(&self) -> usize {
162 self.oob_len() + self.sgl_len()
163 }
164
165 pub fn oob_len(&self) -> usize {
166 match self.params.inline_client_oob_size() {
167 CLIENT_OOB_8 => 8,
168 CLIENT_OOB_24 => 24,
169 CLIENT_OOB_32 => 32,
170 _ => 8,
171 }
172 }
173
174 pub fn sgl_offset(&self) -> usize {
175 ((8 + self.oob_len() + 15) & !15) - 8
176 }
177
178 pub fn sgl_len(&self) -> usize {
179 self.params.num_sgl_entries() as usize * 16
180 }
181
182 pub fn sgl_direct_len(&self) -> usize {
183 debug_assert!(self.params.sgl_direct());
184 let last = (self.last_vbytes.wrapping_sub(1) & 15) + 1;
185 self.sgl_len().wrapping_sub(16).wrapping_add(last as usize) & 31
186 }
187}
188
/// Bit-packed WQE layout parameters stored in [`WqeHeader::params`].
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct WqeParams {
    /// Number of entries in the scatter/gather list.
    pub num_sgl_entries: u8,
    /// Encoded size of the inline client OOB area; one of the `CLIENT_OOB_*`
    /// values.
    #[bits(3)]
    pub inline_client_oob_size: u8,
    /// Set when the client OOB data is carried in the SGL instead of inline.
    pub client_oob_in_sgl: bool,
    #[bits(4)]
    pub reserved: u8,
    /// Client-specific unit data (14 bits).
    #[bits(14)]
    pub gd_client_unit_data: u16,
    pub reserved2: bool,
    /// Set when the SGL area holds the payload directly rather than
    /// address/length descriptors.
    pub sgl_direct: bool,
}

/// Encoding for an 8-byte inline client OOB area.
pub const CLIENT_OOB_8: u8 = 2;
/// Encoding for a 24-byte inline client OOB area.
pub const CLIENT_OOB_24: u8 = 6;
/// Encoding for a 32-byte inline client OOB area.
pub const CLIENT_OOB_32: u8 = 7;
207
/// A scatter/gather entry: a 16-byte descriptor of one memory segment.
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Sge {
    /// Address of the segment.
    pub address: u64,
    /// Memory key (registration handle) covering the segment.
    pub mem_key: u32,
    /// Length of the segment in bytes.
    pub size: u32,
}
215
/// A 64-byte completion queue entry: client data followed by bit-packed
/// parameters.
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Cqe {
    /// Client-specific completion data.
    pub data: [u8; 60],
    /// Completion parameters, including the owner count used to detect valid
    /// entries.
    pub params: CqeParams,
}

/// Bit-packed parameters in the last 4 bytes of a CQE.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct CqeParams {
    /// Work queue number this completion refers to (24 bits).
    #[bits(24)]
    pub wq_number: u32,
    /// Set when the completion is for a send work queue.
    pub is_send_wq: bool,
    pub cmpln: bool,
    #[bits(3)]
    pub reserved: u8,
    /// Owner count; compared against the consumer's count (mod
    /// `OWNER_MASK + 1`) to detect newly written entries.
    #[bits(3)]
    pub owner_count: u8,
}

/// Width in bits of the queue-entry owner count field.
pub const OWNER_BITS: u32 = 3;
/// Mask for the owner count field.
pub const OWNER_MASK: u32 = (1 << OWNER_BITS) - 1;
238
/// A 16-byte event queue entry: event data followed by bit-packed parameters.
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Eqe {
    /// Event-type-specific data.
    pub data: [u8; 12],
    /// Event parameters, including the event type and owner count.
    pub params: EqeParams,
}

/// Bit-packed parameters in the last 4 bytes of an EQE.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EqeParams {
    /// Event type; one of the `GDMA_EQE_*` values.
    pub event_type: u8,
    pub reserved: u8,
    #[bits(13)]
    pub reserved2: u16,
    /// Owner count used to detect newly written entries (see [`OWNER_BITS`]).
    #[bits(3)]
    pub owner_count: u8,
}

/// A completion arrived on a CQ associated with this EQ.
pub const GDMA_EQE_COMPLETION: u8 = 3;
/// Test event generated via `GDMA_GENERATE_TEST_EQE`.
pub const GDMA_EQE_TEST_EVENT: u8 = 64;
/// HWC initialization: carries the EQ ID and doorbell (see [`HwcInitEqIdDb`]).
pub const GDMA_EQE_HWC_INIT_EQ_ID_DB: u8 = 129;
/// HWC initialization: carries typed data (see [`HwcInitTypeData`]).
pub const GDMA_EQE_HWC_INIT_DATA: u8 = 130;
/// HWC initialization complete.
pub const GDMA_EQE_HWC_INIT_DONE: u8 = 131;
/// HWC reconfiguration data (see [`EqeDataReconfig`]).
pub const GDMA_EQE_HWC_RECONFIG_DATA: u8 = 133;
/// VF reconfiguration event.
pub const GDMA_EQE_HWC_RECONFIG_VF: u8 = 135;
265
/// EQE payload for `GDMA_EQE_HWC_INIT_EQ_ID_DB`: the HWC event queue ID and
/// its doorbell index.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcInitEqIdDb {
    /// Event queue ID assigned to the HWC.
    pub eq_id: u16,
    /// Doorbell index for the HWC queues.
    pub doorbell: u16,
}

/// EQE payload for `GDMA_EQE_HWC_INIT_DATA`: a typed 24-bit value.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcInitTypeData {
    /// The value being reported (24 bits).
    #[bits(24)]
    pub value: u32,
    /// What `value` means; one of the `HWC_INIT_DATA_*` values.
    pub ty: u8,
}
280
/// Reconfig data type: HWC command timeout configuration.
pub const HWC_DATA_CONFIG_HWC_TIMEOUT: u8 = 1;
/// Reconfig data type: hardware vport link connected.
pub const HWC_DATA_TYPE_HW_VPORT_LINK_CONNECT: u8 = 2;
/// Reconfig data type: hardware vport link disconnected.
pub const HWC_DATA_TYPE_HW_VPORT_LINK_DISCONNECT: u8 = 3;

/// EQE payload for `GDMA_EQE_HWC_RECONFIG_DATA`: a 3-byte value plus a type
/// tag (one of the `HWC_DATA_*` values).
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EqeDataReconfig {
    /// Type-specific data.
    pub data: [u8; 3],
    /// One of the `HWC_DATA_*` values.
    pub data_type: u8,
    pub reserved1: [u8; 8],
}

/// HWC init data types reported via [`HwcInitTypeData`].
pub const HWC_INIT_DATA_CQID: u8 = 1;
pub const HWC_INIT_DATA_RQID: u8 = 2;
pub const HWC_INIT_DATA_SQID: u8 = 3;
pub const HWC_INIT_DATA_QUEUE_DEPTH: u8 = 4;
pub const HWC_INIT_DATA_MAX_REQUEST: u8 = 5;
pub const HWC_INIT_DATA_MAX_RESPONSE: u8 = 6;
pub const HWC_INIT_DATA_MAX_NUM_CQS: u8 = 7;
pub const HWC_INIT_DATA_PDID: u8 = 8;
pub const HWC_INIT_DATA_GPA_MKEY: u8 = 9;
301
open_enum! {
    /// GDMA request message types, carried in [`GdmaMsgHdr::msg_type`].
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaRequestType : u32 {
        GDMA_VERIFY_VF_DRIVER_VERSION = 1,
        GDMA_QUERY_MAX_RESOURCES = 2,
        GDMA_LIST_DEVICES = 3,
        GDMA_REGISTER_DEVICE = 4,
        GDMA_DEREGISTER_DEVICE = 5,
        GDMA_GENERATE_TEST_EQE = 10,
        GDMA_GENERATE_RECONFIG_VF_EVENT = 11,
        GDMA_CREATE_QUEUE = 12,
        GDMA_DISABLE_QUEUE = 13,
        GDMA_CREATE_DMA_REGION = 25,
        GDMA_DMA_REGION_ADD_PAGES = 26,
        GDMA_DESTROY_DMA_REGION = 27,
        GDMA_CHANGE_MSIX_FOR_EQ = 81,
    }
}
320
/// Common header for GDMA messages exchanged over the HWC.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaMsgHdr {
    /// Header format type; [`GDMA_STANDARD_HEADER_TYPE`] for standard
    /// messages.
    pub hdr_type: u32,
    /// Message type (see [`GdmaRequestType`]).
    pub msg_type: u32,
    /// Message format version, e.g. [`GDMA_MESSAGE_V1`].
    pub msg_version: u16,
    /// Caller-chosen ID used to correlate a response with its request.
    pub hwc_msg_id: u16,
    /// Total size of the message in bytes, including this header.
    pub msg_size: u32,
}

/// The standard GDMA message header type.
pub const GDMA_STANDARD_HEADER_TYPE: u32 = 0;

/// Version 1 of the GDMA message format.
pub const GDMA_MESSAGE_V1: u16 = 1;
334
/// Identifies a device instance behind the GDMA function: a device type plus
/// an instance number.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct GdmaDevId {
    /// The device type.
    pub ty: GdmaDevType,
    /// Instance number of this device type.
    pub instance: u16,
}

open_enum! {
    /// Device types exposed by the GDMA function.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaDevType: u16 {
        GDMA_DEVICE_NONE = 0,
        /// The hardware communication channel itself.
        GDMA_DEVICE_HWC = 1,
        /// The network adapter device.
        GDMA_DEVICE_MANA = 2,
    }
}

/// The well-known device ID of the hardware communication channel.
pub const HWC_DEV_ID: GdmaDevId = GdmaDevId {
    ty: GdmaDevType::GDMA_DEVICE_HWC,
    instance: 0,
};
355
/// Header prepended to every GDMA request sent over the HWC.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaReqHdr {
    /// Describes the request message that follows.
    pub req: GdmaMsgHdr,
    /// Describes the expected response (type and maximum size).
    pub resp: GdmaMsgHdr,
    /// The target device for this request.
    pub dev_id: GdmaDevId,
    /// Caller-chosen activity ID, echoed in the response for tracing.
    pub activity_id: u32,
}

/// Header prepended to every GDMA response received over the HWC.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaRespHdr {
    /// Describes the response message that follows.
    pub response: GdmaMsgHdr,
    /// The device that produced the response.
    pub dev_id: GdmaDevId,
    /// Activity ID echoed from the request.
    pub activity_id: u32,
    /// Completion status; 0 for success.
    pub status: u32,
    pub reserved: u32,
}
374
/// Request payload for `GDMA_GENERATE_TEST_EQE`: asks the device to post a
/// test event on the given event queue.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaGenerateTestEventReq {
    /// Index of the event queue to post the test event on.
    pub queue_index: u32,
}
380
/// Out-of-band data carried with HWC transmit WQEs.
///
/// NOTE(review): field semantics inferred from names (virtual queue IDs and
/// routing flags) — confirm against the device specification.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOob {
    pub reserved: u64,
    pub flags1: HwcTxOobFlags1,
    pub flags2: HwcTxOobFlags2,
    pub flags3: HwcTxOobFlags3,
    pub flags4: HwcTxOobFlags4,
}

/// First flags word of [`HwcTxOob`]: destination virtual RQ and VF.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags1 {
    /// Virtual receive queue ID (24 bits).
    #[bits(24)]
    pub vrq_id: u32,
    /// Destination VF ID.
    pub dest_vfid: u8,
}

/// Second flags word of [`HwcTxOob`]: virtual receive CQ.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags2 {
    /// Virtual receive completion queue ID (24 bits).
    #[bits(24)]
    pub vrcq_id: u32,
    pub reserved: u8,
}

/// Third flags word of [`HwcTxOob`]: virtual send CQ and routing flags.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags3 {
    /// Virtual send completion queue ID (24 bits).
    #[bits(24)]
    pub vscq_id: u32,
    /// Loop the message back rather than transmitting.
    pub loopback: bool,
    pub lso_override: bool,
    /// Route the message to the PF.
    pub dest_pf: bool,
    #[bits(5)]
    pub reserved: u8,
}

/// Fourth flags word of [`HwcTxOob`]: virtual send queue.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags4 {
    /// Virtual send queue ID (24 bits).
    #[bits(24)]
    pub vsq_id: u32,
    pub reserved: u8,
}
426
/// Out-of-band data delivered with HWC receive completions.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcRxOob {
    pub flags: HwcRxOobFlags,
    pub reserved2: u32,
    /// Low 32 bits of the WQE address, or an offset.
    /// NOTE(review): which interpretation applies is not visible here —
    /// confirm against the device spec.
    pub wqe_addr_low_or_offset: u32,
    /// High 32 bits of the WQE address.
    pub wqe_addr_high: u32,
    pub client_data_unit: u32,
    /// Size of the TX OOB data associated with this message.
    pub tx_oob_data_size: u32,
    pub chunk_offset: u32,
}

/// Bit-packed flags in [`HwcRxOob`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcRxOobFlags {
    /// Message type (6 bits).
    #[bits(6)]
    pub ty: u8,
    /// End of message.
    pub eom: bool,
    /// Start of message.
    pub som: bool,
    /// Vendor-specific error code.
    pub vendor_err: u8,
    pub reserved1: u16,

    /// Source virtual work queue ID (24 bits).
    #[bits(24)]
    pub src_virt_wq: u32,
    /// Source VF ID.
    pub src_vfid: u8,
}
453
/// Driver capability (flags word 1): supports HWC timeout reconfiguration.
pub const DRIVER_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG: u64 = 0x08;
/// Driver capability (flags word 1): supports variable-size RSS indirection
/// tables.
pub const DRIVER_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT: u64 = 0x20;
/// Driver capability (flags word 1): aware of hardware vport link events.
pub const DRIVER_CAP_FLAG_1_HW_VPORT_LINK_AWARE: u64 = 0x40;
457
/// Request payload for `GDMA_VERIFY_VF_DRIVER_VERSION`: reports the driver's
/// supported protocol range, capabilities, and OS identification.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaVerifyVerReq {
    /// Minimum GDMA protocol version the driver supports.
    pub protocol_ver_min: u64,
    /// Maximum GDMA protocol version the driver supports.
    pub protocol_ver_max: u64,
    /// Driver capability flags, word 1 (see the `DRIVER_CAP_FLAG_1_*`
    /// constants).
    pub gd_drv_cap_flags1: u64,
    pub gd_drv_cap_flags2: u64,
    pub gd_drv_cap_flags3: u64,
    pub gd_drv_cap_flags4: u64,
    /// Driver version identifier.
    pub drv_ver: u64,
    /// Operating system type identifier.
    pub os_type: u32,
    pub reserved: u32,
    pub os_ver_major: u32,
    pub os_ver_minor: u32,
    pub os_ver_build: u32,
    pub os_ver_platform: u32,
    pub reserved_2: u64,
    /// Free-form OS version strings.
    pub os_ver_str1: [u8; 128],
    pub os_ver_str2: [u8; 128],
    pub os_ver_str3: [u8; 128],
    pub os_ver_str4: [u8; 128],
}

/// Response payload for `GDMA_VERIFY_VF_DRIVER_VERSION`: the negotiated
/// protocol version and the PF's capabilities.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaVerifyVerResp {
    /// The protocol version selected by the PF.
    pub gdma_protocol_ver: u64,
    /// PF capability flags, word 1.
    pub pf_cap_flags1: u64,
    pub pf_cap_flags2: u64,
    pub pf_cap_flags3: u64,
    pub pf_cap_flags4: u64,
}
490
/// Response payload for `GDMA_QUERY_MAX_RESOURCES`: per-resource maximum
/// counts available to this function.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaQueryMaxResourcesResp {
    pub status: u32,
    /// Maximum number of send queues.
    pub max_sq: u32,
    /// Maximum number of receive queues.
    pub max_rq: u32,
    /// Maximum number of completion queues.
    pub max_cq: u32,
    /// Maximum number of event queues.
    pub max_eq: u32,
    /// Maximum number of doorbells.
    pub max_db: u32,
    pub max_mst: u32,
    pub max_cq_mod_ctx: u32,
    pub max_mod_cq: u32,
    /// Maximum number of MSI-X vectors.
    pub max_msix: u32,
}

/// Response payload for `GDMA_LIST_DEVICES`: the device IDs behind this
/// function. Only the first `num_of_devs` entries of `devs` are valid.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaListDevicesResp {
    /// Number of valid entries in `devs`.
    pub num_of_devs: u32,
    pub reserved: u32,
    /// Device ID slots; entries beyond `num_of_devs` are unused.
    pub devs: [GdmaDevId; 64],
}
513
/// Response payload for `GDMA_REGISTER_DEVICE`.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaRegisterDeviceResp {
    /// Protection domain ID assigned to the device.
    pub pdid: u32,
    /// Memory key covering guest physical addresses.
    pub gpa_mkey: u32,
    /// Doorbell ID assigned to the device.
    pub db_id: u32,
}

/// Request payload for `GDMA_CREATE_DMA_REGION`. Followed in the message by
/// `page_addr_list_len` page addresses; more can be added with
/// `GDMA_DMA_REGION_ADD_PAGES`.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateDmaRegionReq {
    /// Total length of the region in bytes.
    pub length: u64,
    /// Offset of the region's start within the first page.
    pub offset_in_page: u32,
    /// Page size encoding; [`GDMA_PAGE_TYPE_4K`] for 4 KiB pages.
    pub gdma_page_type: u32,
    /// Total number of pages in the region.
    pub page_count: u32,
    /// Number of page addresses included in this request.
    pub page_addr_list_len: u32,
}

/// 4 KiB page size encoding for [`GdmaCreateDmaRegionReq::gdma_page_type`].
pub const GDMA_PAGE_TYPE_4K: u32 = 0;
534
/// Response payload for `GDMA_CREATE_DMA_REGION`: the handle of the new
/// region.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateDmaRegionResp {
    /// Opaque handle identifying the created DMA region.
    pub gdma_region: u64,
}

/// Request payload for `GDMA_DESTROY_DMA_REGION`.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaDestroyDmaRegionReq {
    /// Handle of the DMA region to destroy.
    pub gdma_region: u64,
}
546
/// Request payload for `GDMA_CREATE_QUEUE`. Some fields apply only to
/// particular queue types (`eq_*`, `cq_*`, `rq_*`, `sq_*` prefixes).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateQueueReq {
    /// The type of queue to create.
    pub queue_type: GdmaQueueType,
    pub reserved1: u32,
    /// Protection domain ID (from device registration).
    pub pdid: u32,
    /// Doorbell ID to associate with the queue.
    pub doorbell_id: u32,
    /// DMA region handle backing the queue memory.
    pub gdma_region: u64,
    pub reserved2: u32,
    /// Size of the queue in bytes.
    pub queue_size: u32,
    pub log2_throttle_limit: u32,
    /// EQ only: MSI-X vector index to signal.
    pub eq_pci_msix_index: u32,
    /// CQ only: moderation context ID.
    pub cq_mod_ctx_id: u32,
    /// CQ only: ID of the parent event queue.
    pub cq_parent_eq_id: u32,
    pub rq_drop_on_overrun: u8,
    pub rq_err_on_wqe_overflow: u8,
    pub rq_chain_rec_wqes: u8,
    pub sq_hw_db: u8,
    pub reserved3: u32,
}
567
open_enum! {
    /// GDMA queue types, used in create/disable queue requests.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaQueueType: u32 {
        /// Send work queue.
        GDMA_SQ = 1,
        /// Receive work queue.
        GDMA_RQ = 2,
        /// Completion queue.
        GDMA_CQ = 3,
        /// Event queue.
        GDMA_EQ = 4,
    }
}
577
/// Response payload for `GDMA_CREATE_QUEUE`: the index of the new queue.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateQueueResp {
    /// Index identifying the created queue.
    pub queue_index: u32,
}

/// Request payload for `GDMA_DISABLE_QUEUE`.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaDisableQueueReq {
    /// The type of the queue being disabled.
    pub queue_type: GdmaQueueType,
    /// Index of the queue to disable.
    pub queue_index: u32,
    pub alloc_res_id_on_creation: u32,
}

/// Request payload for `GDMA_CHANGE_MSIX_FOR_EQ`: retargets an event queue to
/// a different MSI-X vector.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaChangeMsixVectorIndexForEq {
    /// Index of the event queue to retarget.
    pub queue_index: u32,
    /// New MSI-X vector index.
    pub msix: u32,
    pub reserved1: u32,
    pub reserved2: u32,
}