// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Hardware definitions for the GDMA/MANA device, which is the NIC exposed by
//! new Azure hardware SKUs.

#![forbid(unsafe_code)]
#![expect(missing_docs)]

pub mod access;
pub mod bnic;

use bitfield_struct::bitfield;
use inspect::Inspect;
use open_enum::open_enum;
use std::fmt::Debug;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

pub const VENDOR_ID: u16 = 0x1414;
pub const DEVICE_ID: u16 = 0x00BA;

pub const PAGE_SIZE32: u32 = 4096;
pub const PAGE_SIZE64: u64 = 4096;

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes, Inspect)]
pub struct RegMap {
    #[inspect(hex)]
    pub micro_version_number: u16,
    #[inspect(hex)]
    pub minor_version_number: u8,
    #[inspect(hex)]
    pub major_version_number: u8,
    #[inspect(hex)]
    pub reserved: u32,
    #[inspect(hex)]
    pub vf_db_pages_zone_offset: u64,
    #[inspect(hex)]
    pub vf_db_page_sz: u16,
    #[inspect(hex)]
    pub reserved2: u16,
    #[inspect(hex)]
    pub reserved3: u32,
    #[inspect(hex)]
    pub vf_gdma_sriov_shared_reg_start: u64,
    #[inspect(hex)]
    pub vf_gdma_sriov_shared_sz: u16,
    #[inspect(hex)]
    pub reserved4: u16,
    #[inspect(hex)]
    pub reserved5: u32,
}
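
// A minimal sketch of decoding `RegMap` from a snapshot of the device's BAR0
// register page using zerocopy. The buffer contents below are hypothetical
// example values, not data read from real hardware.
#[cfg(test)]
mod regmap_example {
    use super::RegMap;
    use zerocopy::FromBytes;

    #[test]
    fn decode_regmap_from_bar0_bytes() {
        // Hypothetical register contents: version 1.1.0, all else zero.
        let mut bar0 = [0u8; std::mem::size_of::<RegMap>()];
        bar0[2] = 1; // minor_version_number
        bar0[3] = 1; // major_version_number
        let (map, _rest) = RegMap::read_from_prefix(&bar0).unwrap();
        assert_eq!(map.major_version_number, 1);
        assert_eq!(map.minor_version_number, 1);
        assert_eq!(map.micro_version_number, 0);
    }
}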

pub const DB_SQ: u32 = 0;
pub const DB_RQ: u32 = 0x400;
pub const DB_RQ_CLIENT_DATA: u32 = 0x408;
pub const DB_CQ: u32 = 0x800;
pub const DB_EQ: u32 = 0xff8;

#[bitfield(u64)]
pub struct CqEqDoorbellValue {
    #[bits(24)]
    pub id: u32,
    pub reserved: u8,
    #[bits(31)]
    pub tail: u32,
    pub arm: bool,
}

#[bitfield(u64)]
pub struct WqDoorbellValue {
    #[bits(24)]
    pub id: u32,
    pub num_rwqe: u8,
    pub tail: u32,
}
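
// A short sketch of how these encodings are used: the driver writes a
// `WqDoorbellValue` at `DB_SQ`/`DB_RQ` within the doorbell page, and a
// `CqEqDoorbellValue` (with `arm` set to re-enable interrupt delivery) at
// `DB_CQ`/`DB_EQ`. The queue ids and tails below are hypothetical.
#[cfg(test)]
mod doorbell_example {
    use super::CqEqDoorbellValue;
    use super::WqDoorbellValue;

    #[test]
    fn encode_doorbell_values() {
        // Ring send queue 5 after advancing its tail to byte offset 256.
        let sq = WqDoorbellValue::new().with_id(5).with_tail(256);
        assert_eq!(u64::from(sq) & 0xff_ffff, 5);

        // Re-arm EQ 2 with its tail at entry offset 10.
        let eq = CqEqDoorbellValue::new()
            .with_id(2)
            .with_tail(10)
            .with_arm(true);
        assert_eq!(u64::from(eq) >> 63, 1); // arm is the top bit
    }
}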

// Shmem
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct SmcProtoHdr {
    #[bits(3)]
    pub msg_type: u8,
    #[bits(3)]
    pub msg_version: u8,
    pub reserved_1: bool,
    pub is_response: bool,
    pub status: u8,
    pub reserved_2: u8,
    pub reset_vf: bool,
    #[bits(6)]
    pub reserved_3: u8,
    pub owner_is_pf: bool,
}

open_enum! {
    pub enum SmcMessageType: u8 {
        SMC_MSG_TYPE_ESTABLISH_HWC = 1,
        SMC_MSG_TYPE_DESTROY_HWC = 2,
        SMC_MSG_TYPE_REPORT_HWC_TIMEOUT = 4,
    }
}

pub const SMC_MSG_TYPE_ESTABLISH_HWC_VERSION: u8 = 0;
pub const SMC_MSG_TYPE_DESTROY_HWC_VERSION: u8 = 0;
pub const SMC_MSG_TYPE_REPORT_HWC_TIMEOUT_VERSION: u8 = 1;

#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EstablishHwc {
    pub eq: [u8; 6],
    pub cq: [u8; 6],
    pub rq: [u8; 6],
    pub sq: [u8; 6],
    pub high: u16,
    pub msix: u16,
    pub hdr: SmcProtoHdr,
}
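
// A minimal sketch (with a hypothetical address packing) of building the
// shared-memory ESTABLISH_HWC message: the 6-byte queue fields carry the low
// bits of each queue's base address, and the header records the message type
// and version. The addresses and MSI-X index below are made up.
#[cfg(test)]
mod establish_hwc_example {
    use super::EstablishHwc;
    use super::SmcMessageType;
    use super::SmcProtoHdr;
    use super::SMC_MSG_TYPE_ESTABLISH_HWC_VERSION;
    use zerocopy::IntoBytes;

    /// Hypothetical helper: the low 48 bits of an address as a 6-byte field.
    fn low48(gpa: u64) -> [u8; 6] {
        gpa.to_le_bytes()[..6].try_into().unwrap()
    }

    #[test]
    fn pack_establish_hwc() {
        let msg = EstablishHwc {
            eq: low48(0x10_0000),
            cq: low48(0x10_1000),
            rq: low48(0x10_2000),
            sq: low48(0x10_3000),
            high: 0,
            msix: 3,
            hdr: SmcProtoHdr::new()
                .with_msg_type(SmcMessageType::SMC_MSG_TYPE_ESTABLISH_HWC.0)
                .with_msg_version(SMC_MSG_TYPE_ESTABLISH_HWC_VERSION),
        };
        // The whole message is 32 bytes written into the shared memory window.
        assert_eq!(msg.as_bytes().len(), 32);
    }
}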

// Wq
#[repr(C, align(8))]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Wqe {
    pub header: WqeHeader,
    pub data: [u8; 512 - 8],
}

pub const WQE_ALIGNMENT: usize = 32;

impl Wqe {
    /// The inline client OOB data at the start of `data`.
    pub fn oob(&self) -> &[u8] {
        &self.data[..self.header.oob_len()]
    }

    /// The scatter-gather list that follows the inline OOB data.
    pub fn sgl(&self) -> &[Sge] {
        <[Sge]>::ref_from_prefix_with_elems(
            &self.data[self.header.sgl_offset()..],
            self.header.params.num_sgl_entries() as usize,
        )
        .unwrap()
        .0
    }
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct WqeHeader {
    pub reserved: [u8; 3],
    pub last_vbytes: u8,
    pub params: WqeParams,
}

impl WqeHeader {
    /// The total WQE size in bytes: the 8-byte header plus data, rounded up
    /// to the 32-byte WQE alignment.
    pub fn total_len(&self) -> usize {
        (self.data_len() + 8 + WQE_ALIGNMENT - 1) & !(WQE_ALIGNMENT - 1)
    }

    /// The number of bytes of `data` in use: inline OOB plus SGL.
    pub fn data_len(&self) -> usize {
        self.oob_len() + self.sgl_len()
    }

    /// The length of the inline client OOB data.
    pub fn oob_len(&self) -> usize {
        match self.params.inline_client_oob_size() {
            CLIENT_OOB_8 => 8,
            CLIENT_OOB_24 => 24,
            CLIENT_OOB_32 => 32,
            _ => 8,
        }
    }

    /// The offset of the SGL within `data`: the first 16-byte boundary past
    /// the header and inline OOB.
    pub fn sgl_offset(&self) -> usize {
        ((8 + self.oob_len() + 15) & !15) - 8
    }

    /// The length of the SGL in bytes (16 bytes per entry).
    pub fn sgl_len(&self) -> usize {
        self.params.num_sgl_entries() as usize * 16
    }

    /// The length in bytes of a direct SGL, where `last_vbytes` encodes how
    /// many bytes of the final 16-byte entry are valid.
    pub fn sgl_direct_len(&self) -> usize {
        debug_assert!(self.params.sgl_direct());
        let last = (self.last_vbytes.wrapping_sub(1) & 15) + 1;
        self.sgl_len() - 16 + last as usize
    }
}
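
// A worked example of the layout math above for the (hypothetical) case of a
// WQE with 24 bytes of inline OOB and two SGEs: the SGL starts at the next
// 16-byte boundary past the 8-byte header plus OOB, and the total length is
// rounded up to the 32-byte WQE alignment.
#[cfg(test)]
mod wqe_layout_example {
    use super::WqeHeader;
    use super::WqeParams;
    use super::CLIENT_OOB_24;

    #[test]
    fn layout_for_24_byte_oob_and_two_sges() {
        let header = WqeHeader {
            reserved: [0; 3],
            last_vbytes: 0,
            params: WqeParams::new()
                .with_num_sgl_entries(2)
                .with_inline_client_oob_size(CLIENT_OOB_24),
        };
        assert_eq!(header.oob_len(), 24);
        // 8 (header) + 24 (OOB) = 32 is already 16-byte aligned, so the SGL
        // begins 24 bytes into `data`.
        assert_eq!(header.sgl_offset(), 24);
        assert_eq!(header.sgl_len(), 32); // two 16-byte SGEs
        // 8 + 24 + 32 = 64 bytes, already a multiple of 32.
        assert_eq!(header.total_len(), 64);
    }
}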

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct WqeParams {
    pub num_sgl_entries: u8,
    #[bits(3)]
    pub inline_client_oob_size: u8,
    pub client_oob_in_sgl: bool,
    #[bits(4)]
    pub reserved: u8,
    #[bits(14)]
    pub gd_client_unit_data: u16,
    pub reserved2: bool,
    pub sgl_direct: bool,
}

// Valid encodings for `WqeParams::inline_client_oob_size`.
pub const CLIENT_OOB_8: u8 = 2;
pub const CLIENT_OOB_24: u8 = 6;
pub const CLIENT_OOB_32: u8 = 7;

#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Sge {
    pub address: u64,
    pub mem_key: u32,
    pub size: u32,
}

#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Cqe {
    pub data: [u8; 60],
    pub params: CqeParams,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct CqeParams {
    #[bits(24)]
    pub wq_number: u32,
    pub is_send_wq: bool,
    pub cmpln: bool,
    #[bits(3)]
    pub reserved: u8,
    #[bits(3)]
    pub owner_count: u8,
}

pub const OWNER_BITS: u32 = 3;
pub const OWNER_MASK: u32 = (1 << OWNER_BITS) - 1;
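
// A sketch of how the 3-bit owner count is used to detect newly produced
// queue entries: the device stamps each CQE/EQE with the low `OWNER_BITS` of
// its wrap count, and the consumer compares that against the wrap count it
// expects at its current head. The helper below is illustrative only; the
// exact initialization convention is up to the driver.
#[cfg(test)]
mod owner_count_example {
    use super::OWNER_MASK;

    /// Expected owner count for the entry at absolute index `head` in a ring
    /// of `num_entries` entries.
    fn expected_owner(head: u32, num_entries: u32) -> u32 {
        (head / num_entries) & OWNER_MASK
    }

    #[test]
    fn owner_count_wraps_modulo_8() {
        let num_entries = 64;
        assert_eq!(expected_owner(0, num_entries), 0);
        // After one full pass over the ring, the expected count increments...
        assert_eq!(expected_owner(64, num_entries), 1);
        // ...and wraps after eight passes.
        assert_eq!(expected_owner(8 * 64, num_entries), 0);
    }
}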

#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Eqe {
    pub data: [u8; 12],
    pub params: EqeParams,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EqeParams {
    pub event_type: u8,
    pub reserved: u8,
    #[bits(13)]
    pub reserved2: u16,
    #[bits(3)]
    pub owner_count: u8,
}

pub const GDMA_EQE_COMPLETION: u8 = 3;
pub const GDMA_EQE_TEST_EVENT: u8 = 64;
pub const GDMA_EQE_HWC_INIT_EQ_ID_DB: u8 = 129;
pub const GDMA_EQE_HWC_INIT_DATA: u8 = 130;
pub const GDMA_EQE_HWC_INIT_DONE: u8 = 131;
pub const GDMA_EQE_HWC_RECONFIG_DATA: u8 = 133;

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcInitEqIdDb {
    pub eq_id: u16,
    pub doorbell: u16,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcInitTypeData {
    #[bits(24)]
    pub value: u32,
    pub ty: u8,
}

pub const HWC_DATA_CONFIG_HWC_TIMEOUT: u8 = 1;
pub const HWC_DATA_TYPE_HW_VPORT_LINK_CONNECT: u8 = 2;
pub const HWC_DATA_TYPE_HW_VPORT_LINK_DISCONNECT: u8 = 3;

#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct EqeDataReconfig {
    pub data: [u8; 3],
    pub data_type: u8,
    pub reserved1: [u8; 8],
}

pub const HWC_INIT_DATA_CQID: u8 = 1;
pub const HWC_INIT_DATA_RQID: u8 = 2;
pub const HWC_INIT_DATA_SQID: u8 = 3;
pub const HWC_INIT_DATA_QUEUE_DEPTH: u8 = 4;
pub const HWC_INIT_DATA_MAX_REQUEST: u8 = 5;
pub const HWC_INIT_DATA_MAX_RESPONSE: u8 = 6;
pub const HWC_INIT_DATA_MAX_NUM_CQS: u8 = 7;
pub const HWC_INIT_DATA_PDID: u8 = 8;
pub const HWC_INIT_DATA_GPA_MKEY: u8 = 9;
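
// A minimal sketch of decoding one HWC init datum: each GDMA_EQE_HWC_INIT_DATA
// event carries a `HwcInitTypeData` whose `ty` selects one of the
// HWC_INIT_DATA_* values above and whose 24-bit `value` is the payload. The
// raw word here is a hypothetical example.
#[cfg(test)]
mod hwc_init_data_example {
    use super::HwcInitTypeData;
    use super::HWC_INIT_DATA_QUEUE_DEPTH;

    #[test]
    fn decode_queue_depth() {
        // ty = HWC_INIT_DATA_QUEUE_DEPTH (4) in the top byte, value = 128.
        let data = HwcInitTypeData::from(0x0400_0080_u32);
        assert_eq!(data.ty(), HWC_INIT_DATA_QUEUE_DEPTH);
        assert_eq!(data.value(), 128);
    }
}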

open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaRequestType: u32 {
        GDMA_VERIFY_VF_DRIVER_VERSION = 1,
        GDMA_QUERY_MAX_RESOURCES = 2,
        GDMA_LIST_DEVICES = 3,
        GDMA_REGISTER_DEVICE = 4,
        GDMA_DEREGISTER_DEVICE = 5,
        GDMA_GENERATE_TEST_EQE = 10,
        GDMA_CREATE_QUEUE = 12,
        GDMA_DISABLE_QUEUE = 13,
        GDMA_CREATE_DMA_REGION = 25,
        GDMA_DMA_REGION_ADD_PAGES = 26,
        GDMA_DESTROY_DMA_REGION = 27,
        GDMA_CHANGE_MSIX_FOR_EQ = 81,
    }
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaMsgHdr {
    pub hdr_type: u32,
    pub msg_type: u32,
    pub msg_version: u16,
    pub hwc_msg_id: u16,
    pub msg_size: u32,
}

pub const GDMA_STANDARD_HEADER_TYPE: u32 = 0;

pub const GDMA_MESSAGE_V1: u16 = 1;

#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct GdmaDevId {
    pub ty: GdmaDevType,
    pub instance: u16,
}

open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaDevType: u16 {
        GDMA_DEVICE_NONE = 0,
        GDMA_DEVICE_HWC = 1,
        GDMA_DEVICE_MANA = 2,
    }
}

pub const HWC_DEV_ID: GdmaDevId = GdmaDevId {
    ty: GdmaDevType::GDMA_DEVICE_HWC,
    instance: 0,
};

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaReqHdr {
    pub req: GdmaMsgHdr,
    pub resp: GdmaMsgHdr,
    pub dev_id: GdmaDevId,
    pub activity_id: u32,
}
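
// A sketch of filling out the paired request/response headers that prefix
// every HWC message, here for a hypothetical GDMA_VERIFY_VF_DRIVER_VERSION
// exchange. The sizes assume `msg_size` covers the header plus the message
// body, as the structures are laid out contiguously on the wire.
#[cfg(test)]
mod req_hdr_example {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn build_verify_ver_header() {
        let hdr = GdmaReqHdr {
            req: GdmaMsgHdr {
                hdr_type: GDMA_STANDARD_HEADER_TYPE,
                msg_type: GdmaRequestType::GDMA_VERIFY_VF_DRIVER_VERSION.0,
                msg_version: GDMA_MESSAGE_V1,
                hwc_msg_id: 0,
                msg_size: (size_of::<GdmaReqHdr>() + size_of::<GdmaVerifyVerReq>()) as u32,
            },
            resp: GdmaMsgHdr {
                hdr_type: GDMA_STANDARD_HEADER_TYPE,
                msg_type: GdmaRequestType::GDMA_VERIFY_VF_DRIVER_VERSION.0,
                msg_version: GDMA_MESSAGE_V1,
                hwc_msg_id: 0,
                msg_size: (size_of::<GdmaRespHdr>() + size_of::<GdmaVerifyVerResp>()) as u32,
            },
            dev_id: HWC_DEV_ID,
            activity_id: 1,
        };
        assert_eq!(hdr.dev_id, HWC_DEV_ID);
    }
}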

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaRespHdr {
    pub response: GdmaMsgHdr,
    pub dev_id: GdmaDevId,
    pub activity_id: u32,
    pub status: u32,
    pub reserved: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaGenerateTestEventReq {
    pub queue_index: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOob {
    pub reserved: u64,
    pub flags1: HwcTxOobFlags1,
    pub flags2: HwcTxOobFlags2,
    pub flags3: HwcTxOobFlags3,
    pub flags4: HwcTxOobFlags4,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags1 {
    #[bits(24)]
    pub vrq_id: u32,
    pub dest_vfid: u8,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags2 {
    #[bits(24)]
    pub vrcq_id: u32,
    pub reserved: u8,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags3 {
    #[bits(24)]
    pub vscq_id: u32,
    pub loopback: bool,
    pub lso_override: bool,
    pub dest_pf: bool,
    #[bits(5)]
    pub reserved: u8,
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcTxOobFlags4 {
    #[bits(24)]
    pub vsq_id: u32,
    pub reserved: u8,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcRxOob {
    pub flags: HwcRxOobFlags,
    pub reserved2: u32,
    pub wqe_addr_low_or_offset: u32,
    pub wqe_addr_high: u32,
    pub client_data_unit: u32,
    pub tx_oob_data_size: u32,
    pub chunk_offset: u32,
}

#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HwcRxOobFlags {
    #[bits(6)]
    pub ty: u8,
    pub eom: bool,
    pub som: bool,
    pub vendor_err: u8,
    pub reserved1: u16,

    #[bits(24)]
    pub src_virt_wq: u32,
    pub src_vfid: u8,
}

pub const DRIVER_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG: u64 = 0x08;
pub const DRIVER_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT: u64 = 0x20;
pub const DRIVER_CAP_FLAG_1_HW_VPORT_LINK_AWARE: u64 = 0x40;

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaVerifyVerReq {
    pub protocol_ver_min: u64,
    pub protocol_ver_max: u64,
    pub gd_drv_cap_flags1: u64,
    pub gd_drv_cap_flags2: u64,
    pub gd_drv_cap_flags3: u64,
    pub gd_drv_cap_flags4: u64,
    pub drv_ver: u64,
    pub os_type: u32,
    pub reserved: u32,
    pub os_ver_major: u32,
    pub os_ver_minor: u32,
    pub os_ver_build: u32,
    pub os_ver_platform: u32,
    pub reserved_2: u64,
    pub os_ver_str1: [u8; 128],
    pub os_ver_str2: [u8; 128],
    pub os_ver_str3: [u8; 128],
    pub os_ver_str4: [u8; 128],
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaVerifyVerResp {
    pub gdma_protocol_ver: u64,
    pub pf_cap_flags1: u64,
    pub pf_cap_flags2: u64,
    pub pf_cap_flags3: u64,
    pub pf_cap_flags4: u64,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaQueryMaxResourcesResp {
    pub status: u32,
    pub max_sq: u32,
    pub max_rq: u32,
    pub max_cq: u32,
    pub max_eq: u32,
    pub max_db: u32,
    pub max_mst: u32,
    pub max_cq_mod_ctx: u32,
    pub max_mod_cq: u32,
    pub max_msix: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaListDevicesResp {
    pub num_of_devs: u32,
    pub reserved: u32,
    pub devs: [GdmaDevId; 64],
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaRegisterDeviceResp {
    pub pdid: u32,
    pub gpa_mkey: u32,
    pub db_id: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateDmaRegionReq {
    pub length: u64,
    pub offset_in_page: u32,
    pub gdma_page_type: u32,
    pub page_count: u32,
    pub page_addr_list_len: u32,
    // Followed by u64 page list.
}

pub const GDMA_PAGE_TYPE_4K: u32 = 0;
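
// A sketch of serializing the variable-length CREATE_DMA_REGION message body:
// the fixed struct is followed directly by `page_addr_list_len` u64 page
// addresses. The page addresses below are hypothetical.
#[cfg(test)]
mod create_dma_region_example {
    use super::GdmaCreateDmaRegionReq;
    use super::GDMA_PAGE_TYPE_4K;
    use super::PAGE_SIZE64;
    use zerocopy::IntoBytes;

    #[test]
    fn serialize_with_page_list() {
        let pages: [u64; 2] = [0x10_0000, 0x10_1000];
        let req = GdmaCreateDmaRegionReq {
            length: pages.len() as u64 * PAGE_SIZE64,
            offset_in_page: 0,
            gdma_page_type: GDMA_PAGE_TYPE_4K,
            page_count: pages.len() as u32,
            page_addr_list_len: pages.len() as u32,
        };
        let mut body = req.as_bytes().to_vec();
        body.extend_from_slice(pages.as_bytes());
        assert_eq!(
            body.len(),
            std::mem::size_of::<GdmaCreateDmaRegionReq>() + 16
        );
    }
}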

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateDmaRegionResp {
    pub gdma_region: u64,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaDestroyDmaRegionReq {
    pub gdma_region: u64,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateQueueReq {
    pub queue_type: GdmaQueueType,
    pub reserved1: u32,
    pub pdid: u32,
    pub doorbell_id: u32,
    pub gdma_region: u64,
    pub reserved2: u32,
    pub queue_size: u32,
    pub log2_throttle_limit: u32,
    pub eq_pci_msix_index: u32,
    pub cq_mod_ctx_id: u32,
    pub cq_parent_eq_id: u32,
    pub rq_drop_on_overrun: u8,
    pub rq_err_on_wqe_overflow: u8,
    pub rq_chain_rec_wqes: u8,
    pub sq_hw_db: u8,
    pub reserved3: u32,
}

open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum GdmaQueueType: u32 {
        GDMA_SQ = 1,
        GDMA_RQ = 2,
        GDMA_CQ = 3,
        GDMA_EQ = 4,
    }
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaCreateQueueResp {
    pub queue_index: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaDisableQueueReq {
    pub queue_type: GdmaQueueType,
    pub queue_index: u32,
    pub alloc_res_id_on_creation: u32,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdmaChangeMsixVectorIndexForEq {
    pub queue_index: u32,
    pub msix: u32,
    pub reserved1: u32,
    pub reserved2: u32,
}