net_backend/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Network backend traits and infrastructure.
5//!
6//! This crate defines the abstraction boundary between network
7//! **frontends** (guest-facing devices) and network **backends**
8//! (host-side packet I/O). The key types are:
9//!
10//! * [`Endpoint`] — a backend factory. One per NIC, responsible for
11//!   creating [`Queue`] objects when the frontend activates the device.
12//!
13//! * [`Queue`] — a single TX/RX data path. Backends implement this to
14//!   send and receive packets. A device may have multiple queues (RSS).
15//!
16//! * [`BufferAccess`] — owned by the frontend, provides access to
17//!   guest memory receive buffers. Passed by `&mut` reference to every
18//!   [`Queue`] method that needs it, so the frontend retains exclusive
19//!   ownership and no internal locking is required.
20//!
21//! ## Lifecycle
22//!
23//! 1. The frontend creates a [`BufferAccess`] implementation and one
24//!    [`QueueConfig`] per desired queue (containing just a driver).
25//! 2. It calls [`Endpoint::get_queues`], which returns boxed [`Queue`]
26//!    objects.
27//! 3. The frontend posts initial receive buffers by calling
28//!    [`Queue::rx_avail`] with its [`BufferAccess`].
29//! 4. The main loop polls [`Queue::poll_ready`] for backend events,
30//!    then calls [`Queue::rx_poll`] / [`Queue::tx_avail`] /
31//!    [`Queue::tx_poll`] to exchange packets—always passing
32//!    `&mut dyn BufferAccess`.
33//! 5. On shutdown, queues are dropped and [`Endpoint::stop`] is called.
34
35#![expect(missing_docs)]
36#![forbid(unsafe_code)]
37
38pub mod loopback;
39pub mod null;
40pub mod resolve;
41pub mod tests;
42
43use async_trait::async_trait;
44use bitfield_struct::bitfield;
45use futures::FutureExt;
46use futures::StreamExt;
47use futures::TryFutureExt;
48use futures::lock::Mutex;
49use futures_concurrency::future::Race;
50use guestmem::GuestMemory;
51use guestmem::GuestMemoryError;
52use inspect::InspectMut;
53use inspect_counters::Counter;
54use mesh::rpc::Rpc;
55use mesh::rpc::RpcSend;
56use null::NullEndpoint;
57use pal_async::driver::Driver;
58use std::future::pending;
59use std::sync::Arc;
60use std::task::Context;
61use std::task::Poll;
62use thiserror::Error;
63
/// Per-queue configuration passed to [`Endpoint::get_queues`].
///
/// Contains only an async driver handle. Receive buffers are posted
/// separately via [`Queue::rx_avail`] after queue creation.
pub struct QueueConfig {
    /// The driver the backend uses for asynchronous waits on this queue.
    pub driver: Box<dyn Driver>,
}
71
/// A network endpoint — the backend side of a NIC.
///
/// An endpoint is a factory for [`Queue`] objects. It represents a
/// connection to some packet transport (TAP device, hardware NIC,
/// user-space network stack, etc.) and can create one or more queues
/// for parallel TX/RX processing.
///
/// Frontends (e.g. `virtio_net`, `netvsp`, `gdma`) own the endpoint
/// and call [`get_queues`](Endpoint::get_queues) when the guest
/// activates the NIC.
#[async_trait]
pub trait Endpoint: Send + Sync + InspectMut {
    /// Returns an informational endpoint type.
    fn endpoint_type(&self) -> &'static str;

    /// Initializes the queues associated with the endpoint.
    ///
    /// Creates one queue per entry in `config`, appending them to `queues`.
    /// `rss` carries the RSS key and indirection table when the frontend has
    /// enabled receive-side scaling.
    async fn get_queues(
        &mut self,
        config: Vec<QueueConfig>,
        rss: Option<&RssConfig<'_>>,
        queues: &mut Vec<Box<dyn Queue>>,
    ) -> anyhow::Result<()>;

    /// Stops the endpoint.
    ///
    /// All queues returned via `get_queues` must have been dropped.
    async fn stop(&mut self);

    /// Specifies whether packets are always completed in order.
    fn is_ordered(&self) -> bool {
        false
    }

    /// Specifies the supported set of transmit offloads.
    fn tx_offload_support(&self) -> TxOffloadSupport {
        // No offloads by default; backends opt in by overriding.
        TxOffloadSupport::default()
    }

    /// Specifies parameters related to supporting multiple queues.
    fn multiqueue_support(&self) -> MultiQueueSupport {
        // Default: a single queue and no RSS indirection table.
        MultiQueueSupport {
            max_queues: 1,
            indirection_table_size: 0,
        }
    }

    /// If true, transmits are guaranteed to complete quickly. This is used to
    /// allow eliding tx notifications from the guest when there are already
    /// some tx packets in flight.
    fn tx_fast_completions(&self) -> bool {
        false
    }

    /// Sets the current data path for packet flow (e.g. via vmbus synthnic or through virtual function).
    /// This is only supported for endpoints that pair with an accelerated device.
    async fn set_data_path_to_guest_vf(&self, _use_vf: bool) -> anyhow::Result<()> {
        Err(anyhow::Error::msg("Unsupported in current endpoint"))
    }

    /// Queries whether the data path currently flows through the guest VF.
    ///
    /// Fails unless the endpoint pairs with an accelerated device and
    /// overrides this method.
    async fn get_data_path_to_guest_vf(&self) -> anyhow::Result<bool> {
        Err(anyhow::Error::msg("Unsupported in current endpoint"))
    }

    /// On completion, the return value indicates the specific endpoint action to take.
    ///
    /// The default implementation never completes.
    async fn wait_for_endpoint_action(&mut self) -> EndpointAction {
        pending().await
    }

    /// Link speed in bps.
    fn link_speed(&self) -> u64 {
        // Reporting a reasonable default value (10Gbps) here that the individual endpoints
        // can override.
        10 * 1000 * 1000 * 1000
    }
}
147
/// Multi-queue related support.
///
/// Advertised by a backend via [`Endpoint::multiqueue_support`].
#[derive(Debug, Copy, Clone)]
pub struct MultiQueueSupport {
    /// The number of supported queues.
    pub max_queues: u16,
    /// The size of the RSS indirection table.
    pub indirection_table_size: u16,
}
156
/// The set of supported transmit offloads.
///
/// Advertised by a backend via [`Endpoint::tx_offload_support`]; the
/// corresponding requests appear per packet in [`TxFlags`].
#[derive(Debug, Copy, Clone, Default)]
pub struct TxOffloadSupport {
    /// IPv4 header checksum offload.
    pub ipv4_header: bool,
    /// TCP checksum offload.
    pub tcp: bool,
    /// UDP checksum offload.
    pub udp: bool,
    /// TCP segmentation offload.
    pub tso: bool,
}
169
/// RSS (receive-side scaling) configuration passed to
/// [`Endpoint::get_queues`].
#[derive(Debug, Clone)]
pub struct RssConfig<'a> {
    /// The RSS hash key.
    pub key: &'a [u8],
    /// The indirection table mapping hash values to queue indices.
    pub indirection_table: &'a [u16],
    pub flags: u32, // TODO
}
176
/// Errors returned by [`Queue::tx_poll`].
#[derive(Error, Debug)]
pub enum TxError {
    /// A transient failure; the queue should be restarted.
    #[error("error requiring queue restart. {0}")]
    TryRestart(#[source] anyhow::Error),
    /// An unrecoverable failure.
    #[error("unrecoverable error. {0}")]
    Fatal(#[source] anyhow::Error),
}
/// Per-queue counters exposed by a backend via [`Queue::queue_stats`].
pub trait BackendQueueStats {
    /// Number of receive errors.
    fn rx_errors(&self) -> Counter;
    /// Number of transmit errors.
    fn tx_errors(&self) -> Counter;
    /// Number of received packets.
    fn rx_packets(&self) -> Counter;
    /// Number of transmitted packets.
    fn tx_packets(&self) -> Counter;
}
190
/// A single TX/RX data path for sending and receiving network packets.
///
/// Created by [`Endpoint::get_queues`] and driven by the frontend in
/// a poll loop. Every method that touches receive buffers takes
/// `pool: &mut dyn BufferAccess` so the frontend retains ownership
/// of guest memory state.
///
/// Typical poll loop:
/// ```text
/// loop {
///     poll_ready(cx, pool)  // wait for backend events
///     rx_poll(pool, ..)     // drain completed receives
///     tx_avail(pool, ..)    // post guest TX packets
///     tx_poll(pool, ..)     // drain TX completions
/// }
/// ```
#[async_trait]
pub trait Queue: Send + InspectMut {
    /// Updates the queue's target VP.
    ///
    /// The default implementation ignores the hint.
    async fn update_target_vp(&mut self, target_vp: u32) {
        let _ = target_vp;
    }

    /// Polls the queue for readiness.
    ///
    /// Returns `Poll::Ready(())` when there may be backend work to process
    /// via `rx_poll`/`tx_poll`.
    fn poll_ready(&mut self, cx: &mut Context<'_>, pool: &mut dyn BufferAccess) -> Poll<()>;

    /// Makes receive buffers available for use by the device.
    fn rx_avail(&mut self, pool: &mut dyn BufferAccess, done: &[RxId]);

    /// Polls the device for receives.
    ///
    /// Completed receive IDs are written into `packets`; the return value is
    /// the number of entries written.
    fn rx_poll(
        &mut self,
        pool: &mut dyn BufferAccess,
        packets: &mut [RxId],
    ) -> anyhow::Result<usize>;

    /// Posts transmits to the device.
    ///
    /// Returns `Ok(false)` if the segments will complete asynchronously.
    // NOTE(review): the `usize` appears to be the number of segments
    // accepted synchronously — confirm against implementations.
    fn tx_avail(
        &mut self,
        pool: &mut dyn BufferAccess,
        segments: &[TxSegment],
    ) -> anyhow::Result<(bool, usize)>;

    /// Polls the device for transmit completions.
    ///
    /// Completed transmit IDs are written into `done`; the return value is
    /// the number of entries written.
    fn tx_poll(&mut self, pool: &mut dyn BufferAccess, done: &mut [TxId])
    -> Result<usize, TxError>;

    /// Get queue statistics
    fn queue_stats(&self) -> Option<&dyn BackendQueueStats> {
        None // Default implementation - not all queues implement stats
    }
}
245
/// Frontend-owned access to guest receive buffers.
///
/// Each frontend implements this trait to map [`RxId`] values to
/// guest memory regions. The backend writes received packet data
/// and metadata through these methods.
///
/// The frontend owns the `BufferAccess` and passes `&mut` references
/// to [`Queue`] methods. This means no `Arc`/`Mutex` is needed
/// between the frontend and backend for buffer access—the borrow
/// checker enforces exclusive access statically.
pub trait BufferAccess {
    /// The associated guest memory accessor.
    fn guest_memory(&self) -> &GuestMemory;

    /// Writes data to the specified buffer.
    fn write_data(&mut self, id: RxId, data: &[u8]);

    /// Appends the guest address segments for the specified buffer to `buf`.
    ///
    /// Callers must clear `buf` before calling if they do not want segments
    /// from a previous call to be retained.
    fn push_guest_addresses(&self, id: RxId, buf: &mut Vec<RxBufferSegment>);

    /// The capacity of the specified buffer in bytes.
    fn capacity(&self, id: RxId) -> u32;

    /// Sets the packet metadata for the receive.
    fn write_header(&mut self, id: RxId, metadata: &RxMetadata);

    /// Writes the packet header and data in a single call.
    ///
    /// The default implementation writes the data first, then the header.
    fn write_packet(&mut self, id: RxId, metadata: &RxMetadata, data: &[u8]) {
        self.write_data(id, data);
        self.write_header(id, metadata);
    }
}
281
/// A receive buffer ID.
///
/// Posted by the frontend via [`Queue::rx_avail`] and returned by the
/// backend via [`Queue::rx_poll`] as receives complete.
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
pub struct RxId(pub u32);
286
/// An individual segment in guest memory of a receive buffer.
///
/// Produced by [`BufferAccess::push_guest_addresses`].
#[derive(Debug, Copy, Clone)]
pub struct RxBufferSegment {
    /// Guest physical address.
    pub gpa: u64,
    /// The number of bytes in this range.
    pub len: u32,
}
295
/// Receive packet metadata.
///
/// Written to the receive buffer by the backend via
/// [`BufferAccess::write_header`].
#[derive(Debug, Copy, Clone)]
pub struct RxMetadata {
    /// The offset of the packet data from the beginning of the receive buffer.
    pub offset: usize,
    /// The length of the packet in bytes.
    pub len: usize,
    /// The IP checksum validation state.
    pub ip_checksum: RxChecksumState,
    /// The L4 checksum validation state.
    pub l4_checksum: RxChecksumState,
    /// The L4 protocol.
    pub l4_protocol: L4Protocol,
}
310
311impl Default for RxMetadata {
312    fn default() -> Self {
313        Self {
314            offset: 0,
315            len: 0,
316            ip_checksum: RxChecksumState::Unknown,
317            l4_checksum: RxChecksumState::Unknown,
318            l4_protocol: L4Protocol::Unknown,
319        }
320    }
321}
322
/// The "L3" protocol: the IP layer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum L3Protocol {
    /// Not known, or not an IP packet.
    Unknown,
    /// IPv4.
    Ipv4,
    /// IPv6.
    Ipv6,
}
330
/// The "L4" protocol: the TCP/UDP layer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum L4Protocol {
    /// Not known, or neither TCP nor UDP.
    Unknown,
    /// TCP.
    Tcp,
    /// UDP.
    Udp,
}
338
/// The receive checksum state for a packet.
///
/// Reported to the frontend in [`RxMetadata`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum RxChecksumState {
    /// The checksum was not evaluated.
    Unknown,
    /// The checksum value is correct.
    Good,
    /// The checksum value is incorrect.
    Bad,
    /// The checksum has been validated, but the value in the header is wrong.
    ///
    /// This occurs when LRO/RSC offload has been performed--multiple packet
    /// payloads are glommed together without updating the checksum in the first
    /// packet's header.
    ValidatedButWrong,
}
355
356impl RxChecksumState {
357    /// Returns true if the checksum has been validated.
358    pub fn is_valid(self) -> bool {
359        self == Self::Good || self == Self::ValidatedButWrong
360    }
361}
362
/// A transmit ID. This may be used by multiple segments at the same time.
///
/// Completions are reported per ID via [`Queue::tx_poll`].
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
pub struct TxId(pub u32);
367
#[derive(Debug, Clone)]
/// The segment type.
pub enum TxSegmentType {
    /// The start of a packet, carrying the packet metadata.
    Head(TxMetadata),
    /// A packet continuation.
    Tail,
}
376
#[derive(Debug, Clone)]
/// Transmit packet metadata.
///
/// Carried by the [`TxSegmentType::Head`] segment of each packet.
pub struct TxMetadata {
    /// The transmit ID.
    pub id: TxId,
    /// The number of segments, including this one.
    pub segment_count: u8,
    /// Flags.
    pub flags: TxFlags,
    /// The total length of the packet in bytes.
    pub len: u32,
    /// The length of the Ethernet frame header. Only guaranteed to be set if
    /// various offload flags are set.
    pub l2_len: u8,
    /// The length of the IP header. Only guaranteed to be set if various
    /// offload flags are set.
    pub l3_len: u16,
    /// The length of the TCP header. Only guaranteed to be set if various
    /// offload flags are set.
    pub l4_len: u8,
    /// The maximum TCP segment size, used for segmentation. Only guaranteed to
    /// be set if [`TxFlags::offload_tcp_segmentation`] is set.
    pub max_tcp_segment_size: u16,
}
401
/// Flags affecting transmit behavior.
#[bitfield(u8)]
pub struct TxFlags {
    /// Offload IPv4 header checksum calculation.
    ///
    /// `is_ipv4`/`is_ipv6`, `l2_len`, and `l3_len` must be set.
    pub offload_ip_header_checksum: bool,
    /// Offload the TCP checksum calculation.
    ///
    /// `is_ipv4`/`is_ipv6`, `l2_len`, and `l3_len` must be set.
    pub offload_tcp_checksum: bool,
    /// Offload the UDP checksum calculation.
    ///
    /// `is_ipv4`/`is_ipv6`, `l2_len`, and `l3_len` must be set.
    pub offload_udp_checksum: bool,
    /// Offload the TCP segmentation, allowing packets to be larger than the
    /// MTU.
    ///
    /// `is_ipv4`/`is_ipv6`, `l2_len`, `l3_len`, `l4_len`, and
    /// `max_tcp_segment_size` must be set.
    pub offload_tcp_segmentation: bool,
    /// If true, the packet is IPv4.
    pub is_ipv4: bool,
    /// If true, the packet is IPv6. Mutually exclusive with `is_ipv4`.
    pub is_ipv6: bool,
    #[bits(2)]
    _reserved: u8,
}
430
431impl Default for TxMetadata {
432    fn default() -> Self {
433        Self {
434            id: TxId(0),
435            segment_count: 0,
436            len: 0,
437            flags: TxFlags::new(),
438            l2_len: 0,
439            l3_len: 0,
440            l4_len: 0,
441            max_tcp_segment_size: 0,
442        }
443    }
444}
445
#[derive(Debug, Clone)]
/// A transmit packet segment.
///
/// A packet is a [`TxSegmentType::Head`] segment followed by
/// `segment_count - 1` [`TxSegmentType::Tail`] segments.
pub struct TxSegment {
    /// The segment type (head or tail).
    pub ty: TxSegmentType,
    /// The guest address of this segment.
    pub gpa: u64,
    /// The length of this segment.
    pub len: u32,
}
456
457/// Computes the number of packets in `segments`.
458pub fn packet_count(mut segments: &[TxSegment]) -> usize {
459    let mut packet_count = 0;
460    while let Some(head) = segments.first() {
461        let TxSegmentType::Head(metadata) = &head.ty else {
462            unreachable!()
463        };
464        segments = &segments[metadata.segment_count as usize..];
465        packet_count += 1;
466    }
467    packet_count
468}
469
470/// Gets the next packet from a list of segments, returning the packet metadata,
471/// the segments in the packet, and the remaining segments.
472pub fn next_packet(segments: &[TxSegment]) -> (&TxMetadata, &[TxSegment], &[TxSegment]) {
473    let metadata = if let TxSegmentType::Head(metadata) = &segments[0].ty {
474        metadata
475    } else {
476        unreachable!();
477    };
478    let (this, rest) = segments.split_at(metadata.segment_count.into());
479    (metadata, this, rest)
480}
481
482/// Linearizes the next packet in a list of segments, returning the buffer data
483/// and advancing the segment list.
484pub fn linearize(
485    pool: &dyn BufferAccess,
486    segments: &mut &[TxSegment],
487) -> Result<Vec<u8>, GuestMemoryError> {
488    let (head, this, rest) = next_packet(segments);
489    let mut v = vec![0; head.len as usize];
490    let mut offset = 0;
491    let mem = pool.guest_memory();
492    for segment in this {
493        let dest = &mut v[offset..offset + segment.len as usize];
494        mem.read_at(segment.gpa, dest)?;
495        offset += segment.len as usize;
496    }
497    assert_eq!(v.len(), offset);
498    *segments = rest;
499    Ok(v)
500}
501
/// Action requested by an endpoint, returned from
/// [`Endpoint::wait_for_endpoint_action`].
#[derive(PartialEq, Debug)]
pub enum EndpointAction {
    /// The endpoint must be restarted (e.g. it was connected or
    /// disconnected).
    RestartRequired,
    /// The link status changed.
    // NOTE(review): `true` presumably means link up — confirm against callers.
    LinkStatusNotify(bool),
}
507
/// Control messages sent from a [`DisconnectableEndpointControl`] to its
/// paired [`DisconnectableEndpoint`].
enum DisconnectableEndpointUpdate {
    /// Attach the provided endpoint.
    EndpointConnected(Box<dyn Endpoint>),
    /// Detach the current endpoint; the RPC completes with the detached
    /// endpoint (if any).
    EndpointDisconnected(Rpc<(), Option<Box<dyn Endpoint>>>),
}
512
/// Control handle used to connect and disconnect the inner endpoint of a
/// [`DisconnectableEndpoint`].
pub struct DisconnectableEndpointControl {
    // Updates are consumed by the endpoint in `wait_for_endpoint_action`.
    send_update: mesh::Sender<DisconnectableEndpointUpdate>,
}
516
517impl DisconnectableEndpointControl {
518    pub fn connect(&mut self, endpoint: Box<dyn Endpoint>) -> anyhow::Result<()> {
519        self.send_update
520            .send(DisconnectableEndpointUpdate::EndpointConnected(endpoint));
521        Ok(())
522    }
523
524    pub async fn disconnect(&mut self) -> anyhow::Result<Option<Box<dyn Endpoint>>> {
525        self.send_update
526            .call(DisconnectableEndpointUpdate::EndpointDisconnected, ())
527            .map_err(anyhow::Error::from)
528            .await
529    }
530}
531
/// Endpoint properties captured when an endpoint is connected.
///
/// The property accessors on [`DisconnectableEndpoint`] read this cache
/// instead of querying the live endpoint.
pub struct DisconnectableEndpointCachedState {
    is_ordered: bool,
    tx_offload_support: TxOffloadSupport,
    multiqueue_support: MultiQueueSupport,
    tx_fast_completions: bool,
    link_speed: u64,
}
539
/// An [`Endpoint`] wrapper whose inner endpoint can be attached and
/// detached at runtime via [`DisconnectableEndpointControl`]. While no
/// endpoint is connected, calls are forwarded to a null endpoint.
pub struct DisconnectableEndpoint {
    /// The currently connected endpoint, if any.
    endpoint: Option<Box<dyn Endpoint>>,
    /// Stand-in used while `endpoint` is `None`.
    null_endpoint: Box<dyn Endpoint>,
    /// Properties cached from the most recently connected endpoint.
    cached_state: Option<DisconnectableEndpointCachedState>,
    /// Updates from the control side; locked in `wait_for_endpoint_action`.
    receive_update: Arc<Mutex<mesh::Receiver<DisconnectableEndpointUpdate>>>,
    /// A pending disconnect RPC plus the detached endpoint, completed on
    /// the next `wait_for_endpoint_action` call.
    notify_disconnect_complete: Option<(
        Rpc<(), Option<Box<dyn Endpoint>>>,
        Option<Box<dyn Endpoint>>,
    )>,
}
550
impl InspectMut for DisconnectableEndpoint {
    fn inspect_mut(&mut self, req: inspect::Request<'_>) {
        // Delegate to whichever endpoint is active (inner or null).
        self.current_mut().inspect_mut(req)
    }
}
556
557impl DisconnectableEndpoint {
558    pub fn new() -> (Self, DisconnectableEndpointControl) {
559        let (endpoint_tx, endpoint_rx) = mesh::channel();
560        let control = DisconnectableEndpointControl {
561            send_update: endpoint_tx,
562        };
563        (
564            Self {
565                endpoint: None,
566                null_endpoint: Box::new(NullEndpoint::new()),
567                cached_state: None,
568                receive_update: Arc::new(Mutex::new(endpoint_rx)),
569                notify_disconnect_complete: None,
570            },
571            control,
572        )
573    }
574
575    fn current(&self) -> &dyn Endpoint {
576        self.endpoint
577            .as_ref()
578            .unwrap_or(&self.null_endpoint)
579            .as_ref()
580    }
581
582    fn current_mut(&mut self) -> &mut dyn Endpoint {
583        self.endpoint
584            .as_mut()
585            .unwrap_or(&mut self.null_endpoint)
586            .as_mut()
587    }
588}
589
#[async_trait]
impl Endpoint for DisconnectableEndpoint {
    fn endpoint_type(&self) -> &'static str {
        self.current().endpoint_type()
    }

    async fn get_queues(
        &mut self,
        config: Vec<QueueConfig>,
        rss: Option<&RssConfig<'_>>,
        queues: &mut Vec<Box<dyn Queue>>,
    ) -> anyhow::Result<()> {
        self.current_mut().get_queues(config, rss, queues).await
    }

    async fn stop(&mut self) {
        self.current_mut().stop().await
    }

    // The property accessors below read the state cached at connect time
    // rather than querying the live endpoint, so they panic if no endpoint
    // has ever been connected.

    fn is_ordered(&self) -> bool {
        self.cached_state
            .as_ref()
            .expect("Endpoint needs connected at least once before use")
            .is_ordered
    }

    fn tx_offload_support(&self) -> TxOffloadSupport {
        self.cached_state
            .as_ref()
            .expect("Endpoint needs connected at least once before use")
            .tx_offload_support
    }

    fn multiqueue_support(&self) -> MultiQueueSupport {
        self.cached_state
            .as_ref()
            .expect("Endpoint needs connected at least once before use")
            .multiqueue_support
    }

    fn tx_fast_completions(&self) -> bool {
        self.cached_state
            .as_ref()
            .expect("Endpoint needs connected at least once before use")
            .tx_fast_completions
    }

    async fn set_data_path_to_guest_vf(&self, use_vf: bool) -> anyhow::Result<()> {
        self.current().set_data_path_to_guest_vf(use_vf).await
    }

    async fn get_data_path_to_guest_vf(&self) -> anyhow::Result<bool> {
        self.current().get_data_path_to_guest_vf().await
    }

    async fn wait_for_endpoint_action(&mut self) -> EndpointAction {
        // If the previous message disconnected the endpoint, notify the caller
        // that the operation has completed, returning the old endpoint.
        if let Some((rpc, old_endpoint)) = self.notify_disconnect_complete.take() {
            rpc.handle(async |_| old_endpoint).await;
        }

        enum Message {
            DisconnectableEndpointUpdate(DisconnectableEndpointUpdate),
            UpdateFromEndpoint(EndpointAction),
        }
        let receiver = self.receive_update.clone();
        let mut receive_update = receiver.lock().await;
        // Future resolving to the next control message (connect/disconnect).
        // If the control side is gone, block forever so only endpoint events
        // can complete this call.
        let update = async {
            match receive_update.next().await {
                Some(m) => Message::DisconnectableEndpointUpdate(m),
                None => {
                    pending::<()>().await;
                    unreachable!()
                }
            }
        };
        // Future resolving to the next event from the active endpoint.
        let ep_update = self
            .current_mut()
            .wait_for_endpoint_action()
            .map(Message::UpdateFromEndpoint);
        // Wait for whichever source fires first.
        let m = (update, ep_update).race().await;
        match m {
            Message::DisconnectableEndpointUpdate(
                DisconnectableEndpointUpdate::EndpointConnected(endpoint),
            ) => {
                // Connecting while an endpoint is already attached is a
                // caller bug; a disconnect must happen first.
                let old_endpoint = self.endpoint.take();
                assert!(old_endpoint.is_none());
                self.endpoint = Some(endpoint);
                // Snapshot the new endpoint's properties so the accessors
                // can answer without touching the endpoint again.
                self.cached_state = Some(DisconnectableEndpointCachedState {
                    is_ordered: self.current().is_ordered(),
                    tx_offload_support: self.current().tx_offload_support(),
                    multiqueue_support: self.current().multiqueue_support(),
                    tx_fast_completions: self.current().tx_fast_completions(),
                    link_speed: self.current().link_speed(),
                });
                EndpointAction::RestartRequired
            }
            Message::DisconnectableEndpointUpdate(
                DisconnectableEndpointUpdate::EndpointDisconnected(rpc),
            ) => {
                let old_endpoint = self.endpoint.take();
                // Wait until the next call into this function to notify the
                // caller that the operation has completed. This makes it more
                // likely that the endpoint is no longer referenced (old queues
                // have been disposed, etc.).
                self.notify_disconnect_complete = Some((rpc, old_endpoint));
                EndpointAction::RestartRequired
            }
            Message::UpdateFromEndpoint(update) => update,
        }
    }

    fn link_speed(&self) -> u64 {
        self.cached_state
            .as_ref()
            .expect("Endpoint needs connected at least once before use")
            .link_speed
    }
}