use safeatomic::AtomicSliceOps;
use std::sync::Arc;
use std::sync::atomic::AtomicU8;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// The size of a page, in bytes.
pub const PAGE_SIZE: usize = 4096;
/// [`PAGE_SIZE`] as a `u32`.
pub const PAGE_SIZE32: u32 = 4096;
/// [`PAGE_SIZE`] as a `u64`.
pub const PAGE_SIZE64: u64 = PAGE_SIZE as u64;

/// A buffer that is mapped into the process and visible to a DMA device via
/// its page frame numbers (PFNs).
///
/// # Safety
/// The mapping described by `base()` and `len()` must remain valid, and safe
/// to access as a slice of atomic bytes, for the lifetime of the object.
pub unsafe trait MappedDmaTarget: Send + Sync {
    /// The virtual address of the start of the mapping.
    fn base(&self) -> *const u8;

    /// The length of the mapping, in bytes.
    fn len(&self) -> usize;

    /// The page frame numbers of the pages backing the mapping.
    fn pfns(&self) -> &[u64];

    /// The bias applied to the PFNs of this mapping, if any.
    fn pfn_bias(&self) -> u64;

    /// Returns a [`MemoryBlock`] view of a subrange of this mapping, or
    /// `None` if the implementation does not provide one.
    #[doc(hidden)]
    fn view(&self, offset: usize, len: usize) -> Option<MemoryBlock> {
        let _ = (offset, len);
        None
    }
}

/// A view of a subrange of a parent [`MappedDmaTarget`].
struct RestrictedView {
    mem: Arc<dyn MappedDmaTarget>,
    len: usize,
    offset: usize,
}

impl RestrictedView {
    /// Wraps `mem`, restricting it to `len` bytes starting at `offset`.
    fn new(mem: Arc<dyn MappedDmaTarget>, offset: usize, len: usize) -> Self {
        let mem_len = mem.len();
        assert!(mem_len >= offset && mem_len - offset >= len);
        Self { len, offset, mem }
    }
}

// SAFETY: the returned base and length stay within the parent mapping, as
// validated in `RestrictedView::new`.
unsafe impl MappedDmaTarget for RestrictedView {
    fn base(&self) -> *const u8 {
        // SAFETY: `new` checked that `offset` is within the parent mapping.
        unsafe { self.mem.base().add(self.offset) }
    }

    fn len(&self) -> usize {
        self.len
    }

    fn pfns(&self) -> &[u64] {
        // Return the parent's PFNs for just the pages this view touches:
        // start at the page containing `offset`, and round the page count up
        // so that a view ending mid-page still covers its final page.
        let start = self.offset / PAGE_SIZE;
        let count = (self.base() as usize % PAGE_SIZE + self.len + (PAGE_SIZE - 1)) / PAGE_SIZE;
        let pages = self.mem.pfns();
        &pages[start..][..count]
    }

    fn pfn_bias(&self) -> u64 {
        self.mem.pfn_bias()
    }

    fn view(&self, offset: usize, len: usize) -> Option<MemoryBlock> {
        // Build the view directly against the parent target so that nested
        // views don't stack up a chain of `RestrictedView`s.
        Some(MemoryBlock::new(RestrictedView::new(
            self.mem.clone(),
            self.offset.checked_add(offset).unwrap(),
            len,
        )))
    }
}

/// A reference-counted handle to a [`MappedDmaTarget`], caching the base
/// pointer and length of the mapping.
#[derive(Clone)]
pub struct MemoryBlock {
    base: *const u8,
    len: usize,
    mem: Arc<dyn MappedDmaTarget>,
}

impl std::fmt::Debug for MemoryBlock {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MemoryBlock")
            .field("base", &self.base)
            .field("len", &self.len)
            .field("pfns", &self.pfns())
            .field("pfn_bias", &self.pfn_bias())
            .finish()
    }
}

// SAFETY: `base` is just a cached copy of the mapping's base pointer; the
// mapping itself (`mem`) is `Send + Sync` per the `MappedDmaTarget` bounds.
unsafe impl Send for MemoryBlock {}
// SAFETY: see above.
unsafe impl Sync for MemoryBlock {}

impl MemoryBlock {
    /// Creates a new memory block backed by `mem`.
    pub fn new<T: 'static + MappedDmaTarget>(mem: T) -> Self {
        Self {
            base: mem.base(),
            len: mem.len(),
            mem: Arc::new(mem),
        }
    }

    /// Returns a new block spanning `len` bytes of this block, starting at
    /// `offset`.
    pub fn subblock(&self, offset: usize, len: usize) -> Self {
        match self.mem.view(offset, len) {
            Some(view) => view,
            None => Self::new(RestrictedView::new(self.mem.clone(), offset, len)),
        }
    }

    /// Gets the base virtual address of the block.
    pub fn base(&self) -> *const u8 {
        self.base
    }

    /// Gets the length of the block, in bytes.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Gets the PFNs of the pages backing the block.
    pub fn pfns(&self) -> &[u64] {
        self.mem.pfns()
    }

    /// Gets the PFN bias of the block.
    pub fn pfn_bias(&self) -> u64 {
        self.mem.pfn_bias()
    }

    /// Gets the block's contents as a slice of atomic bytes.
    pub fn as_slice(&self) -> &[AtomicU8] {
        // SAFETY: the underlying `MappedDmaTarget` guarantees the mapping is
        // valid for shared atomic access for as long as `self.mem` is alive.
        unsafe { std::slice::from_raw_parts(self.base.cast(), self.len) }
    }

    /// Reads `data.len()` bytes from the block, starting at `offset`.
    pub fn read_at(&self, offset: usize, data: &mut [u8]) {
        self.as_slice()[offset..][..data.len()].atomic_read(data);
    }

    /// Reads an object of type `T` from the block at `offset`.
    pub fn read_obj<T: FromBytes + Immutable + KnownLayout>(&self, offset: usize) -> T {
        self.as_slice()[offset..][..size_of::<T>()].atomic_read_obj()
    }

    /// Writes `data` to the block, starting at `offset`.
    pub fn write_at(&self, offset: usize, data: &[u8]) {
        self.as_slice()[offset..][..data.len()].atomic_write(data);
    }

    /// Writes an object of type `T` to the block at `offset`.
    pub fn write_obj<T: IntoBytes + Immutable + KnownLayout>(&self, offset: usize, data: &T) {
        self.as_slice()[offset..][..size_of::<T>()].atomic_write_obj(data);
    }

    /// Gets the offset of the block's base address within its first page.
    pub fn offset_in_page(&self) -> u32 {
        self.base as u32 % PAGE_SIZE as u32
    }
}
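
// The sketch below is illustrative rather than part of the module's API: it
// shows one way `MemoryBlock` might be exercised by backing `MappedDmaTarget`
// with an ordinary page-aligned heap allocation. The `HeapDmaBuffer` type and
// its fabricated, zero-based PFN list are assumptions made for the example;
// no real DMA mapping is involved.
#[cfg(test)]
mod example_tests {
    use super::*;
    use std::alloc::Layout;
    use std::alloc::alloc_zeroed;
    use std::alloc::dealloc;

    /// A page-aligned heap buffer standing in for a real DMA mapping.
    struct HeapDmaBuffer {
        base: *mut u8,
        len: usize,
        pfns: Vec<u64>,
    }

    // SAFETY: the allocation is owned by this object and only freed in `drop`.
    unsafe impl Send for HeapDmaBuffer {}
    // SAFETY: see above; all access goes through atomic byte operations.
    unsafe impl Sync for HeapDmaBuffer {}

    impl HeapDmaBuffer {
        fn new(pages: usize) -> Self {
            let len = pages * PAGE_SIZE;
            let layout = Layout::from_size_align(len, PAGE_SIZE).unwrap();
            // SAFETY: `layout` has a nonzero size.
            let base = unsafe { alloc_zeroed(layout) };
            assert!(!base.is_null());
            Self {
                base,
                len,
                // Placeholder PFNs; a real implementation would report the
                // physical pages backing the mapping.
                pfns: (0..pages as u64).collect(),
            }
        }
    }

    impl Drop for HeapDmaBuffer {
        fn drop(&mut self) {
            let layout = Layout::from_size_align(self.len, PAGE_SIZE).unwrap();
            // SAFETY: `base` was allocated in `new` with this same layout.
            unsafe { dealloc(self.base, layout) };
        }
    }

    // SAFETY: the mapping is a live, page-aligned allocation that remains
    // valid for the lifetime of the object.
    unsafe impl MappedDmaTarget for HeapDmaBuffer {
        fn base(&self) -> *const u8 {
            self.base
        }

        fn len(&self) -> usize {
            self.len
        }

        fn pfns(&self) -> &[u64] {
            &self.pfns
        }

        fn pfn_bias(&self) -> u64 {
            0
        }
    }

    #[test]
    fn read_write_roundtrip() {
        let block = MemoryBlock::new(HeapDmaBuffer::new(2));

        // Round-trip an object through the atomic read/write helpers.
        block.write_obj(16, &0x1234_5678_u32);
        assert_eq!(block.read_obj::<u32>(16), 0x1234_5678);

        // A subblock sees the same bytes at a shifted offset and reports the
        // PFNs of only the pages it covers.
        let sub = block.subblock(8, 32);
        assert_eq!(sub.read_obj::<u32>(8), 0x1234_5678);
        assert_eq!(sub.pfns(), &[0u64]);
        assert_eq!(sub.offset_in_page(), 8);
    }
}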