1#![expect(missing_docs)]
7#![forbid(unsafe_code)]
8
9pub use gicd::Distributor;
10pub use gicr::Redistributor;
11
12mod gicd {
13 use super::Redistributor;
14 use super::gicr::SharedState;
15 use aarch64defs::MpidrEl1;
16 use aarch64defs::SystemReg;
17 use aarch64defs::gic::GicdCtlr;
18 use aarch64defs::gic::GicdRegister;
19 use aarch64defs::gic::GicdTyper;
20 use aarch64defs::gic::GicdTyper2;
21 use aarch64defs::gic::GicrSgi;
22 use inspect::Inspect;
23 use memory_range::MemoryRange;
24 use parking_lot::Mutex;
25 use std::sync::Arc;
26 use vm_topology::processor::VpIndex;
27
    /// Emulated GICv3 distributor (GICD): holds shared peripheral interrupt
    /// (SPI) state and dispatches MMIO for both the distributor frame and
    /// all redistributor frames.
    #[derive(Debug, Inspect)]
    pub struct Distributor {
        // Shared SPI bitmap state, guarded by a mutex.
        state: Mutex<DistributorState>,
        // Highest supported SPI intid (32 + max_spis - 1).
        max_spi_intid: u32,
        // Per-VP redistributor state, indexed by VP index.
        #[inspect(skip)]
        gicr: Vec<Arc<SharedState>>,
        // Guest-physical MMIO range of the distributor registers.
        gicd_range: MemoryRange,
        // Guest-physical MMIO range covering all redistributor frames.
        gicr_range: MemoryRange,
    }
37
    /// Shared (SPI) interrupt state, guarded by `Distributor::state`.
    ///
    /// The bitmap vectors hold one bit per intid (32 intids per `u32` word)
    /// and are indexed by `intid / 32`; word 0 nominally covers the banked
    /// intids 0..32, which the redistributors own.
    #[derive(Debug, Inspect)]
    struct DistributorState {
        // One pending bit per intid.
        #[inspect(iter_by_index)]
        pending: Vec<u32>,
        // One active bit per intid.
        #[inspect(iter_by_index)]
        active: Vec<u32>,
        // Interrupt group select, one bit per intid.
        #[inspect(iter_by_index)]
        group: Vec<u32>,
        // One enable bit per intid.
        #[inspect(iter_by_index)]
        enable: Vec<u32>,
        // Trigger configuration (ICFGR), two bits per intid.
        #[inspect(iter_by_index)]
        cfg: Vec<u32>,
        // Priorities (IPRIORITYR), one byte per intid.
        #[inspect(iter_by_index)]
        priority: Vec<u32>,
        // Routing (IROUTER), one u64 per intid.
        #[inspect(iter_by_index)]
        route: Vec<u64>,
        // GICD_CTLR group enable bits.
        enable_grp0: bool,
        enable_grp1: bool,
    }
57
58 impl Distributor {
59 pub fn new(gicd_base: u64, gicr_range: MemoryRange, max_spis: u32) -> Self {
60 let n = (max_spis as usize + 1) / 32;
61 Self {
62 state: Mutex::new(DistributorState {
63 pending: vec![0; n],
64 active: vec![0; n],
65 group: vec![0; n],
66 enable: vec![0; n],
67 cfg: vec![0; n * 2],
68 priority: vec![0; n * 8],
69 route: vec![0; n * 64],
70 enable_grp0: false,
71 enable_grp1: false,
72 }),
73 max_spi_intid: 32 + max_spis - 1,
74 gicr: Default::default(),
75 gicd_range: MemoryRange::new(
76 gicd_base..gicd_base + aarch64defs::GIC_DISTRIBUTOR_SIZE,
77 ),
78 gicr_range,
79 }
80 }
81
82 pub fn add_redistributor(&mut self, mpidr: u64, last: bool) -> Redistributor {
83 let mpidr = mpidr & u64::from(MpidrEl1::AFFINITY_MASK);
84 let (gicr, state) = Redistributor::new(self.gicr.len(), mpidr, last);
85 self.gicr.push(state);
86 assert!(
87 (self.gicr.len() as u64)
88 <= self.gicr_range.len() / aarch64defs::GIC_REDISTRIBUTOR_SIZE
89 );
90 gicr
91 }
92
93 pub fn raise_ppi(&self, vp: VpIndex, intid: u32) -> bool {
94 if let Some(gicr) = self.gicr.get(vp.index() as usize) {
95 gicr.raise(intid)
96 } else {
97 false
98 }
99 }
100
        /// Sets or clears the pending state of an SPI.
        ///
        /// Returns `Some(0)` (the index of the VP to wake — SPIs are only
        /// delivered to VP 0) when the interrupt is made pending, `None`
        /// when it is cleared.
        pub fn set_pending(&self, intid: u32, pending: bool) -> Option<u32> {
            // NOTE(review): indexes the bitmap directly, so an intid beyond
            // the allocated words panics — assumes callers pass a valid SPI
            // intid; confirm against callers.
            let v = &mut self.state.lock().pending[intid as usize / 32];
            let mask = 1 << (intid & 31);
            // Log only actual state transitions, not redundant updates.
            if (*v & mask != 0) != pending {
                tracing::debug!(intid, pending, "set pending");
            }
            if pending {
                *v |= mask;
                // Wake VP 0, where SPIs are delivered.
                Some(0)
            } else {
                *v &= !mask;
                None
            }
        }
115
116 pub fn irq_pending(&self, gicr: &Redistributor) -> bool {
117 if gicr.irq_pending() {
118 return true;
119 }
120 if gicr.index != 0 {
121 return false;
122 }
123 let state = self.state.lock();
124 state
125 .pending
126 .iter()
127 .zip(&state.active)
128 .zip(&state.enable)
129 .any(|((&p, &a), e)| p & !a & e != 0)
130 }
131
        /// Acknowledges the highest-priority deliverable interrupt for the
        /// VP owning `gicr`, marking it active and clearing its pending bit.
        ///
        /// Returns the acknowledged intid, or 1023 (the architectural
        /// "spurious interrupt" value) when nothing is deliverable.
        pub fn ack(&self, gicr: &mut Redistributor, group1: bool) -> u32 {
            // Banked SGIs/PPIs take precedence.
            if let Some(intid) = gicr.ack(group1) {
                return intid;
            }
            // SPIs are only delivered to VP 0 in this model.
            if gicr.index != 0 {
                return 1023;
            }
            let mut state = self.state.lock();
            // Reborrow so pending and active can be borrowed independently.
            let state = &mut *state;
            // Find the first bitmap word with a pending, non-active bit.
            // NOTE(review): the enable bitmap is not consulted here, unlike
            // irq_pending — confirm this asymmetry is intentional.
            if let Some((i, (p, a))) = state
                .pending
                .iter_mut()
                .zip(&mut state.active)
                .enumerate()
                .find(|(_, (p, a))| **p & !**a != 0)
            {
                // Take the highest set bit in the word: pending -> active.
                let v = 31 - (*p & !*a).leading_zeros();
                *p &= !(1 << v);
                *a |= 1 << v;
                let intid = i as u32 * 32 + v;
                tracing::debug!(intid, "gicd ack");
                intid
            } else {
                1023
            }
        }
158
159 pub fn write_sysreg(
160 &self,
161 gicr: &mut Redistributor,
162 reg: SystemReg,
163 value: u64,
164 wake: impl FnMut(usize),
165 ) -> bool {
166 match reg {
167 SystemReg::ICC_EOIR0_EL1 => self.eoi(gicr, false, value as u32),
168 SystemReg::ICC_EOIR1_EL1 => self.eoi(gicr, true, value as u32),
169 SystemReg::ICC_SGI0R_EL1 => self.sgi(gicr, false, value, wake),
170 SystemReg::ICC_SGI1R_EL1 => self.sgi(gicr, true, value, wake),
171 _ => return false,
172 }
173 true
174 }
175
176 fn sgi(
177 &self,
178 this: &mut Redistributor,
179 _group1: bool,
180 value: u64,
181 mut wake: impl FnMut(usize),
182 ) {
183 let value = GicrSgi::from(value);
184 for (index, gicr) in self.gicr.iter().enumerate() {
185 if (value.irm() && !Arc::ptr_eq(&this.shared, gicr))
186 || (!value.irm()
187 && gicr.mpidr.aff3() == value.aff3()
188 && gicr.mpidr.aff2() == value.aff2()
189 && gicr.mpidr.aff1() == value.aff1()
190 && (1 << gicr.mpidr.aff0()) & value.target_list() != 0)
191 {
192 if gicr.raise(value.intid()) {
193 wake(index);
194 }
195 }
196 }
197 }
198
199 pub fn read_sysreg(&self, gicr: &mut Redistributor, reg: SystemReg) -> Option<u64> {
200 let v = match reg {
201 SystemReg::ICC_IAR0_EL1 => self.ack(gicr, false).into(),
202 SystemReg::ICC_IAR1_EL1 => self.ack(gicr, true).into(),
203 _ => return None,
204 };
205 Some(v)
206 }
207
208 fn eoi(&self, gicr: &mut Redistributor, group1: bool, intid: u32) {
209 if intid < 32 {
210 gicr.eoi(group1, intid);
211 return;
212 }
213 if gicr.index != 0 {
214 return;
215 }
216 tracing::debug!(intid, "gicd eoi");
217 let v = &mut self.state.lock().active[intid as usize / 32];
218 *v &= !(1 << (intid & 31));
219 }
220
221 fn write32(&self, address: GicdRegister, value: u32) -> bool {
222 assert!(address.0 & 3 == 0);
223 match address {
224 GicdRegister::CTLR => {
225 let ctlr = GicdCtlr::from(value);
226 let mut state = self.state.lock();
227 let state = &mut *state;
228 state.enable_grp0 = ctlr.enable_grp0();
229 state.enable_grp1 = ctlr.enable_grp1();
230 }
231 r if GicdRegister::IGROUPR.contains(&r.0) => {
232 let n = (r.0 & 0x7f) / 4;
233 if n != 0 {
234 if let Some(group) = self.state.lock().group.get_mut(n as usize) {
235 *group = value;
236 }
237 }
238 }
239 r if GicdRegister::ISENABLER.contains(&r.0) => {
240 let n = (r.0 & 0x7f) / 4;
241 if n != 0 {
242 if let Some(enable) = self.state.lock().enable.get_mut(n as usize) {
243 *enable |= value;
244 }
245 }
246 }
247 r if GicdRegister::ICENABLER.contains(&r.0) => {
248 let n = (r.0 & 0x7f) / 4;
249 if n != 0 {
250 if let Some(enable) = self.state.lock().enable.get_mut(n as usize) {
251 *enable &= !value;
252 }
253 }
254 }
255 r if GicdRegister::ICFGR.contains(&r.0) => {
256 let n = (r.0 & 0xff) / 4;
257 if n >= 2 {
258 if let Some(cfg) = self.state.lock().cfg.get_mut(n as usize) {
259 *cfg = value & 0xaaaaaaaa;
261 }
262 }
263 }
264 r if GicdRegister::IPRIORITYR.contains(&r.0) => {
265 let n = (r.0 & 0x3ff) / 4;
266 if n >= 8 {
267 if let Some(cfg) = self.state.lock().cfg.get_mut(n as usize) {
268 *cfg = value;
269 }
270 }
271 }
272 r if GicdRegister::ISACTIVER.contains(&r.0) => {
273 let n = (r.0 & 0x7f) / 4;
274 if n != 0 {
275 if let Some(active) = self.state.lock().active.get_mut(n as usize) {
276 *active |= value;
277 }
278 }
279 }
280 r if GicdRegister::ICACTIVER.contains(&r.0) => {
281 let n = (r.0 & 0x7f) / 4;
282 if n != 0 {
283 if let Some(active) = self.state.lock().active.get_mut(n as usize) {
284 *active &= !value;
285 }
286 }
287 }
288 _ => return false,
289 }
290 true
291 }
292
        /// Handles a 32-bit read of a distributor register.
        ///
        /// Returns `None` if the register is not supported. Out-of-range
        /// indices into the state vectors read as zero.
        fn read32(&self, address: GicdRegister) -> Option<u32> {
            assert!(address.0 & 3 == 0);
            let v = match address {
                GicdRegister::PIDR2 => {
                    // Architecture revision: GICv3 (PIDR2[7:4] == 3).
                    3 << 4
                }
                GicdRegister::TYPER => GicdTyper::new()
                    .with_it_lines_number(31)
                    .with_id_bits(5)
                    .into(),
                GicdRegister::IIDR => 0,
                GicdRegister::TYPER2 => GicdTyper2::new().into(),
                GicdRegister::CTLR => {
                    let state = self.state.lock();
                    // DS (disable security) and ARE (affinity routing) are
                    // reported as always set.
                    GicdCtlr::new()
                        .with_enable_grp0(state.enable_grp0)
                        .with_enable_grp1(state.enable_grp1)
                        .with_ds(true)
                        .with_are(true)
                        .into()
                }
                r if GicdRegister::IGROUPR.contains(&r.0) => {
                    let n = (r.0 & 0x7f) / 4;
                    self.state
                        .lock()
                        .group
                        .get(n as usize)
                        .copied()
                        .unwrap_or(0)
                }
                // Set- and clear- register pairs read the same bitmap.
                r if GicdRegister::ICENABLER.contains(&r.0)
                    || GicdRegister::ISENABLER.contains(&r.0) =>
                {
                    let n = (r.0 & 0x7f) / 4;
                    self.state
                        .lock()
                        .enable
                        .get(n as usize)
                        .copied()
                        .unwrap_or(0)
                }
                r if GicdRegister::ICFGR.contains(&r.0) => {
                    let n = (r.0 & 0xff) / 4;
                    self.state.lock().cfg.get(n as usize).copied().unwrap_or(0)
                }
                r if GicdRegister::IPRIORITYR.contains(&r.0) => {
                    let n = (r.0 & 0x3ff) / 4;
                    self.state
                        .lock()
                        .priority
                        .get(n as usize)
                        .copied()
                        .unwrap_or(0)
                }
                r if GicdRegister::ICACTIVER.contains(&r.0)
                    || GicdRegister::ISACTIVER.contains(&r.0) =>
                {
                    let n = (r.0 & 0x7f) / 4;
                    self.state
                        .lock()
                        .active
                        .get(n as usize)
                        .copied()
                        .unwrap_or(0)
                }
                r if GicdRegister::ICPENDR.contains(&r.0)
                    || GicdRegister::ISPENDR.contains(&r.0) =>
                {
                    let n = (r.0 & 0x7f) / 4;
                    self.state
                        .lock()
                        .pending
                        .get(n as usize)
                        .copied()
                        .unwrap_or(0)
                }
                _ => return None,
            };
            Some(v)
        }
374
375 fn write64(&self, address: GicdRegister, value: u64) -> bool {
376 assert!(address.0 & 7 == 0);
377 match address {
378 r if GicdRegister::IROUTER.contains(&r.0) => {
379 let n = (r.0 & 0x1fff) / 8;
380 if n >= 32 {
381 if let Some(route) = self.state.lock().route.get_mut(n as usize) {
382 *route = value;
383 }
384 }
385 }
386 _ => return false,
387 }
388 true
389 }
390
391 fn read64(&self, address: GicdRegister) -> Option<u64> {
392 assert!(address.0 & 7 == 0);
393 let v = match address {
394 r if GicdRegister::IROUTER.contains(&r.0) => {
395 let n = (r.0 & 0x1fff) / 8;
396 self.state
397 .lock()
398 .route
399 .get(n as usize)
400 .copied()
401 .unwrap_or(0)
402 }
403 _ => return None,
404 };
405 Some(v)
406 }
407
408 pub fn read(&self, address: u64, data: &mut [u8]) -> bool {
409 if self.gicd_range.contains_addr(address) {
410 self.read_gicd(address - self.gicd_range.start(), data);
411 } else if self.gicr_range.contains_addr(address) {
412 let vp = (address - self.gicr_range.start()) / aarch64defs::GIC_REDISTRIBUTOR_SIZE;
413 if let Some(gicr) = self.gicr.get(vp as usize) {
414 gicr.read(address - self.gicr_range.start(), data);
415 } else {
416 tracelimit::warn_ratelimited!(
417 address,
418 ?data,
419 "gicr read unallocated redistributor"
420 );
421 data.fill(0);
422 }
423 } else {
424 return false;
425 }
426 true
427 }
428
429 fn read_gicd(&self, address: u64, data: &mut [u8]) {
430 if address & (data.len() as u64 - 1) != 0 {
431 data.fill(!0);
432 tracing::warn!(address, ?data, "gicd read unaligned access");
433 return;
434 }
435 let address = GicdRegister(address as u16);
436 let handled = match data.len() {
437 4 => {
438 if let Some(v) = self.read32(address) {
439 data.copy_from_slice(&v.to_ne_bytes());
440 true
441 } else {
442 false
443 }
444 }
445 8 => {
446 if let Some(v) = self.read64(address) {
447 data.copy_from_slice(&v.to_ne_bytes());
448 true
449 } else {
450 false
451 }
452 }
453 _ => false,
454 };
455 if !handled {
456 data.fill(0);
457 tracelimit::warn_ratelimited!(?address, ?data, "unsupported gicd register read");
458 }
459 }
460
461 pub fn write(&self, address: u64, data: &[u8]) -> bool {
462 if self.gicd_range.contains_addr(address) {
463 self.write_gicd(address - self.gicd_range.start(), data);
464 } else if self.gicr_range.contains_addr(address) {
465 let vp = (address - self.gicr_range.start()) / aarch64defs::GIC_REDISTRIBUTOR_SIZE;
466 if let Some(gicr) = self.gicr.get(vp as usize) {
467 gicr.write(address - self.gicr_range.start(), data);
468 } else {
469 tracelimit::warn_ratelimited!(
470 address,
471 ?data,
472 "gicr write unallocated redistributor"
473 );
474 }
475 } else {
476 return false;
477 }
478 true
479 }
480
481 fn write_gicd(&self, address: u64, data: &[u8]) {
482 if address & (data.len() as u64 - 1) != 0 {
483 tracing::warn!(address, ?data, "gicd write unaligned access");
484 return;
485 }
486 let address = GicdRegister(address as u16);
487 let handled = match data.len() {
488 4 => self.write32(address, u32::from_ne_bytes(data.try_into().unwrap())),
489 8 => self.write64(address, u64::from_ne_bytes(data.try_into().unwrap())),
490 _ => false,
491 };
492 if !handled {
493 tracelimit::warn_ratelimited!(?address, ?data, "unsupported gicd register write");
494 }
495 }
496 }
497}
498
499mod gicr {
500 use aarch64defs::MpidrEl1;
501 use aarch64defs::gic::GicrCtlr;
502 use aarch64defs::gic::GicrRdRegister;
503 use aarch64defs::gic::GicrSgiRegister;
504 use aarch64defs::gic::GicrTyper;
505 use aarch64defs::gic::GicrWaker;
506 use inspect::Inspect;
507 use parking_lot::Mutex;
508 use std::sync::Arc;
509 use std::sync::atomic::AtomicU32;
510 use std::sync::atomic::Ordering;
511
    /// Per-VP handle to an emulated GICv3 redistributor (GICR).
    ///
    /// The underlying state lives in [`SharedState`] so the distributor can
    /// also reach it (e.g. to deliver SGIs from other VPs).
    #[derive(Debug, Inspect)]
    pub struct Redistributor {
        // State shared with the distributor.
        #[inspect(flatten)]
        pub(super) shared: Arc<SharedState>,
        // This redistributor's index (the VP index).
        pub(super) index: usize,
    }
518
    /// Redistributor state shared between the owning VP and the distributor.
    #[derive(Debug, Inspect)]
    pub(crate) struct SharedState {
        // Pending bitmap for banked interrupts (one bit per intid in
        // 0..32); atomic so other VPs can raise SGIs without the lock.
        pub(super) pending: AtomicU32,
        // This PE's affinity (MPIDR_EL1 affinity fields only).
        #[inspect(with = "|&x| u64::from(x)")]
        pub(super) mpidr: MpidrEl1,
        // Whether this is the last redistributor (reported via GICR_TYPER).
        last: bool,
        // State requiring mutual exclusion.
        mutable: Mutex<SharedMutState>,
    }
527
    /// Lock-protected banked interrupt state (one bit/entry per intid in
    /// 0..32 unless noted).
    #[derive(Debug, Inspect)]
    struct SharedMutState {
        // Active bitmap.
        #[inspect(hex)]
        active: u32,
        // Interrupt group select bitmap.
        #[inspect(hex)]
        group: u32,
        // Enable bitmap.
        #[inspect(hex)]
        enable: u32,
        // PPI trigger configuration (GICR_ICFGR1 value).
        #[inspect(hex)]
        ppi_cfg: u32,
        // Priority bytes (GICR_IPRIORITYR words, 4 intids per word).
        #[inspect(iter_by_index)]
        priority: [u32; 8],
        // GICR_WAKER ProcessorSleep state.
        sleep: bool,
    }
542
543 impl SharedState {
544 pub fn raise(&self, intid: u32) -> bool {
545 let mask = 1 << intid;
546 self.pending.fetch_or(mask, Ordering::Relaxed) & mask == 0
547 }
548
        /// Handles an MMIO read within this redistributor's region.
        ///
        /// Offset bit 16 selects the frame: clear = RD (control) frame,
        /// set = SGI frame. Unsupported registers read as zero.
        pub fn read(&self, address: u64, data: &mut [u8]) {
            // Reads must be naturally aligned to the access size.
            if address & (data.len() as u64 - 1) != 0 {
                data.fill(!0);
                tracing::warn!(address, ?data, "gicr read unaligned access");
                return;
            }

            if address & 0x10000 == 0 {
                let address = GicrRdRegister(address as u16);
                let handled = match data.len() {
                    4 => {
                        if let Some(v) = self.rd_read32(address) {
                            data.copy_from_slice(&v.to_ne_bytes());
                            true
                        } else {
                            false
                        }
                    }
                    8 => {
                        if let Some(v) = self.rd_read64(address) {
                            data.copy_from_slice(&v.to_ne_bytes());
                            true
                        } else {
                            false
                        }
                    }
                    _ => false,
                };
                if !handled {
                    data.fill(0);
                    tracelimit::warn_ratelimited!(?address, "unsupported gicr rd register read");
                }
            } else {
                // Only 32-bit accesses are supported in the SGI frame.
                let address = GicrSgiRegister(address as u16);
                let handled = match data.len() {
                    4 => {
                        if let Some(v) = self.sgi_read32(address) {
                            data.copy_from_slice(&v.to_ne_bytes());
                            true
                        } else {
                            false
                        }
                    }
                    _ => false,
                };
                if !handled {
                    data.fill(0);
                    tracelimit::warn_ratelimited!(
                        ?address,
                        ?data,
                        "unsupported gicr sgi register read"
                    );
                }
            }
        }
604
        /// Handles an MMIO write within this redistributor's region.
        ///
        /// Offset bit 16 selects the frame: clear = RD (control) frame,
        /// set = SGI frame. Writes to unsupported registers are dropped.
        pub fn write(&self, address: u64, data: &[u8]) {
            // Writes must be naturally aligned to the access size.
            if address & (data.len() as u64 - 1) != 0 {
                tracing::warn!(address, ?data, "gicr write unaligned access");
                return;
            }

            if address & 0x10000 == 0 {
                let address = GicrRdRegister(address as u16);
                let handled = match data.len() {
                    4 => {
                        let data = u32::from_ne_bytes(data.try_into().unwrap());
                        self.rd_write32(address, data)
                    }
                    8 => {
                        let data = u64::from_ne_bytes(data.try_into().unwrap());
                        self.rd_write64(address, data)
                    }
                    _ => false,
                };
                if !handled {
                    tracelimit::warn_ratelimited!(
                        ?address,
                        ?data,
                        "unsupported gicr rd register write"
                    );
                }
            } else {
                // Only 32-bit accesses are supported in the SGI frame.
                let address = GicrSgiRegister(address as u16);
                let handled = match data.len() {
                    4 => {
                        let data = u32::from_ne_bytes(data.try_into().unwrap());
                        self.sgi_write32(address, data)
                    }
                    _ => false,
                };
                if !handled {
                    tracelimit::warn_ratelimited!(
                        ?address,
                        ?data,
                        "unsupported gicr sgi register write"
                    );
                }
            }
        }
649
650 fn rd_read32(&self, address: GicrRdRegister) -> Option<u32> {
651 let v = match address {
652 GicrRdRegister::PIDR2 => {
653 3 << 4
655 }
656 GicrRdRegister::CTLR => GicrCtlr::new().into(),
657 GicrRdRegister::WAKER => {
658 let sleep = self.mutable.lock().sleep;
659 GicrWaker::new()
660 .with_processor_sleep(sleep)
661 .with_children_asleep(sleep)
662 .into()
663 }
664 _ => return None,
665 };
666 tracing::debug!(?address, v, "gicr rd read32");
667 Some(v)
668 }
669
670 fn rd_write32(&self, address: GicrRdRegister, data: u32) -> bool {
671 match address {
672 GicrRdRegister::CTLR => {}
673 GicrRdRegister::WAKER => {
674 let v = GicrWaker::from(data);
675 self.mutable.lock().sleep = v.processor_sleep();
676 }
677 _ => return false,
678 }
679 tracing::debug!(?address, data, "gicr rd write32");
680 true
681 }
682
683 fn rd_read64(&self, address: GicrRdRegister) -> Option<u64> {
684 let v = match address {
685 GicrRdRegister::TYPER => GicrTyper::new()
686 .with_aff0(self.mpidr.aff0())
687 .with_aff1(self.mpidr.aff1())
688 .with_aff2(self.mpidr.aff2())
689 .with_aff3(self.mpidr.aff3())
690 .with_last(self.last)
691 .into(),
692 _ => return None,
693 };
694 Some(v)
695 }
696
        /// Writes a 64-bit register in the RD (control) frame. No 64-bit
        /// registers are writable, so this always reports unhandled.
        fn rd_write64(&self, _address: GicrRdRegister, _data: u64) -> bool {
            false
        }
700
        /// Reads a 32-bit register in the SGI frame (banked SGI/PPI state).
        fn sgi_read32(&self, address: GicrSgiRegister) -> Option<u32> {
            let v = match address {
                GicrSgiRegister::IGROUPR0 => self.mutable.lock().group,
                // Set- and clear- register pairs read the same bitmap.
                GicrSgiRegister::ICACTIVER0 | GicrSgiRegister::ISACTIVER0 => {
                    self.mutable.lock().active
                }
                GicrSgiRegister::ICENABLER0 | GicrSgiRegister::ISENABLER0 => {
                    self.mutable.lock().enable
                }
                GicrSgiRegister::ICPENDR0 | GicrSgiRegister::ISPENDR0 => {
                    self.pending.load(Ordering::Relaxed)
                }
                GicrSgiRegister::ICFGR0 => {
                    // SGI trigger configuration reads as fixed (0b10 per
                    // two-bit field).
                    0xaaaaaaaa
                }
                GicrSgiRegister::ICFGR1 => self.mutable.lock().ppi_cfg,
                r if GicrSgiRegister::IPRIORITYR.contains(&r.0) => {
                    // Four priority bytes per word; n is in 0..8.
                    let n = (r.0 & 0x1f) / 4;
                    self.mutable.lock().priority[n as usize]
                }
                _ => return None,
            };
            tracing::debug!(?address, v, "gicr sgi read32");
            Some(v)
        }
727
728 fn sgi_write32(&self, address: GicrSgiRegister, data: u32) -> bool {
729 match address {
730 GicrSgiRegister::IGROUPR0 => self.mutable.lock().group = data,
731 GicrSgiRegister::ISACTIVER0 => self.mutable.lock().active |= data,
732 GicrSgiRegister::ICACTIVER0 => self.mutable.lock().active &= !data,
733 GicrSgiRegister::ISENABLER0 => self.mutable.lock().enable |= data,
734 GicrSgiRegister::ICENABLER0 => self.mutable.lock().enable &= !data,
735 GicrSgiRegister::ICFGR0 => {
736 }
738 GicrSgiRegister::ICFGR1 => self.mutable.lock().ppi_cfg = data,
739 r if GicrSgiRegister::IPRIORITYR.contains(&r.0) => {
740 let n = (r.0 & 0x1f) / 4;
741 self.mutable.lock().priority[n as usize] = data;
742 }
743 _ => return false,
744 }
745 tracing::debug!(?address, data, "gicr sgi write32");
746 true
747 }
748 }
749
750 impl Redistributor {
751 pub(crate) fn new(index: usize, mpidr: u64, last: bool) -> (Self, Arc<SharedState>) {
752 let shared = Arc::new(SharedState {
753 pending: AtomicU32::new(0),
754 mpidr: mpidr.into(),
755 last,
756 mutable: Mutex::new(SharedMutState {
757 active: 0,
758 group: 0,
759 enable: 0,
760 ppi_cfg: 0,
761 priority: [0; 8],
762 sleep: false,
763 }),
764 });
765 (
766 Self {
767 index,
768 shared: shared.clone(),
769 },
770 shared,
771 )
772 }
773
774 pub fn raise(&mut self, intid: u32) {
775 self.shared.pending.fetch_or(1 << intid, Ordering::Relaxed);
776 }
777
778 pub(crate) fn irq_pending(&self) -> bool {
779 let pending = self.shared.pending.load(Ordering::Relaxed);
780 if pending == 0 {
781 return false;
782 }
783 let state = self.shared.mutable.lock();
784 (pending & !state.active & state.enable & state.group) != 0
785 }
786
787 pub fn is_pending_or_active(&self, intid: u32) -> bool {
788 let state = self.shared.mutable.lock();
789 (self.shared.pending.load(Ordering::Relaxed) | state.active) & (1 << intid) != 0
790 }
791
792 pub(crate) fn ack(&mut self, _group1: bool) -> Option<u32> {
793 let pending = self.shared.pending.load(Ordering::Relaxed);
794 if pending == 0 {
795 None
796 } else {
797 let mut state = self.shared.mutable.lock();
798 let intid = 31 - (pending & !state.active).leading_zeros();
799 tracing::trace!(intid, "ack");
800 self.shared
801 .pending
802 .fetch_and(!(1 << intid), Ordering::Relaxed);
803 state.active |= 1 << intid;
804 Some(intid)
805 }
806 }
807
808 pub(crate) fn eoi(&mut self, _group1: bool, intid: u32) {
809 assert!(intid < 32);
810 tracing::trace!(intid, "eoi");
811 self.shared.mutable.lock().active &= !(1 << intid);
812 }
813 }
814}