// vmbus_server/channel_bitmap.rs
use guestmem::LockedPages;
use parking_lot::RwLock;
use safeatomic::AtomicSliceOps;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use vmcore::interrupt::Interrupt;
/// Routes guest interrupt-page signals to per-channel interrupts for
/// channels that use the shared interrupt bitmap (rather than dedicated
/// event ports).
pub(crate) struct ChannelBitmap {
    /// Locked guest memory page holding the send and receive bitmaps.
    interrupt_page: LockedPages,
    /// Maps an event flag number to the interrupt used to notify that
    /// channel; `None` for flags with no registered channel.
    channel_table: RwLock<Vec<Option<Interrupt>>>,
}
/// Size in bytes of each half of the interrupt page: the first half is the
/// send bitmap (written by `set_flag`), the second half is the receive
/// bitmap (drained by `handle_shared_interrupt`).
/// NOTE(review): 2048 assumes a 4 KiB guest page — confirm.
const INTERRUPT_PAGE_SIZE: usize = 2048;
impl ChannelBitmap {
    /// Creates a new `ChannelBitmap` over the guest's locked interrupt page,
    /// with no channels registered yet.
    pub fn new(interrupt_page: LockedPages) -> Self {
        Self {
            interrupt_page,
            channel_table: RwLock::new(vec![None; crate::channels::MAX_CHANNELS]),
        }
    }

    /// Associates `event_flag` with `event`, so that subsequent guest
    /// signals on that flag deliver the interrupt.
    pub fn register_channel(&self, event_flag: u16, event: Interrupt) {
        self.channel_table.write()[event_flag as usize] = Some(event);
    }

    /// Removes any interrupt previously associated with `event_flag`.
    pub fn unregister_channel(&self, event_flag: u16) {
        self.channel_table.write()[event_flag as usize] = None;
    }

    /// Handles the guest-to-host shared interrupt: atomically drains every
    /// flag set in the receive bitmap and delivers the interrupt registered
    /// for each one, warning (rate limited) about flags with no channel.
    pub fn handle_shared_interrupt(&self) {
        let bitmap = AtomicBitmap::new(self.get_recv_page());
        let channel_table = self.channel_table.read();
        bitmap.scan_and_clear(|event_flag| match channel_table.get(event_flag) {
            Some(Some(event)) => event.deliver(),
            _ => {
                tracelimit::warn_ratelimited!(event_flag, "Guest signaled unknown channel");
            }
        });
    }

    /// Sets `event_flag` in the send bitmap, signaling the guest.
    pub fn set_flag(&self, event_flag: u16) {
        AtomicBitmap::new(self.get_send_page()).set(event_flag as usize);
    }

    /// Wraps `interrupt` so that delivering it first sets `event_flag` in
    /// the shared bitmap. When `channel_bitmap` is `None` (the channel does
    /// not use the bitmap), `interrupt` is returned unchanged.
    pub fn create_interrupt(
        channel_bitmap: &Option<Arc<ChannelBitmap>>,
        interrupt: Interrupt,
        event_flag: u16,
    ) -> Interrupt {
        match channel_bitmap {
            Some(channel_bitmap) => {
                let channel_bitmap = Arc::clone(channel_bitmap);
                Interrupt::from_fn(move || {
                    channel_bitmap.set_flag(event_flag);
                    interrupt.deliver();
                })
            }
            None => interrupt,
        }
    }

    /// Returns the first half of the interrupt page (the send bitmap) as
    /// atomic words.
    fn get_send_page(&self) -> &[AtomicU64] {
        let first_half = &self.interrupt_page.pages()[0][..INTERRUPT_PAGE_SIZE];
        first_half.as_atomic_slice().unwrap()
    }

    /// Returns the second half of the interrupt page (the receive bitmap)
    /// as atomic words.
    fn get_recv_page(&self) -> &[AtomicU64] {
        let second_half = &self.interrupt_page.pages()[0][INTERRUPT_PAGE_SIZE..];
        second_half.as_atomic_slice().unwrap()
    }
}
/// A bitmap view over a borrowed slice of atomic 64-bit words, supporting
/// atomic set and drain operations.
struct AtomicBitmap<'a> {
    bits: &'a [AtomicU64],
}

/// Number of bits held by each word of the bitmap.
const BITS_PER_WORD: usize = size_of::<AtomicU64>() * 8;

impl<'a> AtomicBitmap<'a> {
    /// Wraps `bits` as a bitmap of `bits.len() * BITS_PER_WORD` bits.
    pub fn new(bits: &'a [AtomicU64]) -> Self {
        Self { bits }
    }

    /// Atomically sets the bit at `index`.
    pub fn set(&self, index: usize) {
        let (word, bit) = (index / BITS_PER_WORD, index % BITS_PER_WORD);
        self.bits[word].fetch_or(1u64 << bit, Ordering::SeqCst);
    }

    /// Atomically clears the bitmap one word at a time, invoking `callback`
    /// with the index of every bit that was set, in ascending order.
    pub fn scan_and_clear(&self, mut callback: impl FnMut(usize)) {
        for (word_index, word) in self.bits.iter().enumerate() {
            let mut remaining = word.swap(0, Ordering::SeqCst);
            while remaining != 0 {
                let bit = remaining.trailing_zeros() as usize;
                // Clear the lowest set bit.
                remaining &= remaining - 1;
                callback(word_index * BITS_PER_WORD + bit);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that every word of `bits` equals the corresponding word of
    /// `expected`.
    fn compare_bits(bits: &[AtomicU64; 128], expected_bits: &[u64; 128]) {
        for (actual, expected) in bits.iter().zip(expected_bits) {
            assert_eq!(actual.load(Ordering::Acquire), *expected);
        }
    }

    #[test]
    fn test_atomic_bitmap() {
        let bits: [AtomicU64; 128] = [0; 128].map(AtomicU64::new);
        let bitmap = AtomicBitmap::new(&bits);
        let mut expected_bits = [0u64; 128];

        // A bit within the first word.
        bitmap.set(5);
        expected_bits[0] = 1 << 5;
        compare_bits(&bits, &expected_bits);

        // A bit in a later word: 500 = 7 * 64 + 52.
        bitmap.set(500);
        expected_bits[7] = 1 << 52;
        compare_bits(&bits, &expected_bits);

        // Draining reports both indices in ascending order and leaves the
        // bitmap empty.
        let mut set = Vec::new();
        bitmap.scan_and_clear(|index| set.push(index));
        assert_eq!(set, vec![5, 500]);
        compare_bits(&bits, &[0u64; 128]);
    }
}