use super::DriveRegister;
use crate::DmaType;
use crate::protocol;
use crate::protocol::DeviceControlReg;
use crate::protocol::DeviceHeadReg;
use crate::protocol::ErrorReg;
use crate::protocol::IdeCommand;
use crate::protocol::Status;
use guestmem::AlignedHeapMemory;
use guestmem::GuestMemory;
use guestmem::ranges::PagedRange;
use ide_resources::IdePath;
use inspect::Inspect;
use safeatomic::AtomicSliceOps;
use scsi::AdditionalSenseCode;
use scsi::SenseKey;
use scsi_buffers::RequestBuffers;
use scsi_core::AsyncScsiDisk;
use scsi_core::ScsiResult;
use scsi_defs as scsi;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::task::Waker;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

/// Size in bytes of an ATAPI command packet (a 12-byte CDB).
const COMMAND_PACKET_SIZE: usize = 12;

/// Maximum number of bytes staged in the command buffer for a single transfer.
const MAX_TRANSFER_LEN: usize = 128 * 1024;

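/// SCSI sense information captured from a completed ATAPI command, used to
/// report errors back through the IDE error register.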
#[derive(Debug, Copy, Clone, PartialEq, Inspect)]
struct Sense {
    #[inspect(debug)]
    sense_key: SenseKey,
    #[inspect(debug)]
    additional_sense_code: AdditionalSenseCode,
    additional_sense_code_qualifier: u8,
}

impl Default for Sense {
    fn default() -> Self {
        Self {
            sense_key: FromZeros::new_zeroed(),
            additional_sense_code: FromZeros::new_zeroed(),
            additional_sense_code_qualifier: Default::default(),
        }
    }
}

impl Sense {
    fn new(
        sense_key: SenseKey,
        additional_sense_code: AdditionalSenseCode,
        additional_sense_code_qualifier: u8,
    ) -> Self {
        Self {
            sense_key,
            additional_sense_code,
            additional_sense_code_qualifier,
        }
    }
}

#[derive(Debug, Inspect)]
struct Registers {
    error: ErrorReg,
    #[inspect(hex)]
    features: u8,
    device_head: DeviceHeadReg,
    #[inspect(hex)]
    lba_low: u8,
    #[inspect(hex)]
    byte_count_low: u8,
    #[inspect(hex)]
    byte_count_high: u8,
    #[inspect(hex)]
    sector_count: u8,
    device_control_reg: DeviceControlReg,
    status: Status,
}

impl Registers {
    fn at_reset() -> Self {
        Self {
            byte_count_low: protocol::ATAPI_RESET_LBA_MID,
            byte_count_high: protocol::ATAPI_RESET_LBA_HIGH,
            lba_low: 1,
            device_head: DeviceHeadReg::new(),
            error: ErrorReg::new().with_amnf_ili_default(true),
            features: 0,
            sector_count: protocol::ATAPI_READY_FOR_PACKET_DEFAULT,
            device_control_reg: DeviceControlReg::new(),
            status: Status::new(),
        }
    }

    fn reset_signature(&mut self, reset_dev: bool) {
        self.byte_count_low = protocol::ATAPI_RESET_LBA_MID;
        self.byte_count_high = protocol::ATAPI_RESET_LBA_HIGH;
        self.lba_low = 1;
        self.sector_count = 1;
        let dev = if reset_dev {
            false
        } else {
            self.device_head.dev()
        };
        self.device_head = DeviceHeadReg::new().with_dev(dev);
    }
}

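/// A page-aligned, heap-backed staging buffer for ATAPI command data.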
#[derive(Debug)]
struct CommandBuffer {
    buffer: Arc<AlignedHeapMemory>,
}

#[derive(Debug)]
struct CommandBufferAccess {
    memory: GuestMemory,
}

impl CommandBuffer {
    fn new() -> Self {
        Self {
            buffer: Arc::new(AlignedHeapMemory::new(MAX_TRANSFER_LEN)),
        }
    }

    fn access(&self) -> CommandBufferAccess {
        CommandBufferAccess {
            memory: GuestMemory::new("atapi_buffer", self.buffer.clone()),
        }
    }
}

impl CommandBufferAccess {
    fn buffers(&self, offset: usize, len: usize, is_write: bool) -> RequestBuffers<'_> {
        // The buffer is 32 pages long, which covers MAX_TRANSFER_LEN.
        static BUFFER_RANGE: Option<PagedRange<'_>> = PagedRange::new(
            0,
            MAX_TRANSFER_LEN,
            &[
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                23, 24, 25, 26, 27, 28, 29, 30, 31,
            ],
        );

        RequestBuffers::new(
            &self.memory,
            BUFFER_RANGE.unwrap().subrange(offset, len),
            is_write,
        )
    }
}

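/// Tracks progress of the active data transfer through the command buffer.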
#[derive(Debug, Inspect)]
struct BufferState {
    /// The next byte offset to transfer.
    current_byte: u32,
    /// The total number of bytes in this transfer.
    total_bytes: u32,
    /// The DMA type for the transfer, or `None` for PIO.
    dma_type: Option<DmaType>,
}

impl BufferState {
    fn new(len: u32, dma: Option<DmaType>) -> Self {
        assert!(len != 0);
        assert!((len as usize) <= MAX_TRANSFER_LEN);
        Self {
            current_byte: 0,
            total_bytes: len,
            dma_type: dma,
        }
    }

    fn range(&self) -> std::ops::Range<usize> {
        self.current_byte as usize..self.total_bytes as usize
    }

    /// Advances the buffer by `n` bytes, returning whether the transfer is now complete.
    #[must_use]
    fn advance(&mut self, n: u32) -> bool {
        self.current_byte += n;
        assert!(self.current_byte <= self.total_bytes);
        self.is_empty()
    }

    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    fn len(&self) -> u32 {
        self.total_bytes - self.current_byte
    }
}

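/// The direction and payload of a PIO access to the data port.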
enum IoPortData<'a> {
    Read(&'a mut [u8]),
    Write(&'a [u8]),
}

#[derive(Debug, Inspect)]
struct AtapiDriveState {
    regs: Registers,
    pending_software_reset: bool,
    pending_interrupt: bool,
    error_pending: bool,
    pending_packet_command: bool,
    buffer: Option<BufferState>,
}

impl AtapiDriveState {
    fn new() -> Self {
        Self {
            regs: Registers::at_reset(),
            pending_software_reset: false,
            pending_interrupt: false,
            error_pending: false,
            pending_packet_command: false,
            buffer: None,
        }
    }
}

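/// An in-flight asynchronous SCSI operation against the backing disk.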
struct Io(Pin<Box<dyn Send + Future<Output = ScsiResult>>>);

impl std::fmt::Debug for Io {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.pad("io")
    }
}

#[derive(Inspect)]
pub(crate) struct AtapiDrive {
    scsi_disk: Arc<dyn AsyncScsiDisk>,
    state: AtapiDriveState,
    disk_path: IdePath,

    #[inspect(skip)]
    command_buffer: CommandBuffer,

    #[inspect(skip)]
    io: Option<Io>,
    #[inspect(skip)]
    waker: Option<Waker>,
}

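// Guest-facing interface: register accesses, PIO data transfers, and DMA.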
impl AtapiDrive {
    pub fn reset(&mut self) {
        tracing::debug!(path = ?self.disk_path, "drive reset");
        self.state = AtapiDriveState::new();
    }

    pub fn pio_read(&mut self, data: &mut [u8]) {
        if self.is_selected() {
            self.data_port_io(IoPortData::Read(data));
        }
    }

    pub fn pio_write(&mut self, data: &[u8]) {
        if self.is_selected() {
            self.data_port_io(IoPortData::Write(data));
        }
    }

    pub fn read_register(&mut self, register: DriveRegister) -> u8 {
        tracing::trace!(path = ?self.disk_path, ?register, "read_register");
        if !self.is_selected() {
            // The other drive is currently selected; reads return zero.
            return 0;
        }

        let regs = &self.state.regs;
        match register {
            DriveRegister::ErrorFeatures => regs.error.into_bits(),
            DriveRegister::SectorCount => regs.sector_count,
            DriveRegister::LbaLow => regs.lba_low,
            DriveRegister::LbaMid => regs.byte_count_low,
            DriveRegister::LbaHigh => regs.byte_count_high,
            DriveRegister::DeviceHead => regs.device_head.into(),
            DriveRegister::StatusCmd => {
                let status = self.state.regs.status;
                tracing::trace!(status = ?status, path = ?self.disk_path, "status query, deasserting");
                self.request_interrupt(false);
                status.into_bits()
            }
            DriveRegister::AlternateStatusDeviceControl => {
                let status = self.state.regs.status;
                tracing::trace!(status = ?status, path = ?self.disk_path, "alternate status query");
                status.into_bits()
            }
        }
    }

    pub fn write_register(&mut self, register: DriveRegister, data: u8) {
        tracing::trace!(path = ?self.disk_path, ?register, ?data, "write_register");
        let regs = &mut self.state.regs;
        match register {
            DriveRegister::ErrorFeatures => regs.features = data,
            DriveRegister::SectorCount => regs.sector_count = data,
            DriveRegister::LbaLow => regs.lba_low = data,
            DriveRegister::LbaMid => regs.byte_count_low = data,
            DriveRegister::LbaHigh => regs.byte_count_high = data,
            DriveRegister::DeviceHead => {
                self.write_device_head(data);
            }
            DriveRegister::StatusCmd => {
                // EXECUTE DEVICE DIAGNOSTIC is handled even when this drive is not selected.
                if self.is_selected() || data == IdeCommand::EXECUTE_DEVICE_DIAGNOSTIC.0 {
                    self.handle_command(data);
                }
            }
            DriveRegister::AlternateStatusDeviceControl => {
                let v = DeviceControlReg::from_bits_truncate(data);
                self.state.regs.device_control_reg = v.with_reset(false);
                if v.reset() && !self.state.pending_software_reset {
                    if !self.state.regs.status.bsy() {
                        self.reset();
                    } else {
                        self.state.pending_software_reset = true;
                        self.state.regs.status.set_bsy(true);
                    }
                }
            }
        }
    }

    pub fn interrupt_pending(&self) -> bool {
        self.state.pending_interrupt
            && !self.state.regs.device_control_reg.interrupt_mask()
            && self.is_selected()
            && !self.state.pending_software_reset
    }

    pub fn dma_request(&self) -> Option<(&DmaType, usize)> {
        if let Some(buffer) = &self.state.buffer {
            buffer
                .dma_type
                .as_ref()
                .map(|ty| (ty, buffer.len() as usize))
        } else {
            None
        }
    }

    pub fn dma_transfer(&mut self, guest_memory: &GuestMemory, gpa: u64, len: usize) {
        let buffer = self.state.buffer.as_ref().unwrap();
        assert!(len <= buffer.len() as usize);
        let dma_type = *buffer.dma_type.as_ref().unwrap();
        tracing::trace!(
            ?dma_type,
            gpa,
            len,
            cur_byte = buffer.current_byte,
            "performing dma"
        );
        let range = buffer.range();
        let buffer_ptr = &self.command_buffer.buffer[range][..len];
        let r = match dma_type {
            DmaType::Write => guest_memory.write_from_atomic(gpa, buffer_ptr),
            DmaType::Read => guest_memory.read_to_atomic(gpa, buffer_ptr),
        };
        if let Err(err) = r {
            tracelimit::error_ratelimited!(
                error = &err as &dyn std::error::Error,
                "dma transfer failed"
            );
        }
        if self.state.buffer.as_mut().unwrap().advance(len as u32) {
            match dma_type {
                DmaType::Write => self.read_data_port_buffer_complete(),
                DmaType::Read => self.write_data_port_buffer_complete(),
            };
            if self.state.buffer.is_none() {
                assert!(!self.state.regs.status.bsy());
                assert!(!self.state.regs.status.drq());
                self.request_interrupt(true);
            }
        }
    }

    pub fn dma_advance_buffer(&mut self, len: usize) {
        let buffer = self.state.buffer.as_ref().unwrap();
        assert!(len <= buffer.len() as usize);
        let dma_type = *buffer.dma_type.as_ref().unwrap();
        tracing::trace!(
            ?dma_type,
            len,
            cur_byte = buffer.current_byte,
            "advancing dma buffer"
        );
        if self.state.buffer.as_mut().unwrap().advance(len as u32) {
            match dma_type {
                DmaType::Write => self.read_data_port_buffer_complete(),
                DmaType::Read => self.write_data_port_buffer_complete(),
            };
            if self.state.buffer.is_none() {
                assert!(!self.state.regs.status.bsy());
                assert!(!self.state.regs.status.drq());
                self.request_interrupt(true);
            }
        }
    }
}

impl AtapiDrive {
    fn request_interrupt(&mut self, v: bool) {
        tracing::trace!(pending_interrupt = v, "request_interrupt");
        self.state.pending_interrupt = v;
    }

    fn write_device_head(&mut self, data: u8) {
        let old_device_head = self.state.regs.device_head;
        self.state.regs.device_head = data.into();
        tracing::trace!(
            path = ?self.disk_path,
            old_device = old_device_head.dev() as u8,
            new_device = self.state.regs.device_head.dev() as u8,
            "write_device_head"
        );
        if self.is_selected() && self.state.regs.status.into_bits() == 0 {
            self.state.regs.status = Status::new().with_drdy(true).with_dsc(true);
        }
    }

    fn read_data_port_buffer_complete(&mut self) {
        if self.state.pending_packet_command {
            self.signal_atapi_command_done(Sense::default());
        } else {
            self.complete_data_port_read();
        }
    }

    fn complete_data_port_read(&mut self) {
        self.state.regs.status.set_bsy(false);
        self.state.regs.status.set_err(false);
        self.state.regs.status.set_drq(false);
        self.state.regs.status.set_drdy(true);
        self.state.regs.status.set_dsc(true);

        self.state.regs.sector_count = protocol::ATAPI_COMMAND_COMPLETE;
        self.state.buffer = None;
    }

    fn write_data_port_buffer_complete(&mut self) {
        self.handle_atapi_packet_command()
    }

    fn handle_command(&mut self, command: u8) {
        self.handle_atapi_command(command);
    }

    fn is_selected(&self) -> bool {
        self.state.regs.device_head.dev() as u8 == self.disk_path.drive
    }

    fn data_port_io(&mut self, mut io_type: IoPortData<'_>) {
        let Some(buffer_state) = &mut self.state.buffer else {
            tracelimit::warn_ratelimited!("no buffer available");
            return;
        };

        let length = match io_type {
            IoPortData::Read(ref data) => {
                tracing::trace!(
                    cur_byte = buffer_state.current_byte,
                    total_bytes = buffer_state.total_bytes,
                    length = data.len(),
                    path = ?self.disk_path,
                    "data port read"
                );

                data.len()
            }
            IoPortData::Write(data) => {
                tracing::trace!(
                    cur_byte = buffer_state.current_byte,
                    total_bytes = buffer_state.total_bytes,
                    length = data.len(),
                    path = ?self.disk_path,
                    "data port write"
                );
                data.len()
            }
        } as u32;

        let range = buffer_state.range();
        let current_buffer = &self.command_buffer.buffer[range];

        let length = length.min(current_buffer.len() as u32);
        if length == 0 {
            return;
        }

        match io_type {
            IoPortData::Read(ref mut data) => {
                current_buffer[..length as usize].atomic_read(&mut data[..length as usize]);
                tracing::trace!(?data, "data payload");
            }
            IoPortData::Write(data) => {
                current_buffer[..length as usize].atomic_write(&data[..length as usize]);
                tracing::trace!(?current_buffer, ?data, "data_port_io");
            }
        }

        if self.state.buffer.as_mut().unwrap().advance(length) {
            match io_type {
                IoPortData::Read(_) => self.read_data_port_buffer_complete(),
                IoPortData::Write(_) => self.write_data_port_buffer_complete(),
            }
        }
    }
}

impl AtapiDrive {
    pub fn new(scsi_disk: Arc<dyn AsyncScsiDisk>, disk_path: IdePath) -> Self {
        Self {
            scsi_disk,
            state: AtapiDriveState::new(),
            disk_path,
            command_buffer: CommandBuffer::new(),
            io: None,
            waker: None,
        }
    }

    fn state(&self) -> &AtapiDriveState {
        &self.state
    }

    fn state_mut(&mut self) -> &mut AtapiDriveState {
        &mut self.state
    }

    pub fn handle_read_dma_descriptor_error(&mut self) -> bool {
        if self.io.is_none() {
            if self.state.pending_software_reset {
                self.reset();
            }
            self.state.regs.status.set_bsy(false);
            self.state.regs.status.set_drq(false);
            return true;
        }

        false
    }

    fn handle_atapi_command(&mut self, command: u8) {
        let command = IdeCommand(command);
        tracing::debug!(path = ?self.disk_path, ?command, ?self.state.regs, "atapi command");

        if self.state.regs.status.bsy() {
            tracelimit::warn_ratelimited!(new_command = ?command, "A command is already pending");
            return;
        }

        if self.state.regs.status.drq() {
            tracelimit::warn_ratelimited!(new_command = ?command, "data transfer is in progress");
            return;
        }

        self.state.regs.status.set_drdy(false);
        self.state.regs.status.set_bsy(true);
        self.state.regs.error = ErrorReg::new();
        self.state.regs.status.set_err(false);

        match command {
            IdeCommand::DEVICE_RESET => {
                return self.handle_soft_reset(false);
            }
            IdeCommand::EXECUTE_DEVICE_DIAGNOSTIC => {
                self.state.regs.reset_signature(true);
                self.state.regs.error = ErrorReg::new().with_amnf_ili_default(true);
            }
            IdeCommand::PACKET_COMMAND => {
                return self.prepare_atapi_command_packet();
            }
            IdeCommand::IDENTIFY_PACKET_DEVICE => {
                self.read_features();
            }
            IdeCommand::CHECK_POWER_MODE => {
                self.state.regs.sector_count = protocol::DEVICE_ACTIVE_OR_IDLE;
            }
            IdeCommand::SET_FEATURES => {
                // No features are supported; accept the command without side effects.
            }
            IdeCommand::IDENTIFY_DEVICE | IdeCommand::READ_SECTORS => {
                // These commands are for hard drives; an ATAPI device aborts
                // them and reports its signature instead.
                self.state.regs.reset_signature(false);
                self.state.regs.status.set_err(true);
                self.state.regs.error = ErrorReg::new().with_unknown_command(true);
            }
            command => {
                tracing::debug!(?command, "unknown command");
                self.state.regs.status.set_err(true);
                self.state.regs.error = ErrorReg::new().with_unknown_command(true);
            }
        };

        self.state.regs.status.set_bsy(false);
        self.state.regs.status.set_drdy(true);
        self.state.regs.status.set_dsc(true);
        self.request_interrupt(true);
    }

    fn get_dma(&self) -> Option<DmaType> {
        if self.state.regs.features & 0x1 != 0x0 {
            Some(DmaType::Write)
        } else {
            None
        }
    }

    fn prepare_atapi_command_packet(&mut self) {
        let dma = self.get_dma();
        tracing::trace!(?dma, "prepare atapi command");
        self.state.buffer = Some(BufferState::new(COMMAND_PACKET_SIZE as u32, dma));
        self.state.regs.sector_count = protocol::ATAPI_READY_FOR_PACKET_DEFAULT;
        self.state.regs.status.set_bsy(false);
        self.state.regs.status.set_drdy(true);
        self.state.regs.status.set_dsc(true);
        self.state.regs.status.set_drq(true);
    }

    fn handle_soft_reset(&mut self, reset_dev: bool) {
        tracing::debug!(path = ?self.disk_path, "Command Soft Reset");
        self.state.buffer = None;

        self.state.regs.reset_signature(reset_dev);
        self.state.regs.error = ErrorReg::new().with_amnf_ili_default(true);
        self.state.regs.status = Status::new();
    }

    fn read_features(&mut self) {
        // Static IDENTIFY PACKET DEVICE data. ATA strings are stored with each
        // pair of characters byte-swapped and padded with spaces.
        let features = protocol::IdeFeatures {
            config_bits: 0x85C0,
            serial_no: *b"                    ",
            buffer_size: 0x0080,
            firmware_revision: *b"        ",
            model_number: "iVtrau lDC                              ".as_bytes()[..]
                .try_into()
                .unwrap(),
            capabilities: 0x0300,
            pio_cycle_times: 0x0200,
            dma_cycle_times: 0x0200,
            new_words_valid_flags: 0x0003,
            multi_sector_capabilities: 0x0100_u16 | protocol::MAX_SECTORS_MULT_TRANSFER_DEFAULT,
            single_word_dma_mode: 0x0007,
            multi_word_dma_mode: 0x0407,
            enhanced_pio_mode: 0x0003,
            min_multi_dma_time: 0x0078,
            recommended_multi_dma_time: 0x0078,
            min_pio_cycle_time_no_flow: 0x01FC,
            min_pio_cycle_time_flow: 0x00B4,
            ..FromZeros::new_zeroed()
        };

        self.command_buffer.buffer[..protocol::IDENTIFY_DEVICE_BYTES].atomic_write_obj(&features);
        self.state.buffer = Some(BufferState::new(
            protocol::IDENTIFY_DEVICE_BYTES as u32,
            None,
        ));
        self.state.regs.sector_count = protocol::ATAPI_DATA_FOR_HOST;
        self.state.regs.status.set_drq(true);
    }

    fn handle_atapi_packet_command(&mut self) {
        assert!(self.state.buffer.is_some());

        if self.state.pending_packet_command {
            tracelimit::error_ratelimited!(path = ?self.disk_path, "Unexpected: pending_packet_command at beginning of atapi packet command");
        }

        self.state.regs.status.set_drdy(false);
        self.state.regs.status.set_drq(false);
        self.state.regs.status.set_bsy(true);

        self.state.regs.byte_count_high = 0;
        self.state.regs.byte_count_low = 0;
        self.state.regs.sector_count = protocol::ATAPI_COMMAND_COMPLETE;

        let len = COMMAND_PACKET_SIZE;
        let buffer_ptr = &self.command_buffer.buffer[..len];

        let mut cdb = [0_u8; size_of::<scsi::Cdb16>()];
        buffer_ptr.atomic_read(&mut cdb[..len]);
        tracing::debug!(path = ?self.disk_path, ?buffer_ptr, ?cdb, "Handle ATAPI packet command");

        self.state.buffer = None;
        let request = scsi_core::Request { cdb, srb_flags: 0 };
        self.set_io(request);
    }

    fn process_atapi_command_result(&mut self, result: ScsiResult) {
        let sense = if let Some(sense_data) = result.sense_data {
            Sense::new(
                sense_data.header.sense_key,
                sense_data.additional_sense_code,
                sense_data.additional_sense_code_qualifier,
            )
        } else {
            Sense::default()
        };

        if sense != Sense::default() {
            tracing::debug!(path = ?self.disk_path, ?sense, "Issue ATAPI command error");
            return self.signal_atapi_command_done(sense);
        }

        if result.tx == 0 {
            self.signal_atapi_command_done(sense);
        } else {
            self.signal_atapi_data_ready(result.tx);
        }
    }

    fn signal_atapi_command_done(&mut self, sense: Sense) {
        tracing::debug!(path = ?self.disk_path, "Signal ATAPI Command done");

        // The sense key is reported in the upper nibble of the error register.
        self.state.regs.error = (sense.sense_key.0 << 4).into();
        self.state.regs.status.set_bsy(false);
        self.state.regs.status.set_err(false);
        self.state.regs.status.set_drq(false);
        self.state.regs.status.set_drdy(true);
        self.state.regs.status.set_dsc(true);

        if self.state.regs.error != ErrorReg::new() {
            if sense.sense_key == SenseKey::ILLEGAL_REQUEST
                || sense.sense_key == SenseKey::ABORTED_COMMAND
            {
                self.state.regs.error.set_unknown_command(true);
            }

            self.state.regs.status.set_err(true);
        }

        self.state.regs.sector_count = protocol::ATAPI_COMMAND_COMPLETE;
        self.state.buffer = None;
        self.state.pending_packet_command = false;
        self.request_interrupt(true);
    }

    fn signal_atapi_data_ready(&mut self, tx: usize) {
        assert!(tx > 0);
        tracing::trace!(path = ?self.disk_path, tx, "Signal ATAPI data ready");
        let use_dma = (self.state.regs.features & 0x1) != 0;
        if use_dma {
            assert!(tx <= MAX_TRANSFER_LEN);
        }
        self.state.buffer = Some(BufferState {
            current_byte: 0,
            total_bytes: tx as u32,
            dma_type: if use_dma { Some(DmaType::Write) } else { None },
        });

        // Report the byte count of the data that is ready for the host.
        self.state.pending_packet_command = true;
        self.state.regs.byte_count_low = (tx & 0x00FF) as u8;
        self.state.regs.byte_count_high = ((tx & 0xFF00) >> 8) as u8;
        self.state.regs.sector_count = protocol::ATAPI_DATA_FOR_HOST;

        self.state.regs.status.set_bsy(false);
        self.state.regs.status.set_drq(true);
        self.state.regs.status.set_drdy(true);
        self.request_interrupt(true);
    }

    fn set_io(&mut self, request: scsi_core::Request) {
        assert!(self.io.is_none());
        let scsi_disk = self.scsi_disk.clone();
        let access = self.command_buffer.access();
        let fut = async move {
            let buffers = access.buffers(0, MAX_TRANSFER_LEN, true);
            scsi_disk.execute_scsi(&buffers, &request).await
        };
        self.io = Some(Io(Box::pin(fut)));
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }

    pub fn poll_device(&mut self, cx: &mut Context<'_>) {
        if let Some(io) = self.io.as_mut() {
            if let Poll::Ready(result) = io.0.as_mut().poll(cx) {
                self.io = None;
                self.process_atapi_command_result(result);

                if !self.state.regs.status.bsy() && self.state.pending_software_reset {
                    self.reset();
                }
            }
        }
        self.waker = Some(cx.waker().clone());
    }
}

pub(crate) mod save_restore {
    use self::state::SavedAtapiDriveState;
    use self::state::SavedDmaType;
    use self::state::SavedRegisterState;
    use super::*;
    use std::sync::atomic::Ordering;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;

    pub mod state {
        use mesh::payload::Protobuf;
        use scsi_core::save_restore::ScsiSavedState;

        #[derive(Protobuf)]
        #[mesh(package = "storage.ide.device.atapi")]
        pub struct SavedRegisterState {
            #[mesh(1)]
            pub error: u8,
            #[mesh(2)]
            pub features: u8,
            #[mesh(3)]
            pub device_head: u8,
            #[mesh(4)]
            pub lba_low: u8,
            #[mesh(5)]
            pub byte_count_low: u8,
            #[mesh(6)]
            pub byte_count_high: u8,
            #[mesh(7)]
            pub sector_count: u8,
            #[mesh(8)]
            pub device_control_reg: u8,
            #[mesh(9)]
            pub status: u8,
        }

        #[derive(Protobuf)]
        #[mesh(package = "storage.ide.device.atapi")]
        pub enum SavedDmaType {
            #[mesh(1)]
            Read,
            #[mesh(2)]
            Write,
        }

        #[derive(Protobuf)]
        #[mesh(package = "storage.ide.device.atapi")]
        pub struct SavedAtapiDriveState {
            #[mesh(1)]
            pub registers: SavedRegisterState,

            #[mesh(2)]
            pub pending_interrupt: bool,
            #[mesh(3)]
            pub error_pending: bool,

            #[mesh(4)]
            pub scsi: ScsiSavedState,

            #[mesh(5)]
            pub dma_type: Option<SavedDmaType>,
            #[mesh(6)]
            pub command_buffer: Vec<u8>,

            #[mesh(7)]
            pub pending_packet_command: bool,
            #[mesh(8)]
            pub pending_software_reset: bool,
        }
    }

    impl AtapiDrive {
        pub fn save(&self) -> Result<SavedAtapiDriveState, SaveError> {
            let AtapiDriveState {
                regs:
                    Registers {
                        error,
                        features,
                        device_head,
                        byte_count_low,
                        byte_count_high,
                        lba_low,
                        sector_count,
                        device_control_reg,
                        status,
                    },
                pending_software_reset,
                pending_interrupt,
                error_pending,
                pending_packet_command,
                buffer,
            } = self.state();

            let scsi = self.scsi_disk.save()?.unwrap();

            let command_buffer = if let Some(buffer_state) = &self.state.buffer {
                self.command_buffer.buffer[buffer_state.range()]
                    .iter()
                    .map(|val| val.load(Ordering::Relaxed))
                    .collect()
            } else {
                Vec::new()
            };

            Ok(SavedAtapiDriveState {
                registers: SavedRegisterState {
                    error: error.into_bits(),
                    features: *features,
                    device_head: (*device_head).into(),
                    lba_low: *lba_low,
                    byte_count_low: *byte_count_low,
                    byte_count_high: *byte_count_high,
                    sector_count: *sector_count,
                    device_control_reg: device_control_reg.into_bits(),
                    status: status.into_bits(),
                },
                pending_interrupt: *pending_interrupt,
                error_pending: *error_pending,
                scsi,
                dma_type: match buffer {
                    Some(buffer_state) => buffer_state.dma_type.as_ref().map(|dma| match dma {
                        DmaType::Read => SavedDmaType::Read,
                        DmaType::Write => SavedDmaType::Write,
                    }),
                    None => None,
                },
                command_buffer,
                pending_packet_command: *pending_packet_command,
                pending_software_reset: *pending_software_reset,
            })
        }

        pub fn restore(&mut self, state: SavedAtapiDriveState) -> Result<(), RestoreError> {
            let SavedAtapiDriveState {
                registers:
                    SavedRegisterState {
                        error,
                        features,
                        device_head,
                        lba_low,
                        byte_count_low: lba_mid,
                        byte_count_high: lba_high,
                        sector_count,
                        device_control_reg,
                        status,
                    },
                pending_interrupt,
                error_pending,
                scsi,
                dma_type,
                command_buffer,
                pending_packet_command,
                pending_software_reset,
            } = state;

            self.scsi_disk.restore(&scsi)?;

            *self.state_mut() = AtapiDriveState {
                regs: Registers {
                    error: error.into(),
                    features,
                    device_head: device_head.into(),
                    byte_count_low: lba_mid,
                    byte_count_high: lba_high,
                    lba_low,
                    sector_count,
                    device_control_reg: DeviceControlReg::from_bits(device_control_reg),
                    status: Status::from_bits(status),
                },
                pending_software_reset,
                pending_interrupt,
                error_pending,
                pending_packet_command,
                buffer: if command_buffer.is_empty() {
                    None
                } else {
                    self.command_buffer.buffer[..command_buffer.len()]
                        .atomic_write(command_buffer.as_bytes());

                    Some(BufferState {
                        current_byte: 0,
                        total_bytes: command_buffer.len() as u32,
                        dma_type: dma_type.map(|dma| match dma {
                            SavedDmaType::Read => DmaType::Read,
                            SavedDmaType::Write => DmaType::Write,
                        }),
                    })
                },
            };

            Ok(())
        }
    }
}