use crate::Cpu;
use crate::registers::Bitness;
use crate::registers::Gp;
use crate::registers::GpSize;
use crate::registers::RegisterIndex;
use crate::registers::Segment;
use crate::registers::bitness;
use iced_x86::Code;
use iced_x86::Decoder;
use iced_x86::DecoderError;
use iced_x86::DecoderOptions;
use iced_x86::Instruction;
use iced_x86::OpKind;
use iced_x86::Register;
use thiserror::Error;
use x86defs::Exception;

mod arith;
mod bt;
mod cmpxchg816;
mod cond;
pub mod fast_path;
mod instruction;
mod mov;
mod muldiv;
mod rep;
mod rflags;
mod shift_rotate;

pub use rep::MAX_REP_LOOPS;

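/// Abstraction over the register-sized integer types (`u64`, `u128`) used by
/// the emulator, exposing their little-endian byte representation.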
trait EmulatorRegister {
    type Array: std::ops::Index<std::ops::RangeTo<usize>, Output = [u8]>
        + std::ops::IndexMut<std::ops::RangeTo<usize>>;
    fn empty_bytes() -> Self::Array;
    fn from_le_bytes(bytes: Self::Array) -> Self;
    fn to_le_bytes(&self) -> Self::Array;
}
impl EmulatorRegister for u64 {
    type Array = [u8; 8];
    fn empty_bytes() -> Self::Array {
        [0; 8]
    }
    fn from_le_bytes(bytes: Self::Array) -> Self {
        Self::from_le_bytes(bytes)
    }
    fn to_le_bytes(&self) -> Self::Array {
        (*self).to_le_bytes()
    }
}
impl EmulatorRegister for u128 {
    type Array = [u8; 16];
    fn empty_bytes() -> Self::Array {
        [0; 16]
    }
    fn from_le_bytes(bytes: Self::Array) -> Self {
        Self::from_le_bytes(bytes)
    }
    fn to_le_bytes(&self) -> Self::Array {
        (*self).to_le_bytes()
    }
}

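/// Converts an iced-x86 general-purpose register into the emulator's
/// [`RegisterIndex`], capturing both the containing 64-bit register and the
/// accessed size (including the high-byte forms AH/CH/DH/BH).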
impl From<Register> for RegisterIndex {
    fn from(val: Register) -> Self {
        let size = match val.size() {
            1 => {
                if val >= Register::SPL || val < Register::AH {
                    GpSize::BYTE(0)
                } else {
                    GpSize::BYTE(8)
                }
            }
            2 => GpSize::WORD,
            4 => GpSize::DWORD,
            8 => GpSize::QWORD,
            _ => panic!("invalid gp register size"),
        };
        let extended_index = match val.full_register() {
            Register::RAX => Gp::RAX,
            Register::RCX => Gp::RCX,
            Register::RDX => Gp::RDX,
            Register::RBX => Gp::RBX,
            Register::RSP => Gp::RSP,
            Register::RBP => Gp::RBP,
            Register::RSI => Gp::RSI,
            Register::RDI => Gp::RDI,
            Register::R8 => Gp::R8,
            Register::R9 => Gp::R9,
            Register::R10 => Gp::R10,
            Register::R11 => Gp::R11,
            Register::R12 => Gp::R12,
            Register::R13 => Gp::R13,
            Register::R14 => Gp::R14,
            Register::R15 => Gp::R15,
            _ => panic!("invalid gp register index"),
        };
        RegisterIndex {
            extended_index,
            size,
        }
    }
}

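/// Converts an iced-x86 segment register into the emulator's [`Segment`].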
impl From<Register> for Segment {
    fn from(val: Register) -> Self {
        match val {
            Register::ES => Segment::ES,
            Register::CS => Segment::CS,
            Register::SS => Segment::SS,
            Register::DS => Segment::DS,
            Register::FS => Segment::FS,
            Register::GS => Segment::GS,
            _ => panic!("invalid segment register index"),
        }
    }
}

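/// An x86 instruction emulator. Decodes the instruction in `bytes` and
/// performs its memory, port I/O, and register accesses through the provided
/// [`Cpu`] implementation.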
#[derive(Debug)]
pub struct Emulator<'a, T> {
    cpu: T,
    decoder_options: u32,
    bytes: &'a [u8],
}

#[derive(Debug, Error)]
pub enum Error<E> {
    #[error("asked to emulate an instruction that doesn't touch memory or PIO")]
    NonMemoryOrPortInstruction(Vec<u8>),
    #[error("unsupported instruction")]
    UnsupportedInstruction(Vec<u8>),
    #[error("memory access error - {1:?} @ {0:#x}")]
    MemoryAccess(u64, OperationKind, #[source] E),
    #[error("io port access error - {1:?} @ {0:#x}")]
    IoPort(u16, OperationKind, #[source] E),
    #[error("XMM register access error - {1:?} @ {0:#x}")]
    XmmRegister(usize, OperationKind, #[source] E),
    #[error("executing instruction caused exception due to {2:?} - {0:?}({1:?})")]
    InstructionException(Exception, Option<u32>, ExceptionCause),
    #[error("decode failure")]
    DecodeFailure,
    #[error("not enough instruction bytes")]
    NotEnoughBytes,
}

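/// Internal control-flow result used while emulating a single instruction.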
enum InternalError<E> {
    /// The instruction was not completed and should be retried without
    /// advancing RIP (e.g. a locked compare-and-write observed a concurrent
    /// modification).
    Retry,
    /// A terminal emulation error.
    Error(Box<Error<E>>),
}

impl<E> From<Error<E>> for InternalError<E> {
    fn from(err: Error<E>) -> Self {
        InternalError::Error(Box::new(err))
    }
}

#[derive(Debug)]
pub enum ExceptionCause {
    MandatoryAlignment,
    AlignmentCheck,
    DebugTrap,
    DivideOverflow,
    DivideByZero,
    IoPrivilegeLevel,
    SegmentValidity,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum OperationKind {
    Read,
    Write,
    AddressComputation,
}

/// Alignment requirement applied when validating a guest virtual address.
#[derive(Copy, Clone)]
pub enum AlignmentMode {
    /// The access must be aligned to the given number of bytes.
    Aligned(u64),
    /// No alignment requirement is enforced.
    Unaligned,
    /// Standard x86 behavior: alignment against the operand size is only
    /// enforced when alignment checking is enabled (user mode with RFLAGS.AC
    /// and CR0.AM set).
    Standard,
}

impl<'a, T: Cpu> Emulator<'a, T> {
    pub fn new(cpu: T, vendor: x86defs::cpuid::Vendor, bytes: &'a [u8]) -> Self {
        let mut decoder_options = 0;
        if vendor.is_amd_compatible() {
            decoder_options |= DecoderOptions::AMD;
        }
        Emulator {
            cpu,
            decoder_options,
            bytes,
        }
    }

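    /// Computes the linear address of the instruction pointer advanced by
    /// `offset` bytes. Outside of 64-bit mode the CS base is applied and the
    /// CS access is validated first; returns `None` if validation fails.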
    pub fn linear_ip(&mut self, offset: u64) -> Option<u64> {
        let rip = self.cpu.rip().wrapping_add(offset);
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);

        match bitness(cr0, efer, cs) {
            Bitness::Bit64 => Some(rip),
            Bitness::Bit32 | Bitness::Bit16 => {
                self.verify_segment_access(
                    Segment::CS,
                    OperationKind::AddressComputation,
                    offset,
                    1,
                )
                .ok()?;
                Some(cs.base.wrapping_add(rip))
            }
        }
    }

    fn current_privilege_level(&mut self) -> u8 {
        self.cpu
            .segment(Segment::SS)
            .attributes
            .descriptor_privilege_level()
    }

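    /// Returns true when the current privilege level is user mode (CPL 3).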
    pub fn is_user_mode(&mut self) -> bool {
        self.current_privilege_level() == x86defs::USER_MODE_DPL
    }

    fn memory_op_offset(&mut self, instr: &Instruction, operand: u32) -> u64 {
        instruction::memory_op_offset(&mut self.cpu, instr, operand)
    }

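    /// Computes the guest virtual address for a `segment:offset` access of
    /// `len` bytes, validating segment access rights (outside of 64-bit mode)
    /// and the requested alignment.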
    fn compute_and_validate_gva(
        &mut self,
        segment: Segment,
        offset: u64,
        len: usize,
        op: OperationKind,
        alignment: AlignmentMode,
    ) -> Result<u64, Error<T::Error>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);

        let base = match bitness(cr0, efer, cs) {
            Bitness::Bit64 => {
                if matches!(segment, Segment::FS | Segment::GS) {
                    self.cpu.segment(segment).base
                } else {
                    0
                }
            }
            Bitness::Bit32 | Bitness::Bit16 => {
                self.verify_segment_access(segment, op, offset, len)?;
                self.cpu.segment(segment).base
            }
        };

        let gva = base.wrapping_add(offset);
        tracing::trace!(?op, base, offset, gva, "compute_gva");
        self.verify_gva_alignment(gva, len, alignment)?;
        Ok(gva)
    }

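    /// Validates a segment access for 16- or 32-bit mode, checking segment
    /// presence, type, privilege, and limits. Returns the appropriate
    /// general-protection or segment-not-present exception error on failure.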
    fn verify_segment_access(
        &mut self,
        segment: Segment,
        op: OperationKind,
        offset: u64,
        len: usize,
    ) -> Result<(), Error<T::Error>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);
        let bitness = bitness(cr0, efer, cs);
        assert_ne!(bitness, Bitness::Bit64);

        let segment_value = self.cpu.segment(segment);

        // Last byte touched by the access, used for the limit checks below.
        let offset_end = offset + len as u64 - 1;

        let gp0 = Error::InstructionException(
            Exception::GENERAL_PROTECTION_FAULT,
            Some(0),
            ExceptionCause::SegmentValidity,
        );
        let gpindex = Error::InstructionException(
            Exception::GENERAL_PROTECTION_FAULT,
            Some(segment_value.selector.into()),
            ExceptionCause::SegmentValidity,
        );

        if matches!(segment, Segment::CS) {
            // Writes through CS are rejected in 32-bit mode.
            if bitness == Bitness::Bit32 && op == OperationKind::Write {
                return Err(gp0);
            }

            // Reads require a readable code segment.
            if op == OperationKind::Read && segment_value.attributes.segment_type() & 0b0010 == 0 {
                return Err(gp0);
            }

            if offset_end > segment_value.limit as u64 {
                return Err(gp0);
            }
        } else {
            if !segment_value.attributes.present() {
                Err(Error::InstructionException(
                    Exception::SEGMENT_NOT_PRESENT,
                    None,
                    ExceptionCause::SegmentValidity,
                ))?;
            }

            // Null selectors cannot be used for data accesses in 32-bit mode.
            if bitness == Bitness::Bit32 && segment_value.selector & !0x3 == 0 {
                return Err(gp0);
            }

            // Neither the requestor privilege level nor the current privilege
            // level may exceed the descriptor privilege level.
            let rpl = if matches!(bitness, Bitness::Bit32) {
                (segment_value.selector & 0x3) as u8
            } else {
                0
            };
            let cpl = self.current_privilege_level();
            let dpl = segment_value.attributes.descriptor_privilege_level();
            if rpl > dpl || cpl > dpl {
                return Err(gpindex);
            }

            // Only non-system data segments are valid here.
            if !(segment_value.attributes.non_system_segment()
                && segment_value.attributes.segment_type() & 0b1000 == 0)
            {
                return Err(gpindex);
            }

            // Writes require a writable segment.
            if op == OperationKind::Write && segment_value.attributes.segment_type() & 0b0010 == 0 {
                return Err(gp0);
            }

            if segment_value.attributes.segment_type() & 0b0100 == 0 {
                if offset_end > segment_value.limit as u64 {
                    return Err(gp0);
                }
            } else {
                // Expand-down segment: valid offsets lie above the limit and
                // within the segment's maximum size.
                let max = if segment_value.attributes.default() {
                    u32::MAX as u64
                } else {
                    u16::MAX as u64
                };
                if offset <= segment_value.limit as u64 || offset_end > max {
                    return Err(gp0);
                }
            };
        }

        Ok(())
    }

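    /// Enforces the requested [`AlignmentMode`] for an access to `gva` of
    /// `len` bytes, raising #GP(0) for mandatory alignment violations and #AC
    /// when alignment checking is enabled.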
    fn verify_gva_alignment(
        &mut self,
        gva: u64,
        len: usize,
        alignment: AlignmentMode,
    ) -> Result<(), Error<T::Error>> {
        match alignment {
            AlignmentMode::Aligned(a) => {
                if gva % a != 0 {
                    Err(Error::InstructionException(
                        Exception::GENERAL_PROTECTION_FAULT,
                        Some(0),
                        ExceptionCause::MandatoryAlignment,
                    ))?
                }
            }
            AlignmentMode::Unaligned => {}
            AlignmentMode::Standard => {
                if self.is_user_mode()
                    && self.cpu.rflags().alignment_check()
                    && self.cpu.cr0() & x86defs::X64_CR0_AM != 0
                {
                    if gva % len as u64 != 0 {
                        Err(Error::InstructionException(
                            Exception::ALIGNMENT_CHECK,
                            None,
                            ExceptionCause::AlignmentCheck,
                        ))?
                    }
                }
            }
        }
        Ok(())
    }

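    /// Reads `data.len()` bytes of memory at `segment:offset` after
    /// validating and translating the access.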
    pub async fn read_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        data: &mut [u8],
    ) -> Result<(), Error<T::Error>> {
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            data.len(),
            OperationKind::Read,
            alignment,
        )?;
        let user_mode = self.is_user_mode();
        self.cpu
            .read_memory(gva, data, user_mode)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Read, err))?;

        Ok(())
    }

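    /// Writes `data` to memory at `segment:offset` after validating and
    /// translating the access.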
    pub async fn write_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        data: &[u8],
    ) -> Result<(), Error<T::Error>> {
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            data.len(),
            OperationKind::Write,
            alignment,
        )?;
        let user_mode = self.is_user_mode();
        self.cpu
            .write_memory(gva, data, user_mode)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Write, err))?;

        Ok(())
    }

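    /// Atomically writes `new` to `segment:offset` if the memory currently
    /// contains `current`, returning whether the write took effect.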
    async fn compare_and_write_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        current: &[u8],
        new: &[u8],
    ) -> Result<bool, InternalError<T::Error>> {
        assert_eq!(current.len(), new.len());
        let user_mode = self.is_user_mode();
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            new.len(),
            OperationKind::Write,
            alignment,
        )?;
        let success = self
            .cpu
            .compare_and_write_memory(gva, current, new, user_mode)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Write, err))?;

        Ok(success)
    }

    async fn read_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
    ) -> Result<R, InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        let mut data = R::empty_bytes();
        self.read_memory(
            instr.memory_segment().into(),
            offset,
            alignment,
            &mut data[..instr.memory_size().size()],
        )
        .await?;
        Ok(R::from_le_bytes(data))
    }

    async fn write_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
        data: R,
    ) -> Result<(), InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        self.write_memory(
            instr.memory_segment().into(),
            offset,
            alignment,
            &data.to_le_bytes()[..instr.memory_size().size()],
        )
        .await?;
        Ok(())
    }

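    /// Writes the memory operand of an instruction. For instructions with a
    /// LOCK prefix (and XCHG, which is implicitly locked) the write is
    /// performed as a compare-and-write against `current`; if the comparison
    /// fails, the whole instruction is retried.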
    async fn compare_if_locked_and_write_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
        current: R,
        new: R,
    ) -> Result<(), InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        if instr.has_lock_prefix() || instr.mnemonic() == iced_x86::Mnemonic::Xchg {
            if !self
                .compare_and_write_memory(
                    instr.memory_segment().into(),
                    offset,
                    alignment,
                    &current.to_le_bytes()[..instr.memory_size().size()],
                    &new.to_le_bytes()[..instr.memory_size().size()],
                )
                .await?
            {
                return Err(InternalError::Retry);
            }
        } else {
            self.write_memory(
                instr.memory_segment().into(),
                offset,
                alignment,
                &new.to_le_bytes()[..instr.memory_size().size()],
            )
            .await?;
        }
        Ok(())
    }

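    /// Raises #GP(0) if the current privilege level is not permitted to
    /// access I/O ports (CPL > IOPL).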
    fn check_io_privilege_level(&mut self) -> Result<(), InternalError<T::Error>> {
        if self.current_privilege_level() > self.cpu.rflags().io_privilege_level() {
            Err(Error::InstructionException(
                Exception::GENERAL_PROTECTION_FAULT,
                Some(0),
                ExceptionCause::IoPrivilegeLevel,
            ))?;
        }
        Ok(())
    }

    async fn read_io(&mut self, port: u16, data: &mut [u8]) -> Result<(), InternalError<T::Error>> {
        self.check_io_privilege_level()?;
        self.cpu
            .read_io(port, data)
            .await
            .map_err(|err| Error::IoPort(port, OperationKind::Read, err))?;

        Ok(())
    }

    async fn write_io(&mut self, port: u16, data: &[u8]) -> Result<(), InternalError<T::Error>> {
        self.check_io_privilege_level()?;
        self.cpu
            .write_io(port, data)
            .await
            .map_err(|err| Error::IoPort(port, OperationKind::Write, err))?;

        Ok(())
    }

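    /// Reads the value of the given instruction operand, which may be a
    /// memory operand, a general-purpose register, or an immediate.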
    async fn op_value(
        &mut self,
        instr: &Instruction,
        operand: u32,
    ) -> Result<u64, InternalError<T::Error>> {
        Ok(match instr.op_kind(operand) {
            OpKind::Memory => {
                self.read_memory_op(instr, operand, AlignmentMode::Standard)
                    .await?
            }
            OpKind::Register => self.cpu.gp(instr.op_register(operand).into()),
            OpKind::Immediate8
            | OpKind::Immediate16
            | OpKind::Immediate32
            | OpKind::Immediate64
            | OpKind::Immediate8to16
            | OpKind::Immediate8to32
            | OpKind::Immediate8to64
            | OpKind::Immediate32to64 => instr.immediate(operand),
            _ => Err(self.unsupported_instruction(instr))?,
        })
    }

    async fn op_value_sign_extend(
        &mut self,
        instr: &Instruction,
        operand: u32,
    ) -> Result<i64, InternalError<T::Error>> {
        let value = self.op_value(instr, operand).await?;
        let size = instr.memory_size().size();
        let shift_size = 64 - (size * 8);
        let new_value = ((value as i64) << shift_size) >> shift_size;
        Ok(new_value)
    }

    async fn write_op_0(
        &mut self,
        instr: &Instruction,
        value: u64,
    ) -> Result<(), InternalError<T::Error>> {
        match instr.op0_kind() {
            OpKind::Memory => {
                self.write_memory_op(instr, 0, AlignmentMode::Standard, value)
                    .await?
            }
            OpKind::Register => {
                self.cpu.set_gp(instr.op0_register().into(), value);
            }
            _ => Err(self.unsupported_instruction(instr))?,
        };
        Ok(())
    }

    async fn compare_if_locked_and_write_op_0(
        &mut self,
        instr: &Instruction,
        current: u64,
        new: u64,
    ) -> Result<(), InternalError<T::Error>> {
        match instr.op0_kind() {
            OpKind::Memory => {
                self.compare_if_locked_and_write_memory_op(
                    instr,
                    0,
                    AlignmentMode::Standard,
                    current,
                    new,
                )
                .await?
            }
            OpKind::Register => {
                self.cpu.set_gp(instr.op0_register().into(), new);
            }
            _ => Err(self.unsupported_instruction(instr))?,
        };
        Ok(())
    }

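    /// Decodes the instruction at the current RIP from the bytes provided at
    /// construction and emulates it.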
    pub async fn run(&mut self) -> Result<(), Box<Error<T::Error>>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);
        let bitness = bitness(cr0, efer, cs);
        let mut decoder = Decoder::new(bitness.into(), self.bytes, self.decoder_options);
        decoder.set_ip(self.cpu.rip());
        let instr = decoder.decode();
        if instr.code() == Code::INVALID {
            match decoder.last_error() {
                DecoderError::None => unreachable!(),
                DecoderError::NoMoreBytes => return Err(Box::new(Error::NotEnoughBytes)),
                err => {
                    tracing::warn!(
                        error = ?err,
                        bytes = ?self.bytes,
                        "could not decode instruction"
                    );
                    return Err(Box::new(Error::DecodeFailure));
                }
            }
        }
        tracing::trace!(
            bytes = ?self.bytes[..instr.len()],
            cs = ?self.cpu.segment(Segment::CS),
            rip = self.cpu.rip(),
            ?bitness,
            "Emulating instruction",
        );
        match self.emulate(&instr).await {
            // A retry leaves RIP unchanged so the instruction can be
            // re-executed.
            Ok(()) | Err(InternalError::Retry) => {}
            Err(InternalError::Error(err)) => return Err(err),
        }
        Ok(())
    }

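    /// Emulates a single decoded instruction, advancing RIP on success and
    /// reporting a #DB exception (with the trap flag cleared) if the trap
    /// flag was set.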
    async fn emulate(&mut self, instr: &Instruction) -> Result<(), InternalError<T::Error>> {
        if !instr.op_kinds().any(|x| x == OpKind::Memory) && !instr.is_string_instruction() {
            Err(Error::NonMemoryOrPortInstruction(
                self.bytes[..instr.len()].into(),
            ))?;
        }

        match instr.code() {
            Code::Mov_rm8_r8
            | Code::Mov_rm16_r16
            | Code::Mov_rm32_r32
            | Code::Mov_rm64_r64
            | Code::Mov_r8_rm8
            | Code::Mov_r16_rm16
            | Code::Mov_r32_rm32
            | Code::Mov_r64_rm64
            | Code::Mov_AL_moffs8
            | Code::Mov_AX_moffs16
            | Code::Mov_EAX_moffs32
            | Code::Mov_RAX_moffs64
            | Code::Mov_moffs8_AL
            | Code::Mov_moffs16_AX
            | Code::Mov_moffs32_EAX
            | Code::Mov_moffs64_RAX
            | Code::Mov_rm8_imm8
            | Code::Mov_rm16_imm16
            | Code::Mov_rm32_imm32
            | Code::Mov_rm64_imm32
            | Code::Movzx_r16_rm8
            | Code::Movzx_r32_rm8
            | Code::Movzx_r64_rm8
            | Code::Movzx_r16_rm16
            | Code::Movzx_r32_rm16
            | Code::Movzx_r64_rm16
            | Code::Movdiri_m32_r32
            | Code::Movdiri_m64_r64
            | Code::Movnti_m32_r32
            | Code::Movnti_m64_r64 => self.mov(instr).await,

            Code::Movsx_r16_rm8
            | Code::Movsx_r32_rm8
            | Code::Movsx_r64_rm8
            | Code::Movsx_r16_rm16
            | Code::Movsx_r32_rm16
            | Code::Movsx_r64_rm16
            | Code::Movsxd_r16_rm16
            | Code::Movsxd_r32_rm32
            | Code::Movsxd_r64_rm32 => self.movsx(instr).await,

            Code::Movups_xmm_xmmm128
            | Code::Movups_xmmm128_xmm
            | Code::Movupd_xmm_xmmm128
            | Code::Movupd_xmmm128_xmm
            | Code::Movdqu_xmm_xmmm128
            | Code::Movdqu_xmmm128_xmm
            | Code::Movntdq_m128_xmm
            | Code::Movntps_m128_xmm
            | Code::Movntpd_m128_xmm => self.mov_sse(instr, AlignmentMode::Unaligned).await,

            Code::Movaps_xmm_xmmm128
            | Code::Movaps_xmmm128_xmm
            | Code::Movapd_xmm_xmmm128
            | Code::Movapd_xmmm128_xmm
            | Code::Movdqa_xmm_xmmm128
            | Code::Movdqa_xmmm128_xmm => self.mov_sse(instr, AlignmentMode::Aligned(16)).await,

            Code::Movdir64b_r16_m512 | Code::Movdir64b_r32_m512 | Code::Movdir64b_r64_m512 => {
                self.movdir64b(instr).await
            }

            Code::Movsb_m8_m8 | Code::Movsw_m16_m16 | Code::Movsd_m32_m32 | Code::Movsq_m64_m64 => {
                self.movs(instr).await
            }

            Code::Cmp_r64_rm64
            | Code::Cmp_r32_rm32
            | Code::Cmp_r16_rm16
            | Code::Cmp_r8_rm8
            | Code::Cmp_rm64_r64
            | Code::Cmp_rm32_r32
            | Code::Cmp_rm16_r16
            | Code::Cmp_rm8_r8
            | Code::Cmp_rm64_imm32
            | Code::Cmp_rm64_imm8
            | Code::Cmp_rm32_imm32
            | Code::Cmp_rm32_imm8
            | Code::Cmp_rm16_imm16
            | Code::Cmp_rm16_imm8
            | Code::Cmp_rm8_imm8 => self.arith::<arith::CmpOp>(instr).await,

            Code::Xchg_rm8_r8 | Code::Xchg_rm16_r16 | Code::Xchg_rm32_r32 | Code::Xchg_rm64_r64 => {
                self.xchg(instr).await
            }

            Code::Cmpxchg_rm8_r8
            | Code::Cmpxchg_rm16_r16
            | Code::Cmpxchg_rm32_r32
            | Code::Cmpxchg_rm64_r64 => self.cmpxchg(instr).await,

            Code::Test_rm64_r64
            | Code::Test_rm32_r32
            | Code::Test_rm16_r16
            | Code::Test_rm8_r8
            | Code::Test_rm64_imm32
            | Code::Test_rm32_imm32
            | Code::Test_rm16_imm16
            | Code::Test_rm8_imm8 => self.arith::<arith::TestOp>(instr).await,

            Code::And_r64_rm64
            | Code::And_r32_rm32
            | Code::And_r16_rm16
            | Code::And_r8_rm8
            | Code::And_rm64_r64
            | Code::And_rm32_r32
            | Code::And_rm16_r16
            | Code::And_rm8_r8
            | Code::And_rm64_imm32
            | Code::And_rm64_imm8
            | Code::And_rm32_imm32
            | Code::And_rm32_imm8
            | Code::And_rm16_imm16
            | Code::And_rm16_imm8
            | Code::And_rm8_imm8 => self.arith::<arith::AndOp>(instr).await,

            Code::Add_r64_rm64
            | Code::Add_r32_rm32
            | Code::Add_r16_rm16
            | Code::Add_r8_rm8
            | Code::Add_rm64_r64
            | Code::Add_rm32_r32
            | Code::Add_rm16_r16
            | Code::Add_rm8_r8
            | Code::Add_rm64_imm32
            | Code::Add_rm64_imm8
            | Code::Add_rm32_imm32
            | Code::Add_rm32_imm8
            | Code::Add_rm16_imm16
            | Code::Add_rm16_imm8
            | Code::Add_rm8_imm8 => self.arith::<arith::AddOp>(instr).await,

            Code::Adc_r64_rm64
            | Code::Adc_r32_rm32
            | Code::Adc_r16_rm16
            | Code::Adc_r8_rm8
            | Code::Adc_rm64_r64
            | Code::Adc_rm32_r32
            | Code::Adc_rm16_r16
            | Code::Adc_rm8_r8
            | Code::Adc_rm64_imm32
            | Code::Adc_rm64_imm8
            | Code::Adc_rm32_imm32
            | Code::Adc_rm32_imm8
            | Code::Adc_rm16_imm16
            | Code::Adc_rm16_imm8
            | Code::Adc_rm8_imm8 => self.arith::<arith::AdcOp>(instr).await,

            Code::Xadd_rm8_r8 | Code::Xadd_rm16_r16 | Code::Xadd_rm32_r32 | Code::Xadd_rm64_r64 => {
                self.xadd(instr).await
            }

            Code::Sub_r64_rm64
            | Code::Sub_r32_rm32
            | Code::Sub_r16_rm16
            | Code::Sub_r8_rm8
            | Code::Sub_rm64_r64
            | Code::Sub_rm32_r32
            | Code::Sub_rm16_r16
            | Code::Sub_rm8_r8
            | Code::Sub_rm64_imm32
            | Code::Sub_rm64_imm8
            | Code::Sub_rm32_imm32
            | Code::Sub_rm32_imm8
            | Code::Sub_rm16_imm16
            | Code::Sub_rm16_imm8
            | Code::Sub_rm8_imm8 => self.arith::<arith::SubOp>(instr).await,

            Code::Sbb_r64_rm64
            | Code::Sbb_r32_rm32
            | Code::Sbb_r16_rm16
            | Code::Sbb_r8_rm8
            | Code::Sbb_rm64_r64
            | Code::Sbb_rm32_r32
            | Code::Sbb_rm16_r16
            | Code::Sbb_rm8_r8
            | Code::Sbb_rm64_imm32
            | Code::Sbb_rm64_imm8
            | Code::Sbb_rm32_imm32
            | Code::Sbb_rm32_imm8
            | Code::Sbb_rm16_imm16
            | Code::Sbb_rm16_imm8
            | Code::Sbb_rm8_imm8 => self.arith::<arith::SbbOp>(instr).await,

            Code::Or_r64_rm64
            | Code::Or_r32_rm32
            | Code::Or_r16_rm16
            | Code::Or_r8_rm8
            | Code::Or_rm64_r64
            | Code::Or_rm32_r32
            | Code::Or_rm16_r16
            | Code::Or_rm8_r8
            | Code::Or_rm64_imm32
            | Code::Or_rm64_imm8
            | Code::Or_rm32_imm32
            | Code::Or_rm32_imm8
            | Code::Or_rm16_imm16
            | Code::Or_rm16_imm8
            | Code::Or_rm8_imm8 => self.arith::<arith::OrOp>(instr).await,

            Code::Xor_r64_rm64
            | Code::Xor_r32_rm32
            | Code::Xor_r16_rm16
            | Code::Xor_r8_rm8
            | Code::Xor_rm64_r64
            | Code::Xor_rm32_r32
            | Code::Xor_rm16_r16
            | Code::Xor_rm8_r8
            | Code::Xor_rm64_imm32
            | Code::Xor_rm64_imm8
            | Code::Xor_rm32_imm32
            | Code::Xor_rm32_imm8
            | Code::Xor_rm16_imm16
            | Code::Xor_rm16_imm8
            | Code::Xor_rm8_imm8 => self.arith::<arith::XorOp>(instr).await,

            Code::Neg_rm8 | Code::Neg_rm16 | Code::Neg_rm32 | Code::Neg_rm64 => {
                self.unary_arith::<arith::NegOp>(instr).await
            }

            Code::Not_rm8 | Code::Not_rm16 | Code::Not_rm32 | Code::Not_rm64 => {
                self.unary_arith::<arith::NotOp>(instr).await
            }

            Code::Mul_rm8 | Code::Mul_rm16 | Code::Mul_rm32 | Code::Mul_rm64 => {
                self.unary_mul(instr).await
            }

            Code::Imul_rm8 | Code::Imul_rm16 | Code::Imul_rm32 | Code::Imul_rm64 => {
                self.unary_imul(instr).await
            }

            Code::Imul_r16_rm16
            | Code::Imul_r32_rm32
            | Code::Imul_r64_rm64
            | Code::Imul_r16_rm16_imm8
            | Code::Imul_r16_rm16_imm16
            | Code::Imul_r32_rm32_imm8
            | Code::Imul_r32_rm32_imm32
            | Code::Imul_r64_rm64_imm8
            | Code::Imul_r64_rm64_imm32 => self.imul(instr).await,

            Code::Div_rm8 | Code::Div_rm16 | Code::Div_rm32 | Code::Div_rm64 => {
                self.unary_div(instr).await
            }

            Code::Idiv_rm8 | Code::Idiv_rm16 | Code::Idiv_rm32 | Code::Idiv_rm64 => {
                self.unary_idiv(instr).await
            }

            Code::Shl_rm8_1
            | Code::Shl_rm8_CL
            | Code::Shl_rm8_imm8
            | Code::Shl_rm16_1
            | Code::Shl_rm16_CL
            | Code::Shl_rm16_imm8
            | Code::Shl_rm32_1
            | Code::Shl_rm32_CL
            | Code::Shl_rm32_imm8
            | Code::Shl_rm64_1
            | Code::Shl_rm64_CL
            | Code::Shl_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::SxlOp>(instr)
                    .await
            }

            Code::Shr_rm8_1
            | Code::Shr_rm8_CL
            | Code::Shr_rm8_imm8
            | Code::Shr_rm16_1
            | Code::Shr_rm16_CL
            | Code::Shr_rm16_imm8
            | Code::Shr_rm32_1
            | Code::Shr_rm32_CL
            | Code::Shr_rm32_imm8
            | Code::Shr_rm64_1
            | Code::Shr_rm64_CL
            | Code::Shr_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::ShrOp>(instr)
                    .await
            }

            Code::Sar_rm8_1
            | Code::Sar_rm8_CL
            | Code::Sar_rm8_imm8
            | Code::Sar_rm16_1
            | Code::Sar_rm16_CL
            | Code::Sar_rm16_imm8
            | Code::Sar_rm32_1
            | Code::Sar_rm32_CL
            | Code::Sar_rm32_imm8
            | Code::Sar_rm64_1
            | Code::Sar_rm64_CL
            | Code::Sar_rm64_imm8 => self.shift_arithmetic_right(instr).await,

            Code::Sal_rm8_1
            | Code::Sal_rm8_CL
            | Code::Sal_rm8_imm8
            | Code::Sal_rm16_1
            | Code::Sal_rm16_CL
            | Code::Sal_rm16_imm8
            | Code::Sal_rm32_1
            | Code::Sal_rm32_CL
            | Code::Sal_rm32_imm8
            | Code::Sal_rm64_1
            | Code::Sal_rm64_CL
            | Code::Sal_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::SxlOp>(instr)
                    .await
            }

            Code::Shld_rm16_r16_CL
            | Code::Shld_rm16_r16_imm8
            | Code::Shld_rm32_r32_CL
            | Code::Shld_rm32_r32_imm8
            | Code::Shld_rm64_r64_CL
            | Code::Shld_rm64_r64_imm8 => self.shld(instr).await,

            Code::Shrd_rm16_r16_CL
            | Code::Shrd_rm16_r16_imm8
            | Code::Shrd_rm32_r32_CL
            | Code::Shrd_rm32_r32_imm8
            | Code::Shrd_rm64_r64_CL
            | Code::Shrd_rm64_r64_imm8 => self.shrd(instr).await,

            Code::Rcl_rm8_1
            | Code::Rcl_rm8_CL
            | Code::Rcl_rm8_imm8
            | Code::Rcl_rm16_1
            | Code::Rcl_rm16_CL
            | Code::Rcl_rm16_imm8
            | Code::Rcl_rm32_1
            | Code::Rcl_rm32_CL
            | Code::Rcl_rm32_imm8
            | Code::Rcl_rm64_1
            | Code::Rcl_rm64_CL
            | Code::Rcl_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RclOp>(instr)
                    .await
            }

            Code::Rcr_rm8_1
            | Code::Rcr_rm8_CL
            | Code::Rcr_rm8_imm8
            | Code::Rcr_rm16_1
            | Code::Rcr_rm16_CL
            | Code::Rcr_rm16_imm8
            | Code::Rcr_rm32_1
            | Code::Rcr_rm32_CL
            | Code::Rcr_rm32_imm8
            | Code::Rcr_rm64_1
            | Code::Rcr_rm64_CL
            | Code::Rcr_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RcrOp>(instr)
                    .await
            }

            Code::Rol_rm8_1
            | Code::Rol_rm8_CL
            | Code::Rol_rm8_imm8
            | Code::Rol_rm16_1
            | Code::Rol_rm16_CL
            | Code::Rol_rm16_imm8
            | Code::Rol_rm32_1
            | Code::Rol_rm32_CL
            | Code::Rol_rm32_imm8
            | Code::Rol_rm64_1
            | Code::Rol_rm64_CL
            | Code::Rol_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RolOp>(instr)
                    .await
            }

            Code::Ror_rm8_1
            | Code::Ror_rm8_CL
            | Code::Ror_rm8_imm8
            | Code::Ror_rm16_1
            | Code::Ror_rm16_CL
            | Code::Ror_rm16_imm8
            | Code::Ror_rm32_1
            | Code::Ror_rm32_CL
            | Code::Ror_rm32_imm8
            | Code::Ror_rm64_1
            | Code::Ror_rm64_CL
            | Code::Ror_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RorOp>(instr)
                    .await
            }

            Code::Outsb_DX_m8 | Code::Outsw_DX_m16 | Code::Outsd_DX_m32 => self.outs(instr).await,

            Code::Insb_m8_DX | Code::Insw_m16_DX | Code::Insd_m32_DX => self.ins(instr).await,

            Code::Lodsb_AL_m8 | Code::Lodsw_AX_m16 | Code::Lodsd_EAX_m32 | Code::Lodsq_RAX_m64 => {
                self.lods(instr).await
            }

            Code::Stosb_m8_AL | Code::Stosw_m16_AX | Code::Stosd_m32_EAX | Code::Stosq_m64_RAX => {
                self.stos(instr).await
            }

            Code::Cmpsb_m8_m8 | Code::Cmpsw_m16_m16 | Code::Cmpsd_m32_m32 | Code::Cmpsq_m64_m64 => {
                self.cmps(instr).await
            }

            Code::Scasb_AL_m8 | Code::Scasw_AX_m16 | Code::Scasd_EAX_m32 | Code::Scasq_RAX_m64 => {
                self.scas(instr).await
            }

            Code::Bt_rm16_imm8
            | Code::Bt_rm32_imm8
            | Code::Bt_rm64_imm8
            | Code::Bt_rm16_r16
            | Code::Bt_rm32_r32
            | Code::Bt_rm64_r64 => self.bt_m::<bt::TestOp>(instr).await,
            Code::Bts_rm16_imm8
            | Code::Bts_rm32_imm8
            | Code::Bts_rm64_imm8
            | Code::Bts_rm16_r16
            | Code::Bts_rm32_r32
            | Code::Bts_rm64_r64 => self.bt_m::<bt::SetOp>(instr).await,
            Code::Btr_rm16_imm8
            | Code::Btr_rm32_imm8
            | Code::Btr_rm64_imm8
            | Code::Btr_rm16_r16
            | Code::Btr_rm32_r32
            | Code::Btr_rm64_r64 => self.bt_m::<bt::ResetOp>(instr).await,
            Code::Btc_rm16_imm8
            | Code::Btc_rm32_imm8
            | Code::Btc_rm64_imm8
            | Code::Btc_rm16_r16
            | Code::Btc_rm32_r32
            | Code::Btc_rm64_r64 => self.bt_m::<bt::ComplementOp>(instr).await,

            Code::Inc_rm8 | Code::Inc_rm16 | Code::Inc_rm32 | Code::Inc_rm64 => {
                self.unary_arith::<arith::IncOp>(instr).await
            }
            Code::Dec_rm8 | Code::Dec_rm16 | Code::Dec_rm32 | Code::Dec_rm64 => {
                self.unary_arith::<arith::DecOp>(instr).await
            }

            Code::Seta_rm8
            | Code::Setae_rm8
            | Code::Setb_rm8
            | Code::Setbe_rm8
            | Code::Sete_rm8
            | Code::Setg_rm8
            | Code::Setge_rm8
            | Code::Setl_rm8
            | Code::Setle_rm8
            | Code::Setne_rm8
            | Code::Setno_rm8
            | Code::Setnp_rm8
            | Code::Setns_rm8
            | Code::Seto_rm8
            | Code::Setp_rm8
            | Code::Sets_rm8 => self.setcc(instr).await,

            Code::Cmova_r16_rm16
            | Code::Cmova_r32_rm32
            | Code::Cmova_r64_rm64
            | Code::Cmovae_r16_rm16
            | Code::Cmovae_r32_rm32
            | Code::Cmovae_r64_rm64
            | Code::Cmovb_r16_rm16
            | Code::Cmovb_r32_rm32
            | Code::Cmovb_r64_rm64
            | Code::Cmovbe_r16_rm16
            | Code::Cmovbe_r32_rm32
            | Code::Cmovbe_r64_rm64
            | Code::Cmove_r16_rm16
            | Code::Cmove_r32_rm32
            | Code::Cmove_r64_rm64
            | Code::Cmovg_r16_rm16
            | Code::Cmovg_r32_rm32
            | Code::Cmovg_r64_rm64
            | Code::Cmovge_r16_rm16
            | Code::Cmovge_r32_rm32
            | Code::Cmovge_r64_rm64
            | Code::Cmovl_r16_rm16
            | Code::Cmovl_r32_rm32
            | Code::Cmovl_r64_rm64
            | Code::Cmovle_r16_rm16
            | Code::Cmovle_r32_rm32
            | Code::Cmovle_r64_rm64
            | Code::Cmovne_r16_rm16
            | Code::Cmovne_r32_rm32
            | Code::Cmovne_r64_rm64
            | Code::Cmovno_r16_rm16
            | Code::Cmovno_r32_rm32
            | Code::Cmovno_r64_rm64
            | Code::Cmovnp_r16_rm16
            | Code::Cmovnp_r32_rm32
            | Code::Cmovnp_r64_rm64
            | Code::Cmovns_r16_rm16
            | Code::Cmovns_r32_rm32
            | Code::Cmovns_r64_rm64
            | Code::Cmovo_r16_rm16
            | Code::Cmovo_r32_rm32
            | Code::Cmovo_r64_rm64
            | Code::Cmovp_r16_rm16
            | Code::Cmovp_r32_rm32
            | Code::Cmovp_r64_rm64
            | Code::Cmovs_r16_rm16
            | Code::Cmovs_r32_rm32
            | Code::Cmovs_r64_rm64 => self.cmovcc(instr).await,

            Code::Cmpxchg8b_m64 | Code::Cmpxchg16b_m128 => self.cmpxchg8_16(instr).await,

            // Port I/O instructions (IN/OUT) are recognized here but not
            // emulated; they fall into the unsupported-instruction error
            // along with everything else unhandled.
            Code::In_AL_imm8
            | Code::In_AX_imm8
            | Code::In_EAX_imm8
            | Code::In_AL_DX
            | Code::In_AX_DX
            | Code::In_EAX_DX
            | Code::Out_imm8_AL
            | Code::Out_imm8_AX
            | Code::Out_imm8_EAX
            | Code::Out_DX_AL
            | Code::Out_DX_AX
            | Code::Out_DX_EAX
            | _ => Err(self.unsupported_instruction(instr).into()),
        }?;

        self.cpu.set_rip(instr.next_ip());
        // If the trap flag was set, clear it and report a #DB so the caller
        // can inject the single-step exception.
        let mut rflags = self.cpu.rflags();
        if rflags.trap() {
            rflags.set_trap(false);
            self.cpu.set_rflags(rflags);
            return Err(Error::InstructionException(
                Exception::DEBUG,
                None,
                ExceptionCause::DebugTrap,
            ))?;
        }

        Ok(())
    }

    fn unsupported_instruction(&self, instr: &Instruction) -> Error<T::Error> {
        Error::UnsupportedInstruction(self.bytes[..instr.len()].into())
    }
}