use crate::Cpu;
use crate::registers::Bitness;
use crate::registers::Gp;
use crate::registers::GpSize;
use crate::registers::RegisterIndex;
use crate::registers::Segment;
use crate::registers::bitness;
use iced_x86::Code;
use iced_x86::Decoder;
use iced_x86::DecoderError;
use iced_x86::DecoderOptions;
use iced_x86::Instruction;
use iced_x86::OpKind;
use iced_x86::Register;
use thiserror::Error;
use x86defs::Exception;

mod arith;
mod bt;
mod cmpxchg816;
mod cond;
pub mod fast_path;
mod instruction;
mod mov;
mod muldiv;
mod rep;
mod rflags;
mod shift_rotate;

pub use rep::MAX_REP_LOOPS;

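/// Helper trait for values that can be read from or written to memory as
/// little-endian bytes, implemented for the register-sized integer types
/// `u64` and `u128`.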
trait EmulatorRegister {
    type Array: std::ops::Index<std::ops::RangeTo<usize>, Output = [u8]>
        + std::ops::IndexMut<std::ops::RangeTo<usize>>;
    fn empty_bytes() -> Self::Array;
    fn from_le_bytes(bytes: Self::Array) -> Self;
    fn to_le_bytes(&self) -> Self::Array;
}
impl EmulatorRegister for u64 {
    type Array = [u8; 8];
    fn empty_bytes() -> Self::Array {
        [0; 8]
    }
    fn from_le_bytes(bytes: Self::Array) -> Self {
        Self::from_le_bytes(bytes)
    }
    fn to_le_bytes(&self) -> Self::Array {
        (*self).to_le_bytes()
    }
}
impl EmulatorRegister for u128 {
    type Array = [u8; 16];
    fn empty_bytes() -> Self::Array {
        [0; 16]
    }
    fn from_le_bytes(bytes: Self::Array) -> Self {
        Self::from_le_bytes(bytes)
    }
    fn to_le_bytes(&self) -> Self::Array {
        (*self).to_le_bytes()
    }
}

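/// Converts an iced-x86 [`Register`] into the emulator's [`RegisterIndex`],
/// capturing both the full 64-bit register and the accessed size (including
/// the high-byte forms AH/CH/DH/BH).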
impl From<Register> for RegisterIndex {
    fn from(val: Register) -> Self {
        let size = match val.size() {
            1 => {
                if val >= Register::SPL || val < Register::AH {
                    GpSize::BYTE(0)
                } else {
                    GpSize::BYTE(8)
                }
            }
            2 => GpSize::WORD,
            4 => GpSize::DWORD,
            8 => GpSize::QWORD,
            _ => panic!("invalid gp register size"),
        };
        let extended_index = match val.full_register() {
            Register::RAX => Gp::RAX,
            Register::RCX => Gp::RCX,
            Register::RDX => Gp::RDX,
            Register::RBX => Gp::RBX,
            Register::RSP => Gp::RSP,
            Register::RBP => Gp::RBP,
            Register::RSI => Gp::RSI,
            Register::RDI => Gp::RDI,
            Register::R8 => Gp::R8,
            Register::R9 => Gp::R9,
            Register::R10 => Gp::R10,
            Register::R11 => Gp::R11,
            Register::R12 => Gp::R12,
            Register::R13 => Gp::R13,
            Register::R14 => Gp::R14,
            Register::R15 => Gp::R15,
            _ => panic!("invalid gp register index"),
        };
        RegisterIndex {
            extended_index,
            size,
        }
    }
}

impl From<Register> for Segment {
    fn from(val: Register) -> Self {
        match val {
            Register::ES => Segment::ES,
            Register::CS => Segment::CS,
            Register::SS => Segment::SS,
            Register::DS => Segment::DS,
            Register::FS => Segment::FS,
            Register::GS => Segment::GS,
            _ => panic!("invalid segment register index"),
        }
    }
}

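/// An x86 instruction emulator that decodes the provided instruction bytes
/// and performs the resulting memory and I/O port accesses through a [`Cpu`]
/// implementation.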
#[derive(Debug)]
pub struct Emulator<'a, T> {
    cpu: T,
    decoder_options: u32,
    bytes: &'a [u8],
}

#[derive(Debug, Error)]
pub enum Error<E> {
    #[error("asked to emulate an instruction that doesn't touch memory or PIO")]
    NonMemoryOrPortInstruction(Vec<u8>),
    #[error("unsupported instruction")]
    UnsupportedInstruction(Vec<u8>),
    #[error("memory access error - {1:?} @ {0:#x}")]
    MemoryAccess(u64, OperationKind, #[source] E),
    #[error("io port access error - {1:?} @ {0:#x}")]
    IoPort(u16, OperationKind, #[source] E),
    #[error("executing instruction caused exception due to {2:?} - {0:?}({1:?})")]
    InstructionException(Exception, Option<u32>, ExceptionCause),
    #[error("decode failure")]
    DecodeFailure,
    #[error("not enough instruction bytes")]
    NotEnoughBytes,
}

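/// Internal result type used while emulating a single instruction. `Retry`
/// indicates that emulation should be attempted again (for example, a locked
/// compare-and-write did not observe the expected value), while `Error`
/// carries a fatal [`Error`].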
enum InternalError<E> {
    Retry,
    Error(Box<Error<E>>),
}

impl<E> From<Error<E>> for InternalError<E> {
    fn from(err: Error<E>) -> Self {
        InternalError::Error(Box::new(err))
    }
}

#[derive(Debug)]
pub enum ExceptionCause {
    MandatoryAlignment,
    AlignmentCheck,
    DebugTrap,
    DivideOverflow,
    DivideByZero,
    IoPrivilegeLevel,
    SegmentValidity,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum OperationKind {
    Read,
    Write,
    AddressComputation,
}

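/// Alignment requirement applied when validating a guest virtual address.
/// `Aligned(n)` requires the address to be a multiple of `n`, `Unaligned`
/// performs no check, and `Standard` applies the architectural alignment
/// check (#AC) when it is enabled (user mode with RFLAGS.AC and CR0.AM set).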
#[derive(Copy, Clone)]
pub enum AlignmentMode {
    Aligned(u64),
    Unaligned,
    Standard,
}

impl<'a, T: Cpu> Emulator<'a, T> {
    pub fn new(cpu: T, vendor: x86defs::cpuid::Vendor, bytes: &'a [u8]) -> Self {
        let mut decoder_options = 0;
        if vendor.is_amd_compatible() {
            decoder_options |= DecoderOptions::AMD;
        }
        Emulator {
            cpu,
            decoder_options,
            bytes,
        }
    }

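    /// Computes the linear instruction pointer at `offset` bytes past RIP,
    /// adding the CS base (and validating the access against CS) outside of
    /// 64-bit mode. Returns `None` if the segment access check fails.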
    pub fn linear_ip(&mut self, offset: u64) -> Option<u64> {
        let rip = self.cpu.rip().wrapping_add(offset);
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);

        match bitness(cr0, efer, cs) {
            Bitness::Bit64 => Some(rip),
            Bitness::Bit32 | Bitness::Bit16 => {
                self.verify_segment_access(
                    Segment::CS,
                    OperationKind::AddressComputation,
                    offset,
                    1,
                )
                .ok()?;
                Some(cs.base.wrapping_add(rip))
            }
        }
    }

    fn current_privilege_level(&mut self) -> u8 {
        self.cpu
            .segment(Segment::SS)
            .attributes
            .descriptor_privilege_level()
    }

    pub fn is_user_mode(&mut self) -> bool {
        self.current_privilege_level() == x86defs::USER_MODE_DPL
    }

    fn memory_op_offset(&mut self, instr: &Instruction, operand: u32) -> u64 {
        instruction::memory_op_offset(&mut self.cpu, instr, operand)
    }

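    /// Computes the guest virtual address for a `len`-byte access at `offset`
    /// within `segment`, performing segmentation checks outside of 64-bit
    /// mode and then the requested alignment check.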
    fn compute_and_validate_gva(
        &mut self,
        segment: Segment,
        offset: u64,
        len: usize,
        op: OperationKind,
        alignment: AlignmentMode,
    ) -> Result<u64, Error<T::Error>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);

        let base = match bitness(cr0, efer, cs) {
            Bitness::Bit64 => {
                if matches!(segment, Segment::FS | Segment::GS) {
                    self.cpu.segment(segment).base
                } else {
                    0
                }
            }
            Bitness::Bit32 | Bitness::Bit16 => {
                self.verify_segment_access(segment, op, offset, len)?;
                self.cpu.segment(segment).base
            }
        };

        let gva = base.wrapping_add(offset);
        tracing::trace!(?op, base, offset, gva, "compute_gva");
        self.verify_gva_alignment(gva, len, alignment)?;
        Ok(gva)
    }

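    /// Validates a segmented memory access in 16-bit or 32-bit mode, checking
    /// segment presence, type, privilege, and limits. Returns the appropriate
    /// general-protection or segment-not-present exception on failure.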
    fn verify_segment_access(
        &mut self,
        segment: Segment,
        op: OperationKind,
        offset: u64,
        len: usize,
    ) -> Result<(), Error<T::Error>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);
        let bitness = bitness(cr0, efer, cs);
        assert_ne!(bitness, Bitness::Bit64);

        let segment_value = self.cpu.segment(segment);

        let offset_end = offset + len as u64 - 1;

        let gp0 = Error::InstructionException(
            Exception::GENERAL_PROTECTION_FAULT,
            Some(0),
            ExceptionCause::SegmentValidity,
        );
        let gpindex = Error::InstructionException(
            Exception::GENERAL_PROTECTION_FAULT,
            Some(segment_value.selector.into()),
            ExceptionCause::SegmentValidity,
        );

        if matches!(segment, Segment::CS) {
            if bitness == Bitness::Bit32 && op == OperationKind::Write {
                return Err(gp0);
            }

            if op == OperationKind::Read && segment_value.attributes.segment_type() & 0b0010 == 0 {
                return Err(gp0);
            }

            if offset_end > segment_value.limit as u64 {
                return Err(gp0);
            }
        } else {
            if !segment_value.attributes.present() {
                Err(Error::InstructionException(
                    Exception::SEGMENT_NOT_PRESENT,
                    None,
                    ExceptionCause::SegmentValidity,
                ))?;
            }

            if bitness == Bitness::Bit32 && segment_value.selector & !0x3 == 0 {
                return Err(gp0);
            }

            let rpl = if matches!(bitness, Bitness::Bit32) {
                (segment_value.selector & 0x3) as u8
            } else {
                0
            };
            let cpl = self.current_privilege_level();
            let dpl = segment_value.attributes.descriptor_privilege_level();
            if rpl > dpl || cpl > dpl {
                return Err(gpindex);
            }

            if !(segment_value.attributes.non_system_segment()
                && segment_value.attributes.segment_type() & 0b1000 == 0)
            {
                return Err(gpindex);
            }

            if op == OperationKind::Write && segment_value.attributes.segment_type() & 0b0010 == 0 {
                return Err(gp0);
            }

            if segment_value.attributes.segment_type() & 0b0100 == 0 {
                if offset_end > segment_value.limit as u64 {
                    return Err(gp0);
                }
            } else {
                let max = if segment_value.attributes.default() {
                    u32::MAX as u64
                } else {
                    u16::MAX as u64
                };
                if offset <= segment_value.limit as u64 || offset_end > max {
                    return Err(gp0);
                }
            };
        }

        Ok(())
    }

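    /// Enforces the alignment requirement described by [`AlignmentMode`] for
    /// a `len`-byte access at `gva`, raising #GP(0) or #AC as appropriate.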
    fn verify_gva_alignment(
        &mut self,
        gva: u64,
        len: usize,
        alignment: AlignmentMode,
    ) -> Result<(), Error<T::Error>> {
        match alignment {
            AlignmentMode::Aligned(a) => {
                if !gva.is_multiple_of(a) {
                    Err(Error::InstructionException(
                        Exception::GENERAL_PROTECTION_FAULT,
                        Some(0),
                        ExceptionCause::MandatoryAlignment,
                    ))?
                }
            }
            AlignmentMode::Unaligned => {}
            AlignmentMode::Standard => {
                if self.is_user_mode()
                    && self.cpu.rflags().alignment_check()
                    && self.cpu.cr0() & x86defs::X64_CR0_AM != 0
                {
                    if !gva.is_multiple_of(len as u64) {
                        Err(Error::InstructionException(
                            Exception::ALIGNMENT_CHECK,
                            None,
                            ExceptionCause::AlignmentCheck,
                        ))?
                    }
                }
            }
        }
        Ok(())
    }

    pub async fn read_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        data: &mut [u8],
    ) -> Result<(), Error<T::Error>> {
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            data.len(),
            OperationKind::Read,
            alignment,
        )?;
        let user_mode = self.is_user_mode();
        self.cpu
            .read_memory(gva, data, user_mode)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Read, err))?;

        Ok(())
    }

    pub async fn write_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        data: &[u8],
    ) -> Result<(), Error<T::Error>> {
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            data.len(),
            OperationKind::Write,
            alignment,
        )?;
        let cpl = self.is_user_mode();
        self.cpu
            .write_memory(gva, data, cpl)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Write, err))?;

        Ok(())
    }

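    /// Performs a compare-and-write at the given segmented address: `new` is
    /// written only if memory currently contains `current`. Returns whether
    /// the write was performed. Used to emulate locked read-modify-write
    /// instructions.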
    async fn compare_and_write_memory(
        &mut self,
        segment: Segment,
        offset: u64,
        alignment: AlignmentMode,
        current: &[u8],
        new: &[u8],
    ) -> Result<bool, InternalError<T::Error>> {
        assert_eq!(current.len(), new.len());
        let user_mode = self.is_user_mode();
        let gva = self.compute_and_validate_gva(
            segment,
            offset,
            new.len(),
            OperationKind::Write,
            alignment,
        )?;
        let success = self
            .cpu
            .compare_and_write_memory(gva, current, new, user_mode)
            .await
            .map_err(|err| Error::MemoryAccess(gva, OperationKind::Write, err))?;

        Ok(success)
    }

    async fn read_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
    ) -> Result<R, InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        let mut data = R::empty_bytes();
        self.read_memory(
            instr.memory_segment().into(),
            offset,
            alignment,
            &mut data[..instr.memory_size().size()],
        )
        .await?;
        Ok(R::from_le_bytes(data))
    }

    async fn write_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
        data: R,
    ) -> Result<(), InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        self.write_memory(
            instr.memory_segment().into(),
            offset,
            alignment,
            &data.to_le_bytes()[..instr.memory_size().size()],
        )
        .await?;
        Ok(())
    }

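    /// Writes the result of an instruction back to its memory operand. For
    /// locked instructions (and XCHG, which is implicitly locked), the write
    /// is performed as a compare-and-write against the previously read value;
    /// if the comparison fails, the caller is asked to retry the emulation.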
    async fn compare_if_locked_and_write_memory_op<R: EmulatorRegister>(
        &mut self,
        instr: &Instruction,
        operand: u32,
        alignment: AlignmentMode,
        current: R,
        new: R,
    ) -> Result<(), InternalError<T::Error>> {
        let offset = self.memory_op_offset(instr, operand);
        if instr.has_lock_prefix() || instr.mnemonic() == iced_x86::Mnemonic::Xchg {
            if !self
                .compare_and_write_memory(
                    instr.memory_segment().into(),
                    offset,
                    alignment,
                    &current.to_le_bytes()[..instr.memory_size().size()],
                    &new.to_le_bytes()[..instr.memory_size().size()],
                )
                .await?
            {
                return Err(InternalError::Retry);
            }
        } else {
            self.write_memory(
                instr.memory_segment().into(),
                offset,
                alignment,
                &new.to_le_bytes()[..instr.memory_size().size()],
            )
            .await?;
        }
        Ok(())
    }

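    /// Raises #GP(0) if the current privilege level exceeds the I/O privilege
    /// level in RFLAGS.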
    fn check_io_privilege_level(&mut self) -> Result<(), InternalError<T::Error>> {
        if self.current_privilege_level() > self.cpu.rflags().io_privilege_level() {
            Err(Error::InstructionException(
                Exception::GENERAL_PROTECTION_FAULT,
                Some(0),
                ExceptionCause::IoPrivilegeLevel,
            ))?;
        }
        Ok(())
    }

    async fn read_io(&mut self, port: u16, data: &mut [u8]) -> Result<(), InternalError<T::Error>> {
        self.check_io_privilege_level()?;
        self.cpu
            .read_io(port, data)
            .await
            .map_err(|err| Error::IoPort(port, OperationKind::Read, err))?;

        Ok(())
    }

    async fn write_io(&mut self, port: u16, data: &[u8]) -> Result<(), InternalError<T::Error>> {
        self.check_io_privilege_level()?;
        self.cpu
            .write_io(port, data)
            .await
            .map_err(|err| Error::IoPort(port, OperationKind::Write, err))?;

        Ok(())
    }

    async fn op_value(
        &mut self,
        instr: &Instruction,
        operand: u32,
    ) -> Result<u64, InternalError<T::Error>> {
        Ok(match instr.op_kind(operand) {
            OpKind::Memory => {
                self.read_memory_op(instr, operand, AlignmentMode::Standard)
                    .await?
            }
            OpKind::Register => self.cpu.gp(instr.op_register(operand).into()),
            OpKind::Immediate8
            | OpKind::Immediate16
            | OpKind::Immediate32
            | OpKind::Immediate64
            | OpKind::Immediate8to16
            | OpKind::Immediate8to32
            | OpKind::Immediate8to64
            | OpKind::Immediate32to64 => instr.immediate(operand),
            _ => Err(self.unsupported_instruction(instr))?,
        })
    }

    async fn op_value_sign_extend(
        &mut self,
        instr: &Instruction,
        operand: u32,
    ) -> Result<i64, InternalError<T::Error>> {
        let value = self.op_value(instr, operand).await?;
        let size = instr.memory_size().size();
        let shift_size = 64 - (size * 8);
        let new_value = ((value as i64) << shift_size) >> shift_size;
        Ok(new_value)
    }

    async fn write_op_0(
        &mut self,
        instr: &Instruction,
        value: u64,
    ) -> Result<(), InternalError<T::Error>> {
        match instr.op0_kind() {
            OpKind::Memory => {
                self.write_memory_op(instr, 0, AlignmentMode::Standard, value)
                    .await?
            }
            OpKind::Register => {
                self.cpu.set_gp(instr.op0_register().into(), value);
            }
            _ => Err(self.unsupported_instruction(instr))?,
        };
        Ok(())
    }

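    /// Writes `new` to the instruction's first operand. When the destination
    /// is memory, the write goes through the locked compare-and-write path so
    /// that atomic read-modify-write instructions are emulated correctly.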
    async fn compare_if_locked_and_write_op_0(
        &mut self,
        instr: &Instruction,
        current: u64,
        new: u64,
    ) -> Result<(), InternalError<T::Error>> {
        match instr.op0_kind() {
            OpKind::Memory => {
                self.compare_if_locked_and_write_memory_op(
                    instr,
                    0,
                    AlignmentMode::Standard,
                    current,
                    new,
                )
                .await?
            }
            OpKind::Register => {
                self.cpu.set_gp(instr.op0_register().into(), new);
            }
            _ => Err(self.unsupported_instruction(instr))?,
        };
        Ok(())
    }

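    /// Decodes the instruction at the current RIP from the provided bytes and
    /// emulates it, updating CPU state through the [`Cpu`] trait.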
    pub async fn run(&mut self) -> Result<(), Box<Error<T::Error>>> {
        let cr0 = self.cpu.cr0();
        let efer = self.cpu.efer();
        let cs = self.cpu.segment(Segment::CS);
        let bitness = bitness(cr0, efer, cs);
        let mut decoder = Decoder::new(bitness.into(), self.bytes, self.decoder_options);
        decoder.set_ip(self.cpu.rip());
        let instr = decoder.decode();
        if instr.code() == Code::INVALID {
            match decoder.last_error() {
                DecoderError::None => unreachable!(),
                DecoderError::NoMoreBytes => return Err(Box::new(Error::NotEnoughBytes)),
                err => {
                    tracing::warn!(
                        error = ?err,
                        bytes = ?self.bytes,
                        "could not decode instruction"
                    );
                    return Err(Box::new(Error::DecodeFailure));
                }
            }
        }
        tracing::trace!(
            bytes = ?self.bytes[..instr.len()],
            cs = ?self.cpu.segment(Segment::CS),
            rip = self.cpu.rip(),
            ?bitness,
            "Emulating instruction",
        );
        match self.emulate(&instr).await {
            Ok(()) | Err(InternalError::Retry) => {}
            Err(InternalError::Error(err)) => return Err(err),
        }
        Ok(())
    }

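    /// Emulates a single decoded instruction, dispatching on its opcode.
    /// Returns [`InternalError::Retry`] if the instruction should be emulated
    /// again (for example, after a failed locked compare-and-write).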
    async fn emulate(&mut self, instr: &Instruction) -> Result<(), InternalError<T::Error>> {
        if !instr.op_kinds().any(|x| x == OpKind::Memory) && !instr.is_string_instruction() {
            Err(Error::NonMemoryOrPortInstruction(
                self.bytes[..instr.len()].into(),
            ))?;
        }

        match instr.code() {
            Code::Mov_rm8_r8
            | Code::Mov_rm16_r16
            | Code::Mov_rm32_r32
            | Code::Mov_rm64_r64
            | Code::Mov_r8_rm8
            | Code::Mov_r16_rm16
            | Code::Mov_r32_rm32
            | Code::Mov_r64_rm64
            | Code::Mov_AL_moffs8
            | Code::Mov_AX_moffs16
            | Code::Mov_EAX_moffs32
            | Code::Mov_RAX_moffs64
            | Code::Mov_moffs8_AL
            | Code::Mov_moffs16_AX
            | Code::Mov_moffs32_EAX
            | Code::Mov_moffs64_RAX
            | Code::Mov_rm8_imm8
            | Code::Mov_rm16_imm16
            | Code::Mov_rm32_imm32
            | Code::Mov_rm64_imm32
            | Code::Movzx_r16_rm8
            | Code::Movzx_r32_rm8
            | Code::Movzx_r64_rm8
            | Code::Movzx_r16_rm16
            | Code::Movzx_r32_rm16
            | Code::Movzx_r64_rm16
            | Code::Movdiri_m32_r32
            | Code::Movdiri_m64_r64
            | Code::Movnti_m32_r32
            | Code::Movnti_m64_r64 => self.mov(instr).await,

            Code::Movsx_r16_rm8
            | Code::Movsx_r32_rm8
            | Code::Movsx_r64_rm8
            | Code::Movsx_r16_rm16
            | Code::Movsx_r32_rm16
            | Code::Movsx_r64_rm16
            | Code::Movsxd_r16_rm16
            | Code::Movsxd_r32_rm32
            | Code::Movsxd_r64_rm32 => self.movsx(instr).await,

            Code::Movups_xmm_xmmm128
            | Code::Movups_xmmm128_xmm
            | Code::Movupd_xmm_xmmm128
            | Code::Movupd_xmmm128_xmm
            | Code::Movdqu_xmm_xmmm128
            | Code::Movdqu_xmmm128_xmm
            | Code::Movntdq_m128_xmm
            | Code::Movntps_m128_xmm
            | Code::Movntpd_m128_xmm => self.mov_sse(instr, AlignmentMode::Unaligned).await,

            Code::Movaps_xmm_xmmm128
            | Code::Movaps_xmmm128_xmm
            | Code::Movapd_xmm_xmmm128
            | Code::Movapd_xmmm128_xmm
            | Code::Movdqa_xmm_xmmm128
            | Code::Movdqa_xmmm128_xmm => self.mov_sse(instr, AlignmentMode::Aligned(16)).await,

            Code::Movdir64b_r16_m512 | Code::Movdir64b_r32_m512 | Code::Movdir64b_r64_m512 => {
                self.movdir64b(instr).await
            }

            Code::Movsb_m8_m8 | Code::Movsw_m16_m16 | Code::Movsd_m32_m32 | Code::Movsq_m64_m64 => {
                self.movs(instr).await
            }

            Code::Cmp_r64_rm64
            | Code::Cmp_r32_rm32
            | Code::Cmp_r16_rm16
            | Code::Cmp_r8_rm8
            | Code::Cmp_rm64_r64
            | Code::Cmp_rm32_r32
            | Code::Cmp_rm16_r16
            | Code::Cmp_rm8_r8
            | Code::Cmp_rm64_imm32
            | Code::Cmp_rm64_imm8
            | Code::Cmp_rm32_imm32
            | Code::Cmp_rm32_imm8
            | Code::Cmp_rm16_imm16
            | Code::Cmp_rm16_imm8
            | Code::Cmp_rm8_imm8 => self.arith::<arith::CmpOp>(instr).await,

            Code::Xchg_rm8_r8 | Code::Xchg_rm16_r16 | Code::Xchg_rm32_r32 | Code::Xchg_rm64_r64 => {
                self.xchg(instr).await
            }

            Code::Cmpxchg_rm8_r8
            | Code::Cmpxchg_rm16_r16
            | Code::Cmpxchg_rm32_r32
            | Code::Cmpxchg_rm64_r64 => self.cmpxchg(instr).await,

            Code::Test_rm64_r64
            | Code::Test_rm32_r32
            | Code::Test_rm16_r16
            | Code::Test_rm8_r8
            | Code::Test_rm64_imm32
            | Code::Test_rm32_imm32
            | Code::Test_rm16_imm16
            | Code::Test_rm8_imm8 => self.arith::<arith::TestOp>(instr).await,

            Code::And_r64_rm64
            | Code::And_r32_rm32
            | Code::And_r16_rm16
            | Code::And_r8_rm8
            | Code::And_rm64_r64
            | Code::And_rm32_r32
            | Code::And_rm16_r16
            | Code::And_rm8_r8
            | Code::And_rm64_imm32
            | Code::And_rm64_imm8
            | Code::And_rm32_imm32
            | Code::And_rm32_imm8
            | Code::And_rm16_imm16
            | Code::And_rm16_imm8
            | Code::And_rm8_imm8 => self.arith::<arith::AndOp>(instr).await,

            Code::Add_r64_rm64
            | Code::Add_r32_rm32
            | Code::Add_r16_rm16
            | Code::Add_r8_rm8
            | Code::Add_rm64_r64
            | Code::Add_rm32_r32
            | Code::Add_rm16_r16
            | Code::Add_rm8_r8
            | Code::Add_rm64_imm32
            | Code::Add_rm64_imm8
            | Code::Add_rm32_imm32
            | Code::Add_rm32_imm8
            | Code::Add_rm16_imm16
            | Code::Add_rm16_imm8
            | Code::Add_rm8_imm8 => self.arith::<arith::AddOp>(instr).await,

            Code::Adc_r64_rm64
            | Code::Adc_r32_rm32
            | Code::Adc_r16_rm16
            | Code::Adc_r8_rm8
            | Code::Adc_rm64_r64
            | Code::Adc_rm32_r32
            | Code::Adc_rm16_r16
            | Code::Adc_rm8_r8
            | Code::Adc_rm64_imm32
            | Code::Adc_rm64_imm8
            | Code::Adc_rm32_imm32
            | Code::Adc_rm32_imm8
            | Code::Adc_rm16_imm16
            | Code::Adc_rm16_imm8
            | Code::Adc_rm8_imm8 => self.arith::<arith::AdcOp>(instr).await,

            Code::Xadd_rm8_r8 | Code::Xadd_rm16_r16 | Code::Xadd_rm32_r32 | Code::Xadd_rm64_r64 => {
                self.xadd(instr).await
            }

            Code::Sub_r64_rm64
            | Code::Sub_r32_rm32
            | Code::Sub_r16_rm16
            | Code::Sub_r8_rm8
            | Code::Sub_rm64_r64
            | Code::Sub_rm32_r32
            | Code::Sub_rm16_r16
            | Code::Sub_rm8_r8
            | Code::Sub_rm64_imm32
            | Code::Sub_rm64_imm8
            | Code::Sub_rm32_imm32
            | Code::Sub_rm32_imm8
            | Code::Sub_rm16_imm16
            | Code::Sub_rm16_imm8
            | Code::Sub_rm8_imm8 => self.arith::<arith::SubOp>(instr).await,

            Code::Sbb_r64_rm64
            | Code::Sbb_r32_rm32
            | Code::Sbb_r16_rm16
            | Code::Sbb_r8_rm8
            | Code::Sbb_rm64_r64
            | Code::Sbb_rm32_r32
            | Code::Sbb_rm16_r16
            | Code::Sbb_rm8_r8
            | Code::Sbb_rm64_imm32
            | Code::Sbb_rm64_imm8
            | Code::Sbb_rm32_imm32
            | Code::Sbb_rm32_imm8
            | Code::Sbb_rm16_imm16
            | Code::Sbb_rm16_imm8
            | Code::Sbb_rm8_imm8 => self.arith::<arith::SbbOp>(instr).await,

            Code::Or_r64_rm64
            | Code::Or_r32_rm32
            | Code::Or_r16_rm16
            | Code::Or_r8_rm8
            | Code::Or_rm64_r64
            | Code::Or_rm32_r32
            | Code::Or_rm16_r16
            | Code::Or_rm8_r8
            | Code::Or_rm64_imm32
            | Code::Or_rm64_imm8
            | Code::Or_rm32_imm32
            | Code::Or_rm32_imm8
            | Code::Or_rm16_imm16
            | Code::Or_rm16_imm8
            | Code::Or_rm8_imm8 => self.arith::<arith::OrOp>(instr).await,

            Code::Xor_r64_rm64
            | Code::Xor_r32_rm32
            | Code::Xor_r16_rm16
            | Code::Xor_r8_rm8
            | Code::Xor_rm64_r64
            | Code::Xor_rm32_r32
            | Code::Xor_rm16_r16
            | Code::Xor_rm8_r8
            | Code::Xor_rm64_imm32
            | Code::Xor_rm64_imm8
            | Code::Xor_rm32_imm32
            | Code::Xor_rm32_imm8
            | Code::Xor_rm16_imm16
            | Code::Xor_rm16_imm8
            | Code::Xor_rm8_imm8 => self.arith::<arith::XorOp>(instr).await,

            Code::Neg_rm8 | Code::Neg_rm16 | Code::Neg_rm32 | Code::Neg_rm64 => {
                self.unary_arith::<arith::NegOp>(instr).await
            }

            Code::Not_rm8 | Code::Not_rm16 | Code::Not_rm32 | Code::Not_rm64 => {
                self.unary_arith::<arith::NotOp>(instr).await
            }

            Code::Mul_rm8 | Code::Mul_rm16 | Code::Mul_rm32 | Code::Mul_rm64 => {
                self.unary_mul(instr).await
            }

            Code::Imul_rm8 | Code::Imul_rm16 | Code::Imul_rm32 | Code::Imul_rm64 => {
                self.unary_imul(instr).await
            }

            Code::Imul_r16_rm16
            | Code::Imul_r32_rm32
            | Code::Imul_r64_rm64
            | Code::Imul_r16_rm16_imm8
            | Code::Imul_r16_rm16_imm16
            | Code::Imul_r32_rm32_imm8
            | Code::Imul_r32_rm32_imm32
            | Code::Imul_r64_rm64_imm8
            | Code::Imul_r64_rm64_imm32 => self.imul(instr).await,

            Code::Div_rm8 | Code::Div_rm16 | Code::Div_rm32 | Code::Div_rm64 => {
                self.unary_div(instr).await
            }

            Code::Idiv_rm8 | Code::Idiv_rm16 | Code::Idiv_rm32 | Code::Idiv_rm64 => {
                self.unary_idiv(instr).await
            }

            Code::Shl_rm8_1
            | Code::Shl_rm8_CL
            | Code::Shl_rm8_imm8
            | Code::Shl_rm16_1
            | Code::Shl_rm16_CL
            | Code::Shl_rm16_imm8
            | Code::Shl_rm32_1
            | Code::Shl_rm32_CL
            | Code::Shl_rm32_imm8
            | Code::Shl_rm64_1
            | Code::Shl_rm64_CL
            | Code::Shl_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::SxlOp>(instr)
                    .await
            }

            Code::Shr_rm8_1
            | Code::Shr_rm8_CL
            | Code::Shr_rm8_imm8
            | Code::Shr_rm16_1
            | Code::Shr_rm16_CL
            | Code::Shr_rm16_imm8
            | Code::Shr_rm32_1
            | Code::Shr_rm32_CL
            | Code::Shr_rm32_imm8
            | Code::Shr_rm64_1
            | Code::Shr_rm64_CL
            | Code::Shr_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::ShrOp>(instr)
                    .await
            }

            Code::Sar_rm8_1
            | Code::Sar_rm8_CL
            | Code::Sar_rm8_imm8
            | Code::Sar_rm16_1
            | Code::Sar_rm16_CL
            | Code::Sar_rm16_imm8
            | Code::Sar_rm32_1
            | Code::Sar_rm32_CL
            | Code::Sar_rm32_imm8
            | Code::Sar_rm64_1
            | Code::Sar_rm64_CL
            | Code::Sar_rm64_imm8 => self.shift_arithmetic_right(instr).await,

            Code::Sal_rm8_1
            | Code::Sal_rm8_CL
            | Code::Sal_rm8_imm8
            | Code::Sal_rm16_1
            | Code::Sal_rm16_CL
            | Code::Sal_rm16_imm8
            | Code::Sal_rm32_1
            | Code::Sal_rm32_CL
            | Code::Sal_rm32_imm8
            | Code::Sal_rm64_1
            | Code::Sal_rm64_CL
            | Code::Sal_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::SxlOp>(instr)
                    .await
            }

            Code::Shld_rm16_r16_CL
            | Code::Shld_rm16_r16_imm8
            | Code::Shld_rm32_r32_CL
            | Code::Shld_rm32_r32_imm8
            | Code::Shld_rm64_r64_CL
            | Code::Shld_rm64_r64_imm8 => self.shld(instr).await,

            Code::Shrd_rm16_r16_CL
            | Code::Shrd_rm16_r16_imm8
            | Code::Shrd_rm32_r32_CL
            | Code::Shrd_rm32_r32_imm8
            | Code::Shrd_rm64_r64_CL
            | Code::Shrd_rm64_r64_imm8 => self.shrd(instr).await,

            Code::Rcl_rm8_1
            | Code::Rcl_rm8_CL
            | Code::Rcl_rm8_imm8
            | Code::Rcl_rm16_1
            | Code::Rcl_rm16_CL
            | Code::Rcl_rm16_imm8
            | Code::Rcl_rm32_1
            | Code::Rcl_rm32_CL
            | Code::Rcl_rm32_imm8
            | Code::Rcl_rm64_1
            | Code::Rcl_rm64_CL
            | Code::Rcl_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RclOp>(instr)
                    .await
            }

            Code::Rcr_rm8_1
            | Code::Rcr_rm8_CL
            | Code::Rcr_rm8_imm8
            | Code::Rcr_rm16_1
            | Code::Rcr_rm16_CL
            | Code::Rcr_rm16_imm8
            | Code::Rcr_rm32_1
            | Code::Rcr_rm32_CL
            | Code::Rcr_rm32_imm8
            | Code::Rcr_rm64_1
            | Code::Rcr_rm64_CL
            | Code::Rcr_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RcrOp>(instr)
                    .await
            }

            Code::Rol_rm8_1
            | Code::Rol_rm8_CL
            | Code::Rol_rm8_imm8
            | Code::Rol_rm16_1
            | Code::Rol_rm16_CL
            | Code::Rol_rm16_imm8
            | Code::Rol_rm32_1
            | Code::Rol_rm32_CL
            | Code::Rol_rm32_imm8
            | Code::Rol_rm64_1
            | Code::Rol_rm64_CL
            | Code::Rol_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RolOp>(instr)
                    .await
            }

            Code::Ror_rm8_1
            | Code::Ror_rm8_CL
            | Code::Ror_rm8_imm8
            | Code::Ror_rm16_1
            | Code::Ror_rm16_CL
            | Code::Ror_rm16_imm8
            | Code::Ror_rm32_1
            | Code::Ror_rm32_CL
            | Code::Ror_rm32_imm8
            | Code::Ror_rm64_1
            | Code::Ror_rm64_CL
            | Code::Ror_rm64_imm8 => {
                self.shift_sign_unextended::<shift_rotate::RorOp>(instr)
                    .await
            }

            Code::Outsb_DX_m8 | Code::Outsw_DX_m16 | Code::Outsd_DX_m32 => self.outs(instr).await,

            Code::Insb_m8_DX | Code::Insw_m16_DX | Code::Insd_m32_DX => self.ins(instr).await,

            Code::Lodsb_AL_m8 | Code::Lodsw_AX_m16 | Code::Lodsd_EAX_m32 | Code::Lodsq_RAX_m64 => {
                self.lods(instr).await
            }

            Code::Stosb_m8_AL | Code::Stosw_m16_AX | Code::Stosd_m32_EAX | Code::Stosq_m64_RAX => {
                self.stos(instr).await
            }

            Code::Cmpsb_m8_m8 | Code::Cmpsw_m16_m16 | Code::Cmpsd_m32_m32 | Code::Cmpsq_m64_m64 => {
                self.cmps(instr).await
            }

            Code::Scasb_AL_m8 | Code::Scasw_AX_m16 | Code::Scasd_EAX_m32 | Code::Scasq_RAX_m64 => {
                self.scas(instr).await
            }

            Code::Bt_rm16_imm8
            | Code::Bt_rm32_imm8
            | Code::Bt_rm64_imm8
            | Code::Bt_rm16_r16
            | Code::Bt_rm32_r32
            | Code::Bt_rm64_r64 => self.bt_m::<bt::TestOp>(instr).await,
            Code::Bts_rm16_imm8
            | Code::Bts_rm32_imm8
            | Code::Bts_rm64_imm8
            | Code::Bts_rm16_r16
            | Code::Bts_rm32_r32
            | Code::Bts_rm64_r64 => self.bt_m::<bt::SetOp>(instr).await,
            Code::Btr_rm16_imm8
            | Code::Btr_rm32_imm8
            | Code::Btr_rm64_imm8
            | Code::Btr_rm16_r16
            | Code::Btr_rm32_r32
            | Code::Btr_rm64_r64 => self.bt_m::<bt::ResetOp>(instr).await,
            Code::Btc_rm16_imm8
            | Code::Btc_rm32_imm8
            | Code::Btc_rm64_imm8
            | Code::Btc_rm16_r16
            | Code::Btc_rm32_r32
            | Code::Btc_rm64_r64 => self.bt_m::<bt::ComplementOp>(instr).await,

            Code::Inc_rm8 | Code::Inc_rm16 | Code::Inc_rm32 | Code::Inc_rm64 => {
                self.unary_arith::<arith::IncOp>(instr).await
            }
            Code::Dec_rm8 | Code::Dec_rm16 | Code::Dec_rm32 | Code::Dec_rm64 => {
                self.unary_arith::<arith::DecOp>(instr).await
            }

            Code::Seta_rm8
            | Code::Setae_rm8
            | Code::Setb_rm8
            | Code::Setbe_rm8
            | Code::Sete_rm8
            | Code::Setg_rm8
            | Code::Setge_rm8
            | Code::Setl_rm8
            | Code::Setle_rm8
            | Code::Setne_rm8
            | Code::Setno_rm8
            | Code::Setnp_rm8
            | Code::Setns_rm8
            | Code::Seto_rm8
            | Code::Setp_rm8
            | Code::Sets_rm8 => self.setcc(instr).await,

            Code::Cmova_r16_rm16
            | Code::Cmova_r32_rm32
            | Code::Cmova_r64_rm64
            | Code::Cmovae_r16_rm16
            | Code::Cmovae_r32_rm32
            | Code::Cmovae_r64_rm64
            | Code::Cmovb_r16_rm16
            | Code::Cmovb_r32_rm32
            | Code::Cmovb_r64_rm64
            | Code::Cmovbe_r16_rm16
            | Code::Cmovbe_r32_rm32
            | Code::Cmovbe_r64_rm64
            | Code::Cmove_r16_rm16
            | Code::Cmove_r32_rm32
            | Code::Cmove_r64_rm64
            | Code::Cmovg_r16_rm16
            | Code::Cmovg_r32_rm32
            | Code::Cmovg_r64_rm64
            | Code::Cmovge_r16_rm16
            | Code::Cmovge_r32_rm32
            | Code::Cmovge_r64_rm64
            | Code::Cmovl_r16_rm16
            | Code::Cmovl_r32_rm32
            | Code::Cmovl_r64_rm64
            | Code::Cmovle_r16_rm16
            | Code::Cmovle_r32_rm32
            | Code::Cmovle_r64_rm64
            | Code::Cmovne_r16_rm16
            | Code::Cmovne_r32_rm32
            | Code::Cmovne_r64_rm64
            | Code::Cmovno_r16_rm16
            | Code::Cmovno_r32_rm32
            | Code::Cmovno_r64_rm64
            | Code::Cmovnp_r16_rm16
            | Code::Cmovnp_r32_rm32
            | Code::Cmovnp_r64_rm64
            | Code::Cmovns_r16_rm16
            | Code::Cmovns_r32_rm32
            | Code::Cmovns_r64_rm64
            | Code::Cmovo_r16_rm16
            | Code::Cmovo_r32_rm32
            | Code::Cmovo_r64_rm64
            | Code::Cmovp_r16_rm16
            | Code::Cmovp_r32_rm32
            | Code::Cmovp_r64_rm64
            | Code::Cmovs_r16_rm16
            | Code::Cmovs_r32_rm32
            | Code::Cmovs_r64_rm64 => self.cmovcc(instr).await,

            Code::Cmpxchg8b_m64 | Code::Cmpxchg16b_m128 => self.cmpxchg8_16(instr).await,

            Code::In_AL_imm8
            | Code::In_AX_imm8
            | Code::In_EAX_imm8
            | Code::In_AL_DX
            | Code::In_AX_DX
            | Code::In_EAX_DX
            | Code::Out_imm8_AL
            | Code::Out_imm8_AX
            | Code::Out_imm8_EAX
            | Code::Out_DX_AL
            | Code::Out_DX_AX
            | Code::Out_DX_EAX
            | _ => Err(self.unsupported_instruction(instr).into()),
        }?;

        self.cpu.set_rip(instr.next_ip());
        let mut rflags = self.cpu.rflags();
        if rflags.trap() {
            rflags.set_trap(false);
            self.cpu.set_rflags(rflags);
            return Err(Error::InstructionException(
                Exception::DEBUG,
                None,
                ExceptionCause::DebugTrap,
            ))?;
        }

        Ok(())
    }

    fn unsupported_instruction(&self, instr: &Instruction) -> Error<T::Error> {
        Error::UnsupportedInstruction(self.bytes[..instr.len()].into())
    }
}