/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
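/*
 * For example (a sketch of the size calculation, not the dispatcher
 * itself): with FASTOP_SIZE == 8, the 4-byte variant of an op is reached
 * as em_<op> + __ffs(4) * FASTOP_SIZE, i.e. two stubs past the 1-byte one.
 */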
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
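/*
 * Guest registers are handled lazily: reg_read() pulls a GPR from the
 * host context on first use and caches it in ctxt->_regs; reg_write()
 * marks it dirty so writeback_registers() flushes only what changed.
 */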
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET
#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END
#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END
#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
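/*
 * Landing pad for faulting fastop instructions (see _ASM_EXTABLE in
 * FOP1EEX above): it zeroes %esi, which callers read back through the
 * "ex" output to detect that an exception occurred.
 */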
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}
static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
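/*
 * __linearize() below applies the segmentation checks (limit, segment
 * type, expand-down handling and alignment) and yields the linear
 * address; out-of-range accesses raise #SS for stack segments and #GP
 * otherwise.
 */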
static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = fc->end };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
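/*
 * test_cc() evaluates a condition code by calling into the table of
 * 4-byte SETcc stubs built with FOP_SETCC(), indexed by the low nibble
 * of the opcode, with the guest's arithmetic flags loaded into RFLAGS.
 */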
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
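/*
 * decode_modrm() handles both the 16-bit (fixed base/index register
 * pairs) and 32/64-bit (SIB byte, RIP-relative) forms of the ModRM
 * addressing encodings, accumulating the effective address in modrm_ea.
 */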
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
	}

	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.xmm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
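/*
 * For register-sized bit offsets (the BT/BTS/BTR/BTC family with a
 * memory destination), the byte actually addressed is dst + (offset >> 3)
 * after masking; e.g. "bt $100, mem" with a 16-bit operand adjusts the
 * address by 12 bytes and tests bit 4 there.
 */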
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
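/*
 * pio_in_emulated() batches rep-prefixed port reads: it fills rc->data
 * with up to a page-bounded burst in a single host call and satisfies
 * subsequent iterations from that cache.
 */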
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;
		u32 base3;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
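/*
 * __load_segment_descriptor() implements the protected-mode selector
 * load checks (null selector, descriptor presence, and the per-segment
 * DPL/RPL/CPL rules) before committing the result via ->set_segment().
 */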
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl, bool in_task_switch)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment selector's RPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (in_task_switch && rpl != dpl)
			goto exception;

		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming code segment */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	int rc;

	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       op->addr.mem,
					       &op->orig_val,
					       &op->val,
					       op->bytes);
		else
			rc = segmented_write(ctxt,
					     op->addr.mem,
					     &op->val,
					     op->bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_MEM_STR:
		rc = segmented_write(ctxt,
				op->addr.mem,
				op->data,
				op->bytes * op->count);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
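/*
 * POPF may not change IOPL unless CPL == 0, and may not change IF
 * unless CPL <= IOPL; emulate_popf() builds change_mask accordingly
 * for each execution mode.
 */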
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}
*ctxt
)
1794 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
;
1795 return em_push(ctxt
);
1798 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
1800 int rc
= X86EMUL_CONTINUE
;
1801 int reg
= VCPU_REGS_RDI
;
1803 while (reg
>= VCPU_REGS_RAX
) {
1804 if (reg
== VCPU_REGS_RSP
) {
1805 rsp_increment(ctxt
, ctxt
->op_bytes
);
1809 rc
= emulate_pop(ctxt
, reg_rmw(ctxt
, reg
), ctxt
->op_bytes
);
1810 if (rc
!= X86EMUL_CONTINUE
)
1817 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1819 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1826 /* TODO: Add limit checks */
1827 ctxt
->src
.val
= ctxt
->eflags
;
1829 if (rc
!= X86EMUL_CONTINUE
)
1832 ctxt
->eflags
&= ~(EFLG_IF
| EFLG_TF
| EFLG_AC
);
1834 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
1836 if (rc
!= X86EMUL_CONTINUE
)
1839 ctxt
->src
.val
= ctxt
->_eip
;
1841 if (rc
!= X86EMUL_CONTINUE
)
1844 ops
->get_idt(ctxt
, &dt
);
1846 eip_addr
= dt
.address
+ (irq
<< 2);
1847 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
1849 rc
= ops
->read_std(ctxt
, cs_addr
, &cs
, 2, &ctxt
->exception
);
1850 if (rc
!= X86EMUL_CONTINUE
)
1853 rc
= ops
->read_std(ctxt
, eip_addr
, &eip
, 2, &ctxt
->exception
);
1854 if (rc
!= X86EMUL_CONTINUE
)
1857 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
1858 if (rc
!= X86EMUL_CONTINUE
)
1866 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1870 invalidate_registers(ctxt
);
1871 rc
= __emulate_int_real(ctxt
, irq
);
1872 if (rc
== X86EMUL_CONTINUE
)
1873 writeback_registers(ctxt
);
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}
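/*
 * CMPXCHG8B: if EDX:EAX matches the 64-bit destination, ZF is set and
 * ECX:EBX is stored; otherwise ZF is cleared and the old value is
 * loaded back into EDX:EAX.
 */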
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
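/*
 * setup_syscalls_segments() builds the flat 4GB code and stack
 * descriptors that SYSCALL/SYSENTER and their returns load directly,
 * bypassing descriptor-table lookups.
 */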
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
*ctxt
)
2124 u32 eax
, ebx
, ecx
, edx
;
2127 ctxt
->ops
->get_cpuid(ctxt
, &eax
, &ebx
, &ecx
, &edx
);
2128 return ebx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2129 && ecx
== X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2130 && edx
== X86EMUL_CPUID_VENDOR_GenuineIntel_edx
;
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode. Also a 64bit guest with a
	 * 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD
	 * response - CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
*ctxt
)
2179 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2180 struct desc_struct cs
, ss
;
2185 /* syscall is not available in real mode */
2186 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2187 ctxt
->mode
== X86EMUL_MODE_VM86
)
2188 return emulate_ud(ctxt
);
2190 if (!(em_syscall_is_enabled(ctxt
)))
2191 return emulate_ud(ctxt
);
2193 ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
2194 setup_syscalls_segments(ctxt
, &cs
, &ss
);
2196 if (!(efer
& EFER_SCE
))
2197 return emulate_ud(ctxt
);
2199 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2201 cs_sel
= (u16
)(msr_data
& 0xfffc);
2202 ss_sel
= (u16
)(msr_data
+ 8);
2204 if (efer
& EFER_LMA
) {
2208 ops
->set_segment(ctxt
, cs_sel
, &cs
, 0, VCPU_SREG_CS
);
2209 ops
->set_segment(ctxt
, ss_sel
, &ss
, 0, VCPU_SREG_SS
);
2211 *reg_write(ctxt
, VCPU_REGS_RCX
) = ctxt
->_eip
;
2212 if (efer
& EFER_LMA
) {
2213 #ifdef CONFIG_X86_64
2214 *reg_write(ctxt
, VCPU_REGS_R11
) = ctxt
->eflags
& ~EFLG_RF
;
2217 ctxt
->mode
== X86EMUL_MODE_PROT64
?
2218 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
2219 ctxt
->_eip
= msr_data
;
2221 ops
->get_msr(ctxt
, MSR_SYSCALL_MASK
, &msr_data
);
2222 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
2226 ops
->get_msr(ctxt
, MSR_STAR
, &msr_data
);
2227 ctxt
->_eip
= (u32
)msr_data
;
2229 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
2232 return X86EMUL_CONTINUE
;
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
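/*
 * When CPL > IOPL, port access falls back to the TSS I/O permission
 * bitmap: a 16-bit read covering the port's bits must show all zeroes
 * for the access to be allowed.
 */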

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
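
/*
 * Worked example for the bitmap lookup above: a 1-byte access to port
 * 0x3f8 reads the bitmap word at base + io_bitmap_ptr + 0x3f8/8 (byte
 * offset 127); bit_idx = 0x3f8 & 7 = 0 and mask = 1, so the access is
 * allowed only if bit 0 of that word is clear.
 */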

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}
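
/*
 * Note: prev_task_link in the incoming TSS is only rewritten when
 * old_tss_sel != 0xffff; emulator_do_task_switch() passes a real
 * selector only for CALL/gate-initiated switches, where it also sets
 * EFLAGS.NT so a later IRET can find its way back.
 */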

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
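
/*
 * Worked example: AL = 0x9a with AF = CF = 0.  The low nibble 0xa > 9,
 * so AL -= 6 gives 0x94; old_al 0x9a > 0x99, so AL -= 0x60 gives 0x34
 * with CF set - the packed-BCD result of e.g. 0x23 - 0x89 (23 - 89 =
 * -66, i.e. 34 with a borrow).
 */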

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
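
/*
 * Worked example: AX = 0x0035 (AL = 53) with the default base 10:
 * AH = 53 / 10 = 5, AL = 53 % 10 = 3, so AX becomes 0x0503.  A zero
 * immediate raises #DE above.
 */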

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
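
/*
 * Worked example: AX = 0x0503 (AH = 5, AL = 3) with the default base
 * 10: AL = 3 + 5 * 10 = 53 = 0x35 and AH is cleared, so AX becomes
 * 0x0035 - the inverse of the AAM example above.
 */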

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
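
/*
 * Example: for CWD with AX = 0x8000, src.val >> 15 == 1 and the
 * expression yields ~0, so DX becomes 0xffff; for AX = 0x7fff it
 * yields 0.  DX:AX (or EDX:EAX, RDX:RAX) is thus the sign extension
 * of the accumulator.
 */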

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		return X86EMUL_PROPAGATE_FAULT;
	}
	return X86EMUL_CONTINUE;
}
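
/*
 * Example: the 4-byte form turns 0x12345678 into 0x78563412 (swab32);
 * the 2-byte form swaps only the low word and, per the SDM, leaves the
 * upper word of the destination register unchanged.
 */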

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}
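
/*
 * The low opcode bits encode the variant: 0xe0 LOOPNE, 0xe1 LOOPE,
 * 0xe2 LOOP.  XOR-ing with 0x5 maps 0xe0/0xe1 onto the NE/E condition
 * codes, so test_cc(ctxt->b ^ 0x5, eflags) checks ZF == 0 for LOOPNE
 * and ZF == 1 for LOOPE, while 0xe2 branches on RCX alone.
 */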

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
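
/*
 * Example expansion: F6ALU(Lock, em_add) emits the six classic ALU
 * encodings (e.g. opcodes 0x00 - 0x05 for ADD): r/m,r and r,r/m in
 * byte and word/long widths - Lock kept only on the r/m-destination
 * forms - plus the AL/eAX,imm forms.
 */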

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};
[] = {
3637 F(DstMem
| SrcNone
| Lock
, em_inc
),
3638 F(DstMem
| SrcNone
| Lock
, em_dec
),
3639 I(SrcMem
| Stack
, em_grp45
),
3640 I(SrcMemFAddr
| ImplicitOps
| Stack
, em_call_far
),
3641 I(SrcMem
| Stack
, em_grp45
),
3642 I(SrcMemFAddr
| ImplicitOps
, em_grp45
),
3643 I(SrcMem
| Stack
, em_grp45
), D(Undefined
),

static const struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct gprefix pfx_vmovntpx = {
	I(0, em_mov), N, N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						    check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						    check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * Insns below are selected by the prefix which is indexed by the third
 * opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N),
};

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if ((ctxt->d & BitOp) && op == &ctxt->dst)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = seg_override(ctxt);
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = seg_override(ctxt);
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(ctxt, ctxt->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 ||
	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;

	/* Unrecognised? */
	if (ctxt->d == 0 || (ctxt->d & NotImpl))
		return EMULATION_FAILED;

	if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
		return EMULATION_FAILED;

	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;

	if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;
	else if (ctxt->d & Mmx)
		ctxt->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
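
/*
 * Example: "repe cmpsb" (0xf3 0xa6) terminates as soon as a compared
 * pair differs (ZF == 0); "repne scasb" (0xf2 0xae) terminates as soon
 * as a match is found (ZF == 1).  MOVS/STOS/LODS terminate on RCX
 * alone, which is checked in x86_emulate_insn().
 */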

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
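
/*
 * Each fastop table packs the 1/2/4/8-byte variants of an operation at
 * FASTOP_SIZE intervals, byte variant first.  For a non-byte op with
 * dst.bytes == 4, __ffs(4) == 2 selects the third (32-bit) entry;
 * ByteOp handlers are called at offset 0.
 */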
4523 int x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
)
4525 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
4526 int rc
= X86EMUL_CONTINUE
;
4527 int saved_dst_type
= ctxt
->dst
.type
;
4529 ctxt
->mem_read
.pos
= 0;
4531 if ((ctxt
->mode
== X86EMUL_MODE_PROT64
&& (ctxt
->d
& No64
)) ||
4532 (ctxt
->d
& Undefined
)) {
4533 rc
= emulate_ud(ctxt
);
4537 /* LOCK prefix is allowed only with some instructions */
4538 if (ctxt
->lock_prefix
&& (!(ctxt
->d
& Lock
) || ctxt
->dst
.type
!= OP_MEM
)) {
4539 rc
= emulate_ud(ctxt
);
4543 if ((ctxt
->d
& SrcMask
) == SrcMemFAddr
&& ctxt
->src
.type
!= OP_MEM
) {
4544 rc
= emulate_ud(ctxt
);
	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		/*
		 * Now that we know the fpu is exception safe, we can fetch
		 * operands from it.
		 */
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
	}

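	/*
	 * When emulating on behalf of a nested guest, the guest hypervisor
	 * may have asked to intercept this instruction; check at each of
	 * the architecturally defined intercept stages.
	 */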
	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

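	/*
	 * No ->execute handler: fall back to the open-coded switches below,
	 * selected by the number of opcode bytes that were decoded.
	 */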
	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;  /* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

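/*
 * 0x0f-prefixed (two-byte) opcodes without a dedicated ->execute handler
 * are emulated here; anything unhandled falls through to cannot_emulate.
 */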
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

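/*
 * The emulator works on a cached copy of the guest's general-purpose
 * registers; these helpers let the rest of KVM invalidate that cache or
 * flush dirty values back when register state changes outside emulation.
 */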
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
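
/*
 * A minimal sketch of how these entry points fit together, loosely
 * modelled on x86_emulate_instruction() in x86.c, which additionally
 * handles exception injection, instruction retry and exits to
 * userspace (handle_failure() below is a hypothetical stand-in):
 *
 *	if (x86_decode_insn(ctxt, insn, insn_len) != EMULATION_OK)
 *		return handle_failure(vcpu);
 * restart:
 *	rc = x86_emulate_insn(ctxt);
 *	if (rc == EMULATION_RESTART)
 *		goto restart;			(next string iteration)
 *	if (rc == EMULATION_INTERCEPTED)
 *		return EMULATE_DONE;		(nested intercept took over)
 *	if (rc == EMULATION_FAILED)
 *		return handle_failure(vcpu);
 *	return EMULATE_DONE;
 */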