/******************************************************************************
 * x86_emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm16   (4<<29)
#define Src2Mem16   (5<<29)	/* Used for Ep encoding. First argument has to be
				   in memory and second argument is located
				   immediately after the first one in memory. */
#define Src2Mask    (7<<29)
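
/*
 * Illustrative reading of one table entry below (example added for clarity,
 * not part of the original text): opcode 0x88 (mov r/m8, r8) decodes as
 * ByteOp | DstMem | SrcReg | ModRM | Mov, i.e. an 8-bit operation whose
 * destination comes from the ModRM r/m field, whose source is the ModRM reg
 * field, and whose destination is written without being read first.
 */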
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	DstReg | SrcMem | ModRM | Mov, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImm | Src2Imm16 | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)		\
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)


/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
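
/*
 * Example: with c->ad_bytes == 2 and *reg == 0x0000, an increment of -2
 * (a 16-bit push) yields 0xfffe while bits 16 and up stay untouched,
 * matching the wrap-around of a real 16-bit stack pointer.
 */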
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_SS);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size;

	if (linear < fc->start || linear >= fc->end) {
		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->start = linear;
		fc->end = linear + size;
	}
	*dest = fc->data[linear - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	eip += ctxt->cs_base;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
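
/*
 * Example: if decoding started at ctxt->eip == 0x100, fetching 4 bytes at
 * eip == 0x10b is still allowed (0x10b + 4 - 0x100 == 15); one byte further
 * exceeds the architectural 15-byte instruction limit and fails with
 * X86EMUL_UNHANDLEABLE.
 */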
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
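
/*
 * Worked example (32-bit mode): for "add %eax, 8(%ebx)" the ModRM byte is
 * 0x43, i.e. mod == 01 (disp8 follows), reg == 000 (EAX), rm == 011 (EBX),
 * so the decode above yields modrm_ea = EBX + 8 with no SIB byte.
 */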
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;


	/* we cannot decode an insn before we complete the previous rep insn */
	WARN_ON(ctxt->restart);

	/* Shadow copy of register state. Committed on successful emulation. */
	memset(c, 0, sizeof(struct decode_cache));
	c->eip = ctxt->eip;
	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 :
							   c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.val = 0;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c,  seg_override_base(ctxt, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2Imm16:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 2;
		c->src2.val = insn_fetch(u16, 2, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Mem16:
		c->src2.type = OP_MEM;
		c->src2.bytes = 2;
		c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
		c->src2.val = 0;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
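
/*
 * Example: a descriptor with limit == 0xfffff and g == 1 scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a full 4GB flat segment.
 */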
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
	return X86EMUL_PROPAGATE_FAULT;
}
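
/*
 * Selector layout relied on above: bits 15:3 index the descriptor table,
 * bit 2 selects GDT (0) or LDT (1), bits 1:0 are the RPL. For example,
 * selector 0x33 names GDT entry 6 with RPL 3.
 */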
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
					       c->regs[VCPU_REGS_RSP]);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = ops->read_emulated(register_address(c, ss_base(ctxt),
						 c->regs[VCPU_REGS_RSP]),
				dest, len, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
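
/*
 * Example: a protected-mode guest at CPL 3 with IOPL 0 that executes popf
 * keeps its old IF and IOPL, since neither bit makes it into change_mask
 * above, while the arithmetic flags are taken from the popped value.
 */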
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment segment;

	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);

	c->src.val = segment.selector;
	emulate_push(ctxt);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt);
		++reg;
	}
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt);
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old, new;
	int rc;

	rc = ops->read_emulated(c->modrm_ea, &old, 8, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;

	} else {
		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
		       (u32) c->regs[VCPU_REGS_RBX];

		rc = ops->cmpxchg_emulated(c->modrm_ea, &old, &new, 8, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
	/*
	 * an sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss.
	 */
	if (!(int_shadow & mask))
		ctxt->interruptibility = mask;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct kvm_segment *cs, struct kvm_segment *ss)
{
	memset(cs, 0, sizeof(struct kvm_segment));
	kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct kvm_segment));

	cs->l = 0;		/* will be adjusted later */
	cs->base = 0;		/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	cs->limit = 0xffffffff;	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->present = 1;
	cs->db = 1;

	ss->unusable = 0;
	ss->base = 0;		/* flat segment */
	ss->limit = 0xffffffff;	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->db = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->present = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs.selector = (u16)(msr_data & 0xfffc);
	ss.selector = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}
	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		kvm_x86_ops->get_msr(ctxt->vcpu,
				     ctxt->mode == X86EMUL_MODE_PROT64 ?
				     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs.selector = (u16)msr_data;
	cs.selector &= ~SELECTOR_RPL_MASK;
	ss.selector = cs.selector + 8;
	ss.selector &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;
	int usermode;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs.selector = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs.selector = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = cs.selector + 8;
		cs.db = 0;
		cs.l = 1;
		break;
	}
	cs.selector |= SELECTOR_RPL_MASK;
	ss.selector |= SELECTOR_RPL_MASK;

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct kvm_segment tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
	if (tr_seg.unusable)
		return false;
	if (tr_seg.limit < 103)
		return false;
	r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > tr_seg.limit)
		return false;
	r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}

static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
				      struct x86_emulate_ops *ops,
				      int seg)
{
	struct desc_struct desc;
	if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
		return get_desc_base(&desc);
	else
		return ~0;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
2180 static void save_state_to_tss32(struct x86_emulate_ctxt
*ctxt
,
2181 struct x86_emulate_ops
*ops
,
2182 struct tss_segment_32
*tss
)
2184 struct decode_cache
*c
= &ctxt
->decode
;
2186 tss
->cr3
= ops
->get_cr(3, ctxt
->vcpu
);
2188 tss
->eflags
= ctxt
->eflags
;
2189 tss
->eax
= c
->regs
[VCPU_REGS_RAX
];
2190 tss
->ecx
= c
->regs
[VCPU_REGS_RCX
];
2191 tss
->edx
= c
->regs
[VCPU_REGS_RDX
];
2192 tss
->ebx
= c
->regs
[VCPU_REGS_RBX
];
2193 tss
->esp
= c
->regs
[VCPU_REGS_RSP
];
2194 tss
->ebp
= c
->regs
[VCPU_REGS_RBP
];
2195 tss
->esi
= c
->regs
[VCPU_REGS_RSI
];
2196 tss
->edi
= c
->regs
[VCPU_REGS_RDI
];
2198 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2199 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2200 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2201 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2202 tss
->fs
= ops
->get_segment_selector(VCPU_SREG_FS
, ctxt
->vcpu
);
2203 tss
->gs
= ops
->get_segment_selector(VCPU_SREG_GS
, ctxt
->vcpu
);
2204 tss
->ldt_selector
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	ops->set_cr(3, tss->cr3, ctxt->vcpu);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
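/*
 * Same flow as task_switch_16, but using the 32-bit TSS layout and the
 * 32-bit save/load helpers above.
 */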
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
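/*
 * Common task-switch driver: read and sanity-check the old and new TSS
 * descriptors (DPL/CPL, present bit, minimal limit), maintain the busy
 * bit and the NT flag according to the switch reason, then hand off to
 * the 16- or 32-bit helper based on the descriptor type.
 */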
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
				      tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set back link to prev task only if NT bit is set in eflags;
	 * note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	return ret;
}
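/*
 * Entry point used by the host: work on a shadow copy of the registers
 * and commit it (plus the new RIP) only if the switch succeeded.
 */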
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	memset(c, 0, sizeof(struct decode_cache));
	c->eip = ctxt->eip;
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason);

	if (rc == X86EMUL_CONTINUE) {
		memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
		kvm_rip_write(ctxt->vcpu, c->eip);
	}

	return rc;
}
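/*
 * Illustrative sketch, not part of this file: a caller such as
 * kvm_task_switch() in arch/x86/kvm/x86.c is expected to set up the
 * emulation context and then invoke emulator_task_switch(); the exact
 * surrounding code here is an assumption.
 *
 *	vcpu->arch.emulate_ctxt.vcpu = vcpu;
 *	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
 *	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
 *				   tss_selector, reason);
 */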
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
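/*
 * Example: after one iteration of a dword MOVS with EFLAGS.DF clear,
 * string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI, &c->src)
 * bumps RSI by op->bytes (4) and repoints op->ptr at DS:RSI; with DF set
 * it steps backwards by 4 instead.
 */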
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->interruptibility = 0;

	/* Shadow copy of register state. Committed on successful emulation.
	 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
	 * modify them.
	 */
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}
	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		goto done;
	}
	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			kvm_rip_write(ctxt->vcpu, c->eip);
			goto done;
		}
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}
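	/*
	 * Example: "rep movsb" with RCX == 0 terminates immediately via the
	 * first condition above; "repe cmpsb" additionally stops as soon as
	 * a comparison clears ZF.
	 */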
	if (c->src.type == OP_MEM) {
		rc = ops->read_emulated((unsigned long)c->src.ptr,
					&c->src.val,
					c->src.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		rc = ops->read_emulated((unsigned long)c->src2.ptr,
					&c->src2.val,
					c->src2.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val,
					c->dst.bytes, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
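	/*
	 * Operands are now fetched; dispatch below on the primary opcode
	 * byte. Two-byte (0x0f-prefixed) opcodes are handled by the second
	 * switch at twobyte_insn.
	 */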
2528 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2530 case 0x06: /* push es */
2531 emulate_push_sreg(ctxt
, VCPU_SREG_ES
);
2533 case 0x07: /* pop es */
2534 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2535 if (rc
!= X86EMUL_CONTINUE
)
2540 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2542 case 0x0e: /* push cs */
2543 emulate_push_sreg(ctxt
, VCPU_SREG_CS
);
2547 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2549 case 0x16: /* push ss */
2550 emulate_push_sreg(ctxt
, VCPU_SREG_SS
);
2552 case 0x17: /* pop ss */
2553 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2554 if (rc
!= X86EMUL_CONTINUE
)
2559 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2561 case 0x1e: /* push ds */
2562 emulate_push_sreg(ctxt
, VCPU_SREG_DS
);
2564 case 0x1f: /* pop ds */
2565 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2566 if (rc
!= X86EMUL_CONTINUE
)
2571 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2575 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2579 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2583 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2585 case 0x40 ... 0x47: /* inc r16/r32 */
2586 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2588 case 0x48 ... 0x4f: /* dec r16/r32 */
2589 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2591 case 0x50 ... 0x57: /* push reg */
2594 case 0x58 ... 0x5f: /* pop reg */
2596 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2597 if (rc
!= X86EMUL_CONTINUE
)
	case 0x60:	/* pusha */
		emulate_pusha(ctxt);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x68: /* push imm */
	case 0x6a: /* push imm8 */
		emulate_push(ctxt);
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!ops->pio_in_emulated(c->dst.bytes, c->regs[VCPU_REGS_RDX],
					  &c->dst.val, 1, ctxt->vcpu))
			goto done; /* IO is needed, skip writeback */
		break;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->src.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
				      &c->src.val, 1, ctxt->vcpu);

		c->dst.type = OP_NONE; /* nothing to writeback */
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c: { /* mov r/m, sreg */
		struct kvm_segment segreg;

		if (c->modrm_reg <= VCPU_SREG_GS)
			kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
		else {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->dst.val = segreg.selector;
		break;
	}
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (!(c->rex_prefix & 1)) { /* nop */
			c->dst.type = OP_NONE;
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = c->dst.type = OP_REG;
		c->src.bytes = c->dst.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: /* jmp far */
	jump_far:
		if (load_segment_descriptor(ctxt, ops, c->src2.val,
					    VCPU_SREG_CS))
			goto done;

		c->eip = c->src.val;
		break;
	case 0xeb:
	jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!ops->pio_in_emulated(c->dst.bytes, c->src.val,
					  &c->dst.val, 1, ctxt->vcpu))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI,
				&c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst);

	if (c->rep_prefix && (c->d & String)) {
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (!(c->regs[VCPU_REGS_RCX] & 0x3ff))
			ctxt->restart = false;
	}

	/* Commit shadow register state. */
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(ctxt->vcpu, c->eip);

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
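/*
 * Two-byte (0x0f xx) opcodes: system instructions, MSR access, cmov,
 * bit tests and the remaining group decodes.
 */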
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:		/* syscall */
		rc = emulate_syscall(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:		/* clts */
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x09:		/* wbinvd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
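	/*
	 * Example: wrmsr with EDX:EAX = 0x00000001:0x00000000 writes the
	 * 64-bit MSR value 0x100000000; rdmsr performs the inverse split
	 * of msr_data into EDX and EAX as seen above.
	 */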
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	/* push fs */
		emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1:	/* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	bt:			/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	bts:			/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	btr:			/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	btc:			/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val
					     : (s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val
						: (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->dst.type = OP_NONE;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);