1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
25 #include <public/xen.h>
26 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
28 #include <linux/kvm_host.h>
29 #include "kvm_cache_regs.h"
30 #define DPRINTF(x...) do {} while (0)
32 #include <linux/module.h>
33 #include <asm/kvm_emulate.h>
38 * Opcode effective-address decode tables.
39 * Note that we only emulate instructions that have at least one memory
40 * operand (excluding implicit stack references). We assume that stack
41 * references and instruction fetches will never occur in special memory
42 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
46 /* Operand sizes: 8-bit operands or specified/overridden size. */
47 #define ByteOp (1<<0) /* 8-bit operands. */
48 /* Destination operand type. */
49 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
50 #define DstReg (2<<1) /* Register operand. */
51 #define DstMem (3<<1) /* Memory operand. */
52 #define DstAcc (4<<1) /* Destination Accumulator */
53 #define DstMask (7<<1)
54 /* Source operand type. */
55 #define SrcNone (0<<4) /* No source operand. */
56 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
57 #define SrcReg (1<<4) /* Register operand. */
58 #define SrcMem (2<<4) /* Memory operand. */
59 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
60 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
61 #define SrcImm (5<<4) /* Immediate operand. */
62 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
63 #define SrcOne (7<<4) /* Implied '1' */
64 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
65 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
66 #define SrcMask (0xf<<4)
67 /* Generic ModRM decode. */
69 /* Destination is only written; never read. */
72 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
73 #define String (1<<12) /* String instruction (rep capable) */
74 #define Stack (1<<13) /* Stack instruction (push/pop) */
75 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
76 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
77 #define GroupMask 0xff /* Group number stored in bits 0:7 */
79 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
80 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
82 /* Source 2 operand type */
83 #define Src2None (0<<29)
84 #define Src2CL (1<<29)
85 #define Src2ImmByte (2<<29)
86 #define Src2One (3<<29)
87 #define Src2Imm16 (4<<29)
88 #define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
89 in memory and second argument is located
90 immediately after the first one in memory. */
91 #define Src2Mask (7<<29)
94 Group1_80
, Group1_81
, Group1_82
, Group1_83
,
95 Group1A
, Group3_Byte
, Group3
, Group4
, Group5
, Group7
,
99 static u32 opcode_table
[256] = {
101 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
102 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
103 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
104 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
106 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
107 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
108 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
109 ImplicitOps
| Stack
| No64
, 0,
111 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
112 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
113 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
114 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
116 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
117 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
118 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
119 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
121 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
122 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
123 DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
125 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
126 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
129 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
130 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
133 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
134 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
135 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
138 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
140 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
142 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
143 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
145 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
146 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
148 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
149 0, DstReg
| SrcMem32
| ModRM
| Mov
/* movsxd (x86/64) */ ,
152 SrcImm
| Mov
| Stack
, 0, SrcImmByte
| Mov
| Stack
, 0,
153 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* insb, insw/insd */
154 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* outsb, outsw/outsd */
156 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
157 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
159 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
160 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
162 Group
| Group1_80
, Group
| Group1_81
,
163 Group
| Group1_82
, Group
| Group1_83
,
164 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
165 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
167 ByteOp
| DstMem
| SrcReg
| ModRM
| Mov
, DstMem
| SrcReg
| ModRM
| Mov
,
168 ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
169 DstMem
| SrcReg
| ModRM
| Mov
, ModRM
| DstReg
,
170 DstReg
| SrcMem
| ModRM
| Mov
, Group
| Group1A
,
172 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
174 0, 0, SrcImm
| Src2Imm16
| No64
, 0,
175 ImplicitOps
| Stack
, ImplicitOps
| Stack
, 0, 0,
177 ByteOp
| DstReg
| SrcMem
| Mov
| MemAbs
, DstReg
| SrcMem
| Mov
| MemAbs
,
178 ByteOp
| DstMem
| SrcReg
| Mov
| MemAbs
, DstMem
| SrcReg
| Mov
| MemAbs
,
179 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
180 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
182 0, 0, ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
183 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
184 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
186 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
187 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
188 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
189 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
191 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
192 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
193 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
194 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
196 ByteOp
| DstMem
| SrcImm
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
197 0, ImplicitOps
| Stack
, 0, 0,
198 ByteOp
| DstMem
| SrcImm
| ModRM
| Mov
, DstMem
| SrcImm
| ModRM
| Mov
,
200 0, 0, 0, ImplicitOps
| Stack
,
201 ImplicitOps
, SrcImmByte
, ImplicitOps
| No64
, ImplicitOps
,
203 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
204 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
207 0, 0, 0, 0, 0, 0, 0, 0,
210 ByteOp
| SrcImmUByte
, SrcImmUByte
,
211 ByteOp
| SrcImmUByte
, SrcImmUByte
,
213 SrcImm
| Stack
, SrcImm
| ImplicitOps
,
214 SrcImmU
| Src2Imm16
| No64
, SrcImmByte
| ImplicitOps
,
215 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
216 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
219 ImplicitOps
| Priv
, ImplicitOps
, Group
| Group3_Byte
, Group
| Group3
,
221 ImplicitOps
, 0, ImplicitOps
, ImplicitOps
,
222 ImplicitOps
, ImplicitOps
, Group
| Group4
, Group
| Group5
,
225 static u32 twobyte_table
[256] = {
227 0, Group
| GroupDual
| Group7
, 0, 0,
228 0, ImplicitOps
, ImplicitOps
| Priv
, 0,
229 ImplicitOps
| Priv
, ImplicitOps
| Priv
, 0, 0,
230 0, ImplicitOps
| ModRM
, 0, 0,
232 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps
| ModRM
, 0, 0, 0, 0, 0, 0, 0,
234 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
235 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
237 0, 0, 0, 0, 0, 0, 0, 0,
239 ImplicitOps
| Priv
, 0, ImplicitOps
| Priv
, 0,
240 ImplicitOps
, ImplicitOps
| Priv
, 0, 0,
241 0, 0, 0, 0, 0, 0, 0, 0,
243 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
244 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
245 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
246 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
248 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
249 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
250 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
251 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
253 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
255 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
259 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
260 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
262 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
264 ImplicitOps
| Stack
, ImplicitOps
| Stack
,
265 0, DstMem
| SrcReg
| ModRM
| BitOp
,
266 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
267 DstMem
| SrcReg
| Src2CL
| ModRM
, 0, 0,
269 ImplicitOps
| Stack
, ImplicitOps
| Stack
,
270 0, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
271 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
272 DstMem
| SrcReg
| Src2CL
| ModRM
,
275 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
276 0, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
277 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
278 DstReg
| SrcMem16
| ModRM
| Mov
,
281 Group
| Group8
, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
282 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
283 DstReg
| SrcMem16
| ModRM
| Mov
,
285 0, 0, 0, DstMem
| SrcReg
| ModRM
| Mov
,
286 0, 0, 0, Group
| GroupDual
| Group9
,
287 0, 0, 0, 0, 0, 0, 0, 0,
289 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
291 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
296 static u32 group_table
[] = {
298 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
299 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
300 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
301 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
302 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
303 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
304 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
305 ByteOp
| DstMem
| SrcImm
| ModRM
,
307 DstMem
| SrcImm
| ModRM
| Lock
,
308 DstMem
| SrcImm
| ModRM
| Lock
,
309 DstMem
| SrcImm
| ModRM
| Lock
,
310 DstMem
| SrcImm
| ModRM
| Lock
,
311 DstMem
| SrcImm
| ModRM
| Lock
,
312 DstMem
| SrcImm
| ModRM
| Lock
,
313 DstMem
| SrcImm
| ModRM
| Lock
,
314 DstMem
| SrcImm
| ModRM
,
316 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
317 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
318 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
319 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
320 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
321 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
322 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
323 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
,
325 DstMem
| SrcImmByte
| ModRM
| Lock
,
326 DstMem
| SrcImmByte
| ModRM
| Lock
,
327 DstMem
| SrcImmByte
| ModRM
| Lock
,
328 DstMem
| SrcImmByte
| ModRM
| Lock
,
329 DstMem
| SrcImmByte
| ModRM
| Lock
,
330 DstMem
| SrcImmByte
| ModRM
| Lock
,
331 DstMem
| SrcImmByte
| ModRM
| Lock
,
332 DstMem
| SrcImmByte
| ModRM
,
334 DstMem
| SrcNone
| ModRM
| Mov
| Stack
, 0, 0, 0, 0, 0, 0, 0,
336 ByteOp
| SrcImm
| DstMem
| ModRM
, 0,
337 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
340 DstMem
| SrcImm
| ModRM
, 0,
341 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
344 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
347 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
348 SrcMem
| ModRM
| Stack
, 0,
349 SrcMem
| ModRM
| Stack
, SrcMem
| ModRM
| Src2Mem16
| ImplicitOps
,
350 SrcMem
| ModRM
| Stack
, 0,
352 0, 0, ModRM
| SrcMem
| Priv
, ModRM
| SrcMem
| Priv
,
353 SrcNone
| ModRM
| DstMem
| Mov
, 0,
354 SrcMem16
| ModRM
| Mov
| Priv
, SrcMem
| ModRM
| ByteOp
| Priv
,
357 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
| Lock
,
358 DstMem
| SrcImmByte
| ModRM
| Lock
, DstMem
| SrcImmByte
| ModRM
| Lock
,
360 0, ImplicitOps
| ModRM
| Lock
, 0, 0, 0, 0, 0, 0,
363 static u32 group2_table
[] = {
365 SrcNone
| ModRM
| Priv
, 0, 0, SrcNone
| ModRM
| Priv
,
366 SrcNone
| ModRM
| DstMem
| Mov
, 0,
367 SrcMem16
| ModRM
| Mov
| Priv
, 0,
369 0, 0, 0, 0, 0, 0, 0, 0,
372 /* EFLAGS bit definitions. */
373 #define EFLG_ID (1<<21)
374 #define EFLG_VIP (1<<20)
375 #define EFLG_VIF (1<<19)
376 #define EFLG_AC (1<<18)
377 #define EFLG_VM (1<<17)
378 #define EFLG_RF (1<<16)
379 #define EFLG_IOPL (3<<12)
380 #define EFLG_NT (1<<14)
381 #define EFLG_OF (1<<11)
382 #define EFLG_DF (1<<10)
383 #define EFLG_IF (1<<9)
384 #define EFLG_TF (1<<8)
385 #define EFLG_SF (1<<7)
386 #define EFLG_ZF (1<<6)
387 #define EFLG_AF (1<<4)
388 #define EFLG_PF (1<<2)
389 #define EFLG_CF (1<<0)
392 * Instruction emulation:
393 * Most instructions are emulated directly via a fragment of inline assembly
394 * code. This allows us to save/restore EFLAGS and thus very easily pick up
395 * any modified flags.
398 #if defined(CONFIG_X86_64)
399 #define _LO32 "k" /* force 32-bit operand */
400 #define _STK "%%rsp" /* stack pointer */
401 #elif defined(__i386__)
402 #define _LO32 "" /* force 32-bit operand */
403 #define _STK "%%esp" /* stack pointer */
407 * These EFLAGS bits are restored from saved value during emulation, and
408 * any changes are written back to the saved value after emulation.
410 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
412 /* Before executing instruction: restore necessary bits in EFLAGS. */
413 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
414 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
415 "movl %"_sav",%"_LO32 _tmp"; " \
418 "movl %"_msk",%"_LO32 _tmp"; " \
419 "andl %"_LO32 _tmp",("_STK"); " \
421 "notl %"_LO32 _tmp"; " \
422 "andl %"_LO32 _tmp",("_STK"); " \
423 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
425 "orl %"_LO32 _tmp",("_STK"); " \
429 /* After executing instruction: write-back necessary bits in EFLAGS. */
430 #define _POST_EFLAGS(_sav, _msk, _tmp) \
431 /* _sav |= EFLAGS & _msk; */ \
434 "andl %"_msk",%"_LO32 _tmp"; " \
435 "orl %"_LO32 _tmp",%"_sav"; "
443 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
445 __asm__ __volatile__ ( \
446 _PRE_EFLAGS("0", "4", "2") \
447 _op _suffix " %"_x"3,%1; " \
448 _POST_EFLAGS("0", "4", "2") \
449 : "=m" (_eflags), "=m" ((_dst).val), \
451 : _y ((_src).val), "i" (EFLAGS_MASK)); \
455 /* Raw emulation: instruction has two explicit operands. */
456 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
458 unsigned long _tmp; \
460 switch ((_dst).bytes) { \
462 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
465 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
468 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
473 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
475 unsigned long _tmp; \
476 switch ((_dst).bytes) { \
478 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
481 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
482 _wx, _wy, _lx, _ly, _qx, _qy); \
487 /* Source operand is byte-sized and may be restricted to just %cl. */
488 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
489 __emulate_2op(_op, _src, _dst, _eflags, \
490 "b", "c", "b", "c", "b", "c", "b", "c")
492 /* Source operand is byte, word, long or quad sized. */
493 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
494 __emulate_2op(_op, _src, _dst, _eflags, \
495 "b", "q", "w", "r", _LO32, "r", "", "r")
497 /* Source operand is word, long or quad sized. */
498 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
499 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
500 "w", "r", _LO32, "r", "", "r")
502 /* Instruction has three operands and one operand is stored in ECX register */
503 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
505 unsigned long _tmp; \
506 _type _clv = (_cl).val; \
507 _type _srcv = (_src).val; \
508 _type _dstv = (_dst).val; \
510 __asm__ __volatile__ ( \
511 _PRE_EFLAGS("0", "5", "2") \
512 _op _suffix " %4,%1 \n" \
513 _POST_EFLAGS("0", "5", "2") \
514 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
515 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
518 (_cl).val = (unsigned long) _clv; \
519 (_src).val = (unsigned long) _srcv; \
520 (_dst).val = (unsigned long) _dstv; \
523 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
525 switch ((_dst).bytes) { \
527 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
528 "w", unsigned short); \
531 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
532 "l", unsigned int); \
535 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
536 "q", unsigned long)); \
541 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
543 unsigned long _tmp; \
545 __asm__ __volatile__ ( \
546 _PRE_EFLAGS("0", "3", "2") \
547 _op _suffix " %1; " \
548 _POST_EFLAGS("0", "3", "2") \
549 : "=m" (_eflags), "+m" ((_dst).val), \
551 : "i" (EFLAGS_MASK)); \
554 /* Instruction has only one explicit operand (no source operand). */
555 #define emulate_1op(_op, _dst, _eflags) \
557 switch ((_dst).bytes) { \
558 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
559 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
560 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
561 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
565 /* Fetch next part of the instruction being emulated. */
566 #define insn_fetch(_type, _size, _eip) \
567 ({ unsigned long _x; \
568 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
569 if (rc != X86EMUL_CONTINUE) \
575 static inline unsigned long ad_mask(struct decode_cache
*c
)
577 return (1UL << (c
->ad_bytes
<< 3)) - 1;
580 /* Access/update address held in a register, based on addressing mode. */
581 static inline unsigned long
582 address_mask(struct decode_cache
*c
, unsigned long reg
)
584 if (c
->ad_bytes
== sizeof(unsigned long))
587 return reg
& ad_mask(c
);
/*
 * Form an effective address from a segment @base plus a register value
 * @reg truncated to the current address size.
 */
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
597 register_address_increment(struct decode_cache
*c
, unsigned long *reg
, int inc
)
599 if (c
->ad_bytes
== sizeof(unsigned long))
602 *reg
= (*reg
& ~ad_mask(c
)) | ((*reg
+ inc
) & ad_mask(c
));
605 static inline void jmp_rel(struct decode_cache
*c
, int rel
)
607 register_address_increment(c
, &c
->eip
, rel
);
610 static void set_seg_override(struct decode_cache
*c
, int seg
)
612 c
->has_seg_override
= true;
613 c
->seg_override
= seg
;
616 static unsigned long seg_base(struct x86_emulate_ctxt
*ctxt
, int seg
)
618 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& seg
< VCPU_SREG_FS
)
621 return kvm_x86_ops
->get_segment_base(ctxt
->vcpu
, seg
);
624 static unsigned long seg_override_base(struct x86_emulate_ctxt
*ctxt
,
625 struct decode_cache
*c
)
627 if (!c
->has_seg_override
)
630 return seg_base(ctxt
, c
->seg_override
);
633 static unsigned long es_base(struct x86_emulate_ctxt
*ctxt
)
635 return seg_base(ctxt
, VCPU_SREG_ES
);
638 static unsigned long ss_base(struct x86_emulate_ctxt
*ctxt
)
640 return seg_base(ctxt
, VCPU_SREG_SS
);
643 static int do_fetch_insn_byte(struct x86_emulate_ctxt
*ctxt
,
644 struct x86_emulate_ops
*ops
,
645 unsigned long linear
, u8
*dest
)
647 struct fetch_cache
*fc
= &ctxt
->decode
.fetch
;
651 if (linear
< fc
->start
|| linear
>= fc
->end
) {
652 size
= min(15UL, PAGE_SIZE
- offset_in_page(linear
));
653 rc
= ops
->fetch(linear
, fc
->data
, size
, ctxt
->vcpu
, NULL
);
654 if (rc
!= X86EMUL_CONTINUE
)
657 fc
->end
= linear
+ size
;
659 *dest
= fc
->data
[linear
- fc
->start
];
660 return X86EMUL_CONTINUE
;
663 static int do_insn_fetch(struct x86_emulate_ctxt
*ctxt
,
664 struct x86_emulate_ops
*ops
,
665 unsigned long eip
, void *dest
, unsigned size
)
669 /* x86 instructions are limited to 15 bytes. */
670 if (eip
+ size
- ctxt
->decode
.eip_orig
> 15)
671 return X86EMUL_UNHANDLEABLE
;
672 eip
+= ctxt
->cs_base
;
674 rc
= do_fetch_insn_byte(ctxt
, ops
, eip
++, dest
++);
675 if (rc
!= X86EMUL_CONTINUE
)
678 return X86EMUL_CONTINUE
;
682 * Given the 'reg' portion of a ModRM byte, and a register block, return a
683 * pointer into the block that addresses the relevant register.
684 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
686 static void *decode_register(u8 modrm_reg
, unsigned long *regs
,
691 p
= ®s
[modrm_reg
];
692 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
693 p
= (unsigned char *)®s
[modrm_reg
& 3] + 1;
697 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
698 struct x86_emulate_ops
*ops
,
700 u16
*size
, unsigned long *address
, int op_bytes
)
707 rc
= ops
->read_std((unsigned long)ptr
, (unsigned long *)size
, 2,
709 if (rc
!= X86EMUL_CONTINUE
)
711 rc
= ops
->read_std((unsigned long)ptr
+ 2, address
, op_bytes
,
716 static int test_cc(unsigned int condition
, unsigned int flags
)
720 switch ((condition
& 15) >> 1) {
722 rc
|= (flags
& EFLG_OF
);
724 case 1: /* b/c/nae */
725 rc
|= (flags
& EFLG_CF
);
728 rc
|= (flags
& EFLG_ZF
);
731 rc
|= (flags
& (EFLG_CF
|EFLG_ZF
));
734 rc
|= (flags
& EFLG_SF
);
737 rc
|= (flags
& EFLG_PF
);
740 rc
|= (flags
& EFLG_ZF
);
743 rc
|= (!(flags
& EFLG_SF
) != !(flags
& EFLG_OF
));
747 /* Odd condition identifiers (lsb == 1) have inverted sense. */
748 return (!!rc
^ (condition
& 1));
751 static void decode_register_operand(struct operand
*op
,
752 struct decode_cache
*c
,
755 unsigned reg
= c
->modrm_reg
;
756 int highbyte_regs
= c
->rex_prefix
== 0;
759 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
761 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
762 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
763 op
->val
= *(u8
*)op
->ptr
;
766 op
->ptr
= decode_register(reg
, c
->regs
, 0);
767 op
->bytes
= c
->op_bytes
;
770 op
->val
= *(u16
*)op
->ptr
;
773 op
->val
= *(u32
*)op
->ptr
;
776 op
->val
= *(u64
*) op
->ptr
;
780 op
->orig_val
= op
->val
;
783 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
784 struct x86_emulate_ops
*ops
)
786 struct decode_cache
*c
= &ctxt
->decode
;
788 int index_reg
= 0, base_reg
= 0, scale
;
789 int rc
= X86EMUL_CONTINUE
;
792 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
793 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
794 c
->modrm_rm
= base_reg
= (c
->rex_prefix
& 1) << 3; /* REG.B */
797 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
798 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
799 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
800 c
->modrm_rm
|= (c
->modrm
& 0x07);
804 if (c
->modrm_mod
== 3) {
805 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
806 c
->regs
, c
->d
& ByteOp
);
807 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
811 if (c
->ad_bytes
== 2) {
812 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
813 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
814 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
815 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
817 /* 16-bit ModR/M decode. */
818 switch (c
->modrm_mod
) {
820 if (c
->modrm_rm
== 6)
821 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
824 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
827 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
830 switch (c
->modrm_rm
) {
832 c
->modrm_ea
+= bx
+ si
;
835 c
->modrm_ea
+= bx
+ di
;
838 c
->modrm_ea
+= bp
+ si
;
841 c
->modrm_ea
+= bp
+ di
;
850 if (c
->modrm_mod
!= 0)
857 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
858 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
859 if (!c
->has_seg_override
)
860 set_seg_override(c
, VCPU_SREG_SS
);
861 c
->modrm_ea
= (u16
)c
->modrm_ea
;
863 /* 32/64-bit ModR/M decode. */
864 if ((c
->modrm_rm
& 7) == 4) {
865 sib
= insn_fetch(u8
, 1, c
->eip
);
866 index_reg
|= (sib
>> 3) & 7;
870 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
871 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
873 c
->modrm_ea
+= c
->regs
[base_reg
];
875 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
876 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
877 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
880 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
881 switch (c
->modrm_mod
) {
883 if (c
->modrm_rm
== 5)
884 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
887 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
890 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
898 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
899 struct x86_emulate_ops
*ops
)
901 struct decode_cache
*c
= &ctxt
->decode
;
902 int rc
= X86EMUL_CONTINUE
;
904 switch (c
->ad_bytes
) {
906 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
909 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
912 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
920 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
922 struct decode_cache
*c
= &ctxt
->decode
;
923 int rc
= X86EMUL_CONTINUE
;
924 int mode
= ctxt
->mode
;
925 int def_op_bytes
, def_ad_bytes
, group
;
927 /* Shadow copy of register state. Committed on successful emulation. */
929 memset(c
, 0, sizeof(struct decode_cache
));
930 c
->eip
= c
->eip_orig
= kvm_rip_read(ctxt
->vcpu
);
931 ctxt
->cs_base
= seg_base(ctxt
, VCPU_SREG_CS
);
932 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
935 case X86EMUL_MODE_REAL
:
936 case X86EMUL_MODE_VM86
:
937 case X86EMUL_MODE_PROT16
:
938 def_op_bytes
= def_ad_bytes
= 2;
940 case X86EMUL_MODE_PROT32
:
941 def_op_bytes
= def_ad_bytes
= 4;
944 case X86EMUL_MODE_PROT64
:
953 c
->op_bytes
= def_op_bytes
;
954 c
->ad_bytes
= def_ad_bytes
;
956 /* Legacy prefixes. */
958 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
959 case 0x66: /* operand-size override */
960 /* switch between 2/4 bytes */
961 c
->op_bytes
= def_op_bytes
^ 6;
963 case 0x67: /* address-size override */
964 if (mode
== X86EMUL_MODE_PROT64
)
965 /* switch between 4/8 bytes */
966 c
->ad_bytes
= def_ad_bytes
^ 12;
968 /* switch between 2/4 bytes */
969 c
->ad_bytes
= def_ad_bytes
^ 6;
971 case 0x26: /* ES override */
972 case 0x2e: /* CS override */
973 case 0x36: /* SS override */
974 case 0x3e: /* DS override */
975 set_seg_override(c
, (c
->b
>> 3) & 3);
977 case 0x64: /* FS override */
978 case 0x65: /* GS override */
979 set_seg_override(c
, c
->b
& 7);
981 case 0x40 ... 0x4f: /* REX */
982 if (mode
!= X86EMUL_MODE_PROT64
)
984 c
->rex_prefix
= c
->b
;
986 case 0xf0: /* LOCK */
989 case 0xf2: /* REPNE/REPNZ */
990 c
->rep_prefix
= REPNE_PREFIX
;
992 case 0xf3: /* REP/REPE/REPZ */
993 c
->rep_prefix
= REPE_PREFIX
;
999 /* Any legacy prefix after a REX prefix nullifies its effect. */
1008 if (c
->rex_prefix
& 8)
1009 c
->op_bytes
= 8; /* REX.W */
1011 /* Opcode byte(s). */
1012 c
->d
= opcode_table
[c
->b
];
1014 /* Two-byte opcode? */
1017 c
->b
= insn_fetch(u8
, 1, c
->eip
);
1018 c
->d
= twobyte_table
[c
->b
];
1023 group
= c
->d
& GroupMask
;
1024 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
1027 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
1028 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
1029 c
->d
= group2_table
[group
];
1031 c
->d
= group_table
[group
];
1036 DPRINTF("Cannot emulate %02x\n", c
->b
);
1040 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
1043 /* ModRM and SIB bytes. */
1045 rc
= decode_modrm(ctxt
, ops
);
1046 else if (c
->d
& MemAbs
)
1047 rc
= decode_abs(ctxt
, ops
);
1048 if (rc
!= X86EMUL_CONTINUE
)
1051 if (!c
->has_seg_override
)
1052 set_seg_override(c
, VCPU_SREG_DS
);
1054 if (!(!c
->twobyte
&& c
->b
== 0x8d))
1055 c
->modrm_ea
+= seg_override_base(ctxt
, c
);
1057 if (c
->ad_bytes
!= 8)
1058 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1060 * Decode and fetch the source operand: register, memory
1063 switch (c
->d
& SrcMask
) {
1067 decode_register_operand(&c
->src
, c
, 0);
1076 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1078 /* Don't fetch the address for invlpg: it could be unmapped. */
1079 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1083 * For instructions with a ModR/M byte, switch to register
1084 * access if Mod = 3.
1086 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1087 c
->src
.type
= OP_REG
;
1088 c
->src
.val
= c
->modrm_val
;
1089 c
->src
.ptr
= c
->modrm_ptr
;
1092 c
->src
.type
= OP_MEM
;
1096 c
->src
.type
= OP_IMM
;
1097 c
->src
.ptr
= (unsigned long *)c
->eip
;
1098 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1099 if (c
->src
.bytes
== 8)
1101 /* NB. Immediates are sign-extended as necessary. */
1102 switch (c
->src
.bytes
) {
1104 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1107 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1110 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1113 if ((c
->d
& SrcMask
) == SrcImmU
) {
1114 switch (c
->src
.bytes
) {
1119 c
->src
.val
&= 0xffff;
1122 c
->src
.val
&= 0xffffffff;
1129 c
->src
.type
= OP_IMM
;
1130 c
->src
.ptr
= (unsigned long *)c
->eip
;
1132 if ((c
->d
& SrcMask
) == SrcImmByte
)
1133 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1135 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1144 * Decode and fetch the second source operand: register, memory
1147 switch (c
->d
& Src2Mask
) {
1152 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1155 c
->src2
.type
= OP_IMM
;
1156 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1158 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1161 c
->src2
.type
= OP_IMM
;
1162 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1164 c
->src2
.val
= insn_fetch(u16
, 2, c
->eip
);
1172 c
->src2
.type
= OP_MEM
;
1176 /* Decode and fetch the destination operand: register or memory. */
1177 switch (c
->d
& DstMask
) {
1179 /* Special instructions do their own operand decoding. */
1182 decode_register_operand(&c
->dst
, c
,
1183 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1186 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1187 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1188 c
->dst
.type
= OP_REG
;
1189 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1190 c
->dst
.ptr
= c
->modrm_ptr
;
1193 c
->dst
.type
= OP_MEM
;
1196 c
->dst
.type
= OP_REG
;
1197 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1198 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1199 switch (c
->dst
.bytes
) {
1201 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1204 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1207 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1210 c
->dst
.val
= *(u64
*)c
->dst
.ptr
;
1213 c
->dst
.orig_val
= c
->dst
.val
;
1217 if (c
->rip_relative
)
1218 c
->modrm_ea
+= c
->eip
;
1221 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1224 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
)
1226 struct decode_cache
*c
= &ctxt
->decode
;
1228 c
->dst
.type
= OP_MEM
;
1229 c
->dst
.bytes
= c
->op_bytes
;
1230 c
->dst
.val
= c
->src
.val
;
1231 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1232 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
),
1233 c
->regs
[VCPU_REGS_RSP
]);
1236 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1237 struct x86_emulate_ops
*ops
,
1238 void *dest
, int len
)
1240 struct decode_cache
*c
= &ctxt
->decode
;
1243 rc
= ops
->read_emulated(register_address(c
, ss_base(ctxt
),
1244 c
->regs
[VCPU_REGS_RSP
]),
1245 dest
, len
, ctxt
->vcpu
);
1246 if (rc
!= X86EMUL_CONTINUE
)
1249 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1253 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1254 struct x86_emulate_ops
*ops
,
1255 void *dest
, int len
)
1258 unsigned long val
, change_mask
;
1259 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
1260 int cpl
= ops
->cpl(ctxt
->vcpu
);
1262 rc
= emulate_pop(ctxt
, ops
, &val
, len
);
1263 if (rc
!= X86EMUL_CONTINUE
)
1266 change_mask
= EFLG_CF
| EFLG_PF
| EFLG_AF
| EFLG_ZF
| EFLG_SF
| EFLG_OF
1267 | EFLG_TF
| EFLG_DF
| EFLG_NT
| EFLG_RF
| EFLG_AC
| EFLG_ID
;
1269 switch(ctxt
->mode
) {
1270 case X86EMUL_MODE_PROT64
:
1271 case X86EMUL_MODE_PROT32
:
1272 case X86EMUL_MODE_PROT16
:
1274 change_mask
|= EFLG_IOPL
;
1276 change_mask
|= EFLG_IF
;
1278 case X86EMUL_MODE_VM86
:
1280 kvm_inject_gp(ctxt
->vcpu
, 0);
1281 return X86EMUL_PROPAGATE_FAULT
;
1283 change_mask
|= EFLG_IF
;
1285 default: /* real mode */
1286 change_mask
|= (EFLG_IOPL
| EFLG_IF
);
1290 *(unsigned long *)dest
=
1291 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1296 static void emulate_push_sreg(struct x86_emulate_ctxt
*ctxt
, int seg
)
1298 struct decode_cache
*c
= &ctxt
->decode
;
1299 struct kvm_segment segment
;
1301 kvm_x86_ops
->get_segment(ctxt
->vcpu
, &segment
, seg
);
1303 c
->src
.val
= segment
.selector
;
1307 static int emulate_pop_sreg(struct x86_emulate_ctxt
*ctxt
,
1308 struct x86_emulate_ops
*ops
, int seg
)
1310 struct decode_cache
*c
= &ctxt
->decode
;
1311 unsigned long selector
;
1314 rc
= emulate_pop(ctxt
, ops
, &selector
, c
->op_bytes
);
1315 if (rc
!= X86EMUL_CONTINUE
)
1318 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)selector
, seg
);
1322 static void emulate_pusha(struct x86_emulate_ctxt
*ctxt
)
1324 struct decode_cache
*c
= &ctxt
->decode
;
1325 unsigned long old_esp
= c
->regs
[VCPU_REGS_RSP
];
1326 int reg
= VCPU_REGS_RAX
;
1328 while (reg
<= VCPU_REGS_RDI
) {
1329 (reg
== VCPU_REGS_RSP
) ?
1330 (c
->src
.val
= old_esp
) : (c
->src
.val
= c
->regs
[reg
]);
1337 static int emulate_popa(struct x86_emulate_ctxt
*ctxt
,
1338 struct x86_emulate_ops
*ops
)
1340 struct decode_cache
*c
= &ctxt
->decode
;
1341 int rc
= X86EMUL_CONTINUE
;
1342 int reg
= VCPU_REGS_RDI
;
1344 while (reg
>= VCPU_REGS_RAX
) {
1345 if (reg
== VCPU_REGS_RSP
) {
1346 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
],
1351 rc
= emulate_pop(ctxt
, ops
, &c
->regs
[reg
], c
->op_bytes
);
1352 if (rc
!= X86EMUL_CONTINUE
)
1359 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1360 struct x86_emulate_ops
*ops
)
1362 struct decode_cache
*c
= &ctxt
->decode
;
1364 return emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1367 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1369 struct decode_cache
*c
= &ctxt
->decode
;
1370 switch (c
->modrm_reg
) {
1372 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1375 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1378 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1381 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1383 case 4: /* sal/shl */
1384 case 6: /* sal/shl */
1385 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1388 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1391 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1396 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1397 struct x86_emulate_ops
*ops
)
1399 struct decode_cache
*c
= &ctxt
->decode
;
1400 int rc
= X86EMUL_CONTINUE
;
1402 switch (c
->modrm_reg
) {
1403 case 0 ... 1: /* test */
1404 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1407 c
->dst
.val
= ~c
->dst
.val
;
1410 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1413 DPRINTF("Cannot emulate %02x\n", c
->b
);
1414 rc
= X86EMUL_UNHANDLEABLE
;
1420 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1421 struct x86_emulate_ops
*ops
)
1423 struct decode_cache
*c
= &ctxt
->decode
;
1425 switch (c
->modrm_reg
) {
1427 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1430 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1432 case 2: /* call near abs */ {
1435 c
->eip
= c
->src
.val
;
1436 c
->src
.val
= old_eip
;
1440 case 4: /* jmp abs */
1441 c
->eip
= c
->src
.val
;
1447 return X86EMUL_CONTINUE
;
1450 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1451 struct x86_emulate_ops
*ops
,
1452 unsigned long memop
)
1454 struct decode_cache
*c
= &ctxt
->decode
;
1458 rc
= ops
->read_emulated(memop
, &old
, 8, ctxt
->vcpu
);
1459 if (rc
!= X86EMUL_CONTINUE
)
1462 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1463 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
1465 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1466 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1467 ctxt
->eflags
&= ~EFLG_ZF
;
1470 new = ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1471 (u32
) c
->regs
[VCPU_REGS_RBX
];
1473 rc
= ops
->cmpxchg_emulated(memop
, &old
, &new, 8, ctxt
->vcpu
);
1474 if (rc
!= X86EMUL_CONTINUE
)
1476 ctxt
->eflags
|= EFLG_ZF
;
1478 return X86EMUL_CONTINUE
;
1481 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1482 struct x86_emulate_ops
*ops
)
1484 struct decode_cache
*c
= &ctxt
->decode
;
1488 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1489 if (rc
!= X86EMUL_CONTINUE
)
1491 if (c
->op_bytes
== 4)
1492 c
->eip
= (u32
)c
->eip
;
1493 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
1494 if (rc
!= X86EMUL_CONTINUE
)
1496 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)cs
, VCPU_SREG_CS
);
1500 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1501 struct x86_emulate_ops
*ops
)
1504 struct decode_cache
*c
= &ctxt
->decode
;
1506 switch (c
->dst
.type
) {
1508 /* The 4-byte case *is* correct:
1509 * in 64-bit mode we zero-extend.
1511 switch (c
->dst
.bytes
) {
1513 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1516 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1519 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1520 break; /* 64b: zero-ext */
1522 *c
->dst
.ptr
= c
->dst
.val
;
1528 rc
= ops
->cmpxchg_emulated(
1529 (unsigned long)c
->dst
.ptr
,
1535 rc
= ops
->write_emulated(
1536 (unsigned long)c
->dst
.ptr
,
1540 if (rc
!= X86EMUL_CONTINUE
)
1549 return X86EMUL_CONTINUE
;
1552 static void toggle_interruptibility(struct x86_emulate_ctxt
*ctxt
, u32 mask
)
1554 u32 int_shadow
= kvm_x86_ops
->get_interrupt_shadow(ctxt
->vcpu
, mask
);
1556 * an sti; sti; sequence only disable interrupts for the first
1557 * instruction. So, if the last instruction, be it emulated or
1558 * not, left the system with the INT_STI flag enabled, it
1559 * means that the last instruction is an sti. We should not
1560 * leave the flag on in this case. The same goes for mov ss
1562 if (!(int_shadow
& mask
))
1563 ctxt
->interruptibility
= mask
;
1567 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1568 struct kvm_segment
*cs
, struct kvm_segment
*ss
)
1570 memset(cs
, 0, sizeof(struct kvm_segment
));
1571 kvm_x86_ops
->get_segment(ctxt
->vcpu
, cs
, VCPU_SREG_CS
);
1572 memset(ss
, 0, sizeof(struct kvm_segment
));
1574 cs
->l
= 0; /* will be adjusted later */
1575 cs
->base
= 0; /* flat segment */
1576 cs
->g
= 1; /* 4kb granularity */
1577 cs
->limit
= 0xffffffff; /* 4GB limit */
1578 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1580 cs
->dpl
= 0; /* will be adjusted later */
1585 ss
->base
= 0; /* flat segment */
1586 ss
->limit
= 0xffffffff; /* 4GB limit */
1587 ss
->g
= 1; /* 4kb granularity */
1589 ss
->type
= 0x03; /* Read/Write, Accessed */
1590 ss
->db
= 1; /* 32bit stack segment */
1596 emulate_syscall(struct x86_emulate_ctxt
*ctxt
)
1598 struct decode_cache
*c
= &ctxt
->decode
;
1599 struct kvm_segment cs
, ss
;
1602 /* syscall is not available in real mode */
1603 if (ctxt
->mode
== X86EMUL_MODE_REAL
|| ctxt
->mode
== X86EMUL_MODE_VM86
)
1604 return X86EMUL_UNHANDLEABLE
;
1606 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1608 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1610 cs
.selector
= (u16
)(msr_data
& 0xfffc);
1611 ss
.selector
= (u16
)(msr_data
+ 8);
1613 if (is_long_mode(ctxt
->vcpu
)) {
1617 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1618 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1620 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1621 if (is_long_mode(ctxt
->vcpu
)) {
1622 #ifdef CONFIG_X86_64
1623 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1625 kvm_x86_ops
->get_msr(ctxt
->vcpu
,
1626 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1627 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
1630 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1631 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
1635 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1636 c
->eip
= (u32
)msr_data
;
1638 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1641 return X86EMUL_CONTINUE
;
1645 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
)
1647 struct decode_cache
*c
= &ctxt
->decode
;
1648 struct kvm_segment cs
, ss
;
1651 /* inject #GP if in real mode */
1652 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1653 kvm_inject_gp(ctxt
->vcpu
, 0);
1654 return X86EMUL_UNHANDLEABLE
;
1657 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1658 * Therefore, we inject an #UD.
1660 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1661 return X86EMUL_UNHANDLEABLE
;
1663 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1665 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1666 switch (ctxt
->mode
) {
1667 case X86EMUL_MODE_PROT32
:
1668 if ((msr_data
& 0xfffc) == 0x0) {
1669 kvm_inject_gp(ctxt
->vcpu
, 0);
1670 return X86EMUL_PROPAGATE_FAULT
;
1673 case X86EMUL_MODE_PROT64
:
1674 if (msr_data
== 0x0) {
1675 kvm_inject_gp(ctxt
->vcpu
, 0);
1676 return X86EMUL_PROPAGATE_FAULT
;
1681 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1682 cs
.selector
= (u16
)msr_data
;
1683 cs
.selector
&= ~SELECTOR_RPL_MASK
;
1684 ss
.selector
= cs
.selector
+ 8;
1685 ss
.selector
&= ~SELECTOR_RPL_MASK
;
1686 if (ctxt
->mode
== X86EMUL_MODE_PROT64
1687 || is_long_mode(ctxt
->vcpu
)) {
1692 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1693 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1695 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
1698 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
1699 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
1701 return X86EMUL_CONTINUE
;
1705 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
)
1707 struct decode_cache
*c
= &ctxt
->decode
;
1708 struct kvm_segment cs
, ss
;
1712 /* inject #GP if in real mode or Virtual 8086 mode */
1713 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1714 ctxt
->mode
== X86EMUL_MODE_VM86
) {
1715 kvm_inject_gp(ctxt
->vcpu
, 0);
1716 return X86EMUL_UNHANDLEABLE
;
1719 setup_syscalls_segments(ctxt
, &cs
, &ss
);
1721 if ((c
->rex_prefix
& 0x8) != 0x0)
1722 usermode
= X86EMUL_MODE_PROT64
;
1724 usermode
= X86EMUL_MODE_PROT32
;
1728 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1730 case X86EMUL_MODE_PROT32
:
1731 cs
.selector
= (u16
)(msr_data
+ 16);
1732 if ((msr_data
& 0xfffc) == 0x0) {
1733 kvm_inject_gp(ctxt
->vcpu
, 0);
1734 return X86EMUL_PROPAGATE_FAULT
;
1736 ss
.selector
= (u16
)(msr_data
+ 24);
1738 case X86EMUL_MODE_PROT64
:
1739 cs
.selector
= (u16
)(msr_data
+ 32);
1740 if (msr_data
== 0x0) {
1741 kvm_inject_gp(ctxt
->vcpu
, 0);
1742 return X86EMUL_PROPAGATE_FAULT
;
1744 ss
.selector
= cs
.selector
+ 8;
1749 cs
.selector
|= SELECTOR_RPL_MASK
;
1750 ss
.selector
|= SELECTOR_RPL_MASK
;
1752 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1753 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
1755 c
->eip
= ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RDX
];
1756 c
->regs
[VCPU_REGS_RSP
] = ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RCX
];
1758 return X86EMUL_CONTINUE
;
1761 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
,
1762 struct x86_emulate_ops
*ops
)
1765 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
1767 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
1769 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
1770 return ops
->cpl(ctxt
->vcpu
) > iopl
;
1773 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt
*ctxt
,
1774 struct x86_emulate_ops
*ops
,
1777 struct kvm_segment tr_seg
;
1780 u8 perm
, bit_idx
= port
& 0x7;
1781 unsigned mask
= (1 << len
) - 1;
1783 kvm_get_segment(ctxt
->vcpu
, &tr_seg
, VCPU_SREG_TR
);
1784 if (tr_seg
.unusable
)
1786 if (tr_seg
.limit
< 103)
1788 r
= ops
->read_std(tr_seg
.base
+ 102, &io_bitmap_ptr
, 2, ctxt
->vcpu
,
1790 if (r
!= X86EMUL_CONTINUE
)
1792 if (io_bitmap_ptr
+ port
/8 > tr_seg
.limit
)
1794 r
= ops
->read_std(tr_seg
.base
+ io_bitmap_ptr
+ port
/8, &perm
, 1,
1796 if (r
!= X86EMUL_CONTINUE
)
1798 if ((perm
>> bit_idx
) & mask
)
1803 static bool emulator_io_permited(struct x86_emulate_ctxt
*ctxt
,
1804 struct x86_emulate_ops
*ops
,
1807 if (emulator_bad_iopl(ctxt
, ops
))
1808 if (!emulator_io_port_access_allowed(ctxt
, ops
, port
, len
))
1814 x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1816 unsigned long memop
= 0;
1818 unsigned long saved_eip
= 0;
1819 struct decode_cache
*c
= &ctxt
->decode
;
1822 int rc
= X86EMUL_CONTINUE
;
1824 ctxt
->interruptibility
= 0;
1826 /* Shadow copy of register state. Committed on successful emulation.
1827 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1831 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
1834 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& (c
->d
& No64
)) {
1835 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
1839 /* LOCK prefix is allowed only with some instructions */
1840 if (c
->lock_prefix
&& !(c
->d
& Lock
)) {
1841 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
1845 /* Privileged instruction can be executed only in CPL=0 */
1846 if ((c
->d
& Priv
) && ops
->cpl(ctxt
->vcpu
)) {
1847 kvm_inject_gp(ctxt
->vcpu
, 0);
1851 if (((c
->d
& ModRM
) && (c
->modrm_mod
!= 3)) || (c
->d
& MemAbs
))
1852 memop
= c
->modrm_ea
;
1854 if (c
->rep_prefix
&& (c
->d
& String
)) {
1855 /* All REP prefixes have the same first termination condition */
1856 if (address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) == 0) {
1857 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1860 /* The second termination condition only applies for REPE
1861 * and REPNE. Test if the repeat string operation prefix is
1862 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
1863 * corresponding termination condition according to:
1864 * - if REPE/REPZ and ZF = 0 then done
1865 * - if REPNE/REPNZ and ZF = 1 then done
1867 if ((c
->b
== 0xa6) || (c
->b
== 0xa7) ||
1868 (c
->b
== 0xae) || (c
->b
== 0xaf)) {
1869 if ((c
->rep_prefix
== REPE_PREFIX
) &&
1870 ((ctxt
->eflags
& EFLG_ZF
) == 0)) {
1871 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1874 if ((c
->rep_prefix
== REPNE_PREFIX
) &&
1875 ((ctxt
->eflags
& EFLG_ZF
) == EFLG_ZF
)) {
1876 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1880 register_address_increment(c
, &c
->regs
[VCPU_REGS_RCX
], -1);
1881 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
1884 if (c
->src
.type
== OP_MEM
) {
1885 c
->src
.ptr
= (unsigned long *)memop
;
1887 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1891 if (rc
!= X86EMUL_CONTINUE
)
1893 c
->src
.orig_val
= c
->src
.val
;
1896 if (c
->src2
.type
== OP_MEM
) {
1897 c
->src2
.ptr
= (unsigned long *)(memop
+ c
->src
.bytes
);
1899 rc
= ops
->read_emulated((unsigned long)c
->src2
.ptr
,
1903 if (rc
!= X86EMUL_CONTINUE
)
1907 if ((c
->d
& DstMask
) == ImplicitOps
)
1911 if (c
->dst
.type
== OP_MEM
) {
1912 c
->dst
.ptr
= (unsigned long *)memop
;
1913 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1916 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1918 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1919 (c
->src
.val
& mask
) / 8;
1921 if (!(c
->d
& Mov
)) {
1922 /* optimisation - avoid slow emulated read */
1923 rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1927 if (rc
!= X86EMUL_CONTINUE
)
1931 c
->dst
.orig_val
= c
->dst
.val
;
1941 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
1943 case 0x06: /* push es */
1944 emulate_push_sreg(ctxt
, VCPU_SREG_ES
);
1946 case 0x07: /* pop es */
1947 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
1948 if (rc
!= X86EMUL_CONTINUE
)
1953 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
1955 case 0x0e: /* push cs */
1956 emulate_push_sreg(ctxt
, VCPU_SREG_CS
);
1960 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
1962 case 0x16: /* push ss */
1963 emulate_push_sreg(ctxt
, VCPU_SREG_SS
);
1965 case 0x17: /* pop ss */
1966 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
1967 if (rc
!= X86EMUL_CONTINUE
)
1972 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
1974 case 0x1e: /* push ds */
1975 emulate_push_sreg(ctxt
, VCPU_SREG_DS
);
1977 case 0x1f: /* pop ds */
1978 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
1979 if (rc
!= X86EMUL_CONTINUE
)
1984 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
1988 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
1992 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
1996 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1998 case 0x40 ... 0x47: /* inc r16/r32 */
1999 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2001 case 0x48 ... 0x4f: /* dec r16/r32 */
2002 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2004 case 0x50 ... 0x57: /* push reg */
2007 case 0x58 ... 0x5f: /* pop reg */
2009 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2010 if (rc
!= X86EMUL_CONTINUE
)
2013 case 0x60: /* pusha */
2014 emulate_pusha(ctxt
);
2016 case 0x61: /* popa */
2017 rc
= emulate_popa(ctxt
, ops
);
2018 if (rc
!= X86EMUL_CONTINUE
)
2021 case 0x63: /* movsxd */
2022 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
2023 goto cannot_emulate
;
2024 c
->dst
.val
= (s32
) c
->src
.val
;
2026 case 0x68: /* push imm */
2027 case 0x6a: /* push imm8 */
2030 case 0x6c: /* insb */
2031 case 0x6d: /* insw/insd */
2032 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2033 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
)) {
2034 kvm_inject_gp(ctxt
->vcpu
, 0);
2037 if (kvm_emulate_pio_string(ctxt
->vcpu
,
2039 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2041 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
2042 (ctxt
->eflags
& EFLG_DF
),
2043 register_address(c
, es_base(ctxt
),
2044 c
->regs
[VCPU_REGS_RDI
]),
2046 c
->regs
[VCPU_REGS_RDX
]) == 0) {
2051 case 0x6e: /* outsb */
2052 case 0x6f: /* outsw/outsd */
2053 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2054 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
)) {
2055 kvm_inject_gp(ctxt
->vcpu
, 0);
2058 if (kvm_emulate_pio_string(ctxt
->vcpu
,
2060 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2062 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
2063 (ctxt
->eflags
& EFLG_DF
),
2065 seg_override_base(ctxt
, c
),
2066 c
->regs
[VCPU_REGS_RSI
]),
2068 c
->regs
[VCPU_REGS_RDX
]) == 0) {
2073 case 0x70 ... 0x7f: /* jcc (short) */
2074 if (test_cc(c
->b
, ctxt
->eflags
))
2075 jmp_rel(c
, c
->src
.val
);
2077 case 0x80 ... 0x83: /* Grp1 */
2078 switch (c
->modrm_reg
) {
2098 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
2100 case 0x86 ... 0x87: /* xchg */
2102 /* Write back the register source. */
2103 switch (c
->dst
.bytes
) {
2105 *(u8
*) c
->src
.ptr
= (u8
) c
->dst
.val
;
2108 *(u16
*) c
->src
.ptr
= (u16
) c
->dst
.val
;
2111 *c
->src
.ptr
= (u32
) c
->dst
.val
;
2112 break; /* 64b reg: zero-extend */
2114 *c
->src
.ptr
= c
->dst
.val
;
2118 * Write back the memory destination with implicit LOCK
2121 c
->dst
.val
= c
->src
.val
;
2124 case 0x88 ... 0x8b: /* mov */
2126 case 0x8c: { /* mov r/m, sreg */
2127 struct kvm_segment segreg
;
2129 if (c
->modrm_reg
<= 5)
2130 kvm_get_segment(ctxt
->vcpu
, &segreg
, c
->modrm_reg
);
2132 printk(KERN_INFO
"0x8c: Invalid segreg in modrm byte 0x%02x\n",
2134 goto cannot_emulate
;
2136 c
->dst
.val
= segreg
.selector
;
2139 case 0x8d: /* lea r16/r32, m */
2140 c
->dst
.val
= c
->modrm_ea
;
2142 case 0x8e: { /* mov seg, r/m16 */
2147 if (c
->modrm_reg
== VCPU_SREG_CS
||
2148 c
->modrm_reg
> VCPU_SREG_GS
) {
2149 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
2153 if (c
->modrm_reg
== VCPU_SREG_SS
)
2154 toggle_interruptibility(ctxt
, KVM_X86_SHADOW_INT_MOV_SS
);
2156 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, sel
, c
->modrm_reg
);
2158 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2161 case 0x8f: /* pop (sole member of Grp1a) */
2162 rc
= emulate_grp1a(ctxt
, ops
);
2163 if (rc
!= X86EMUL_CONTINUE
)
2166 case 0x90: /* nop / xchg r8,rax */
2167 if (!(c
->rex_prefix
& 1)) { /* nop */
2168 c
->dst
.type
= OP_NONE
;
2171 case 0x91 ... 0x97: /* xchg reg,rax */
2172 c
->src
.type
= c
->dst
.type
= OP_REG
;
2173 c
->src
.bytes
= c
->dst
.bytes
= c
->op_bytes
;
2174 c
->src
.ptr
= (unsigned long *) &c
->regs
[VCPU_REGS_RAX
];
2175 c
->src
.val
= *(c
->src
.ptr
);
2177 case 0x9c: /* pushf */
2178 c
->src
.val
= (unsigned long) ctxt
->eflags
;
2181 case 0x9d: /* popf */
2182 c
->dst
.type
= OP_REG
;
2183 c
->dst
.ptr
= (unsigned long *) &ctxt
->eflags
;
2184 c
->dst
.bytes
= c
->op_bytes
;
2185 rc
= emulate_popf(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2186 if (rc
!= X86EMUL_CONTINUE
)
2189 case 0xa0 ... 0xa1: /* mov */
2190 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2191 c
->dst
.val
= c
->src
.val
;
2193 case 0xa2 ... 0xa3: /* mov */
2194 c
->dst
.val
= (unsigned long)c
->regs
[VCPU_REGS_RAX
];
2196 case 0xa4 ... 0xa5: /* movs */
2197 c
->dst
.type
= OP_MEM
;
2198 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2199 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2201 c
->regs
[VCPU_REGS_RDI
]);
2202 rc
= ops
->read_emulated(register_address(c
,
2203 seg_override_base(ctxt
, c
),
2204 c
->regs
[VCPU_REGS_RSI
]),
2206 c
->dst
.bytes
, ctxt
->vcpu
);
2207 if (rc
!= X86EMUL_CONTINUE
)
2209 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2210 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2212 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2213 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2216 case 0xa6 ... 0xa7: /* cmps */
2217 c
->src
.type
= OP_NONE
; /* Disable writeback. */
2218 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2219 c
->src
.ptr
= (unsigned long *)register_address(c
,
2220 seg_override_base(ctxt
, c
),
2221 c
->regs
[VCPU_REGS_RSI
]);
2222 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
2226 if (rc
!= X86EMUL_CONTINUE
)
2229 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2230 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2231 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2233 c
->regs
[VCPU_REGS_RDI
]);
2234 rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
2238 if (rc
!= X86EMUL_CONTINUE
)
2241 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c
->src
.ptr
, c
->dst
.ptr
);
2243 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2245 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2246 (ctxt
->eflags
& EFLG_DF
) ? -c
->src
.bytes
2248 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2249 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2253 case 0xaa ... 0xab: /* stos */
2254 c
->dst
.type
= OP_MEM
;
2255 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2256 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2258 c
->regs
[VCPU_REGS_RDI
]);
2259 c
->dst
.val
= c
->regs
[VCPU_REGS_RAX
];
2260 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2261 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2264 case 0xac ... 0xad: /* lods */
2265 c
->dst
.type
= OP_REG
;
2266 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2267 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2268 rc
= ops
->read_emulated(register_address(c
,
2269 seg_override_base(ctxt
, c
),
2270 c
->regs
[VCPU_REGS_RSI
]),
2274 if (rc
!= X86EMUL_CONTINUE
)
2276 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2277 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2280 case 0xae ... 0xaf: /* scas */
2281 DPRINTF("Urk! I don't handle SCAS.\n");
2282 goto cannot_emulate
;
2283 case 0xb0 ... 0xbf: /* mov r, imm */
2288 case 0xc3: /* ret */
2289 c
->dst
.type
= OP_REG
;
2290 c
->dst
.ptr
= &c
->eip
;
2291 c
->dst
.bytes
= c
->op_bytes
;
2292 goto pop_instruction
;
2293 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2295 c
->dst
.val
= c
->src
.val
;
2297 case 0xcb: /* ret far */
2298 rc
= emulate_ret_far(ctxt
, ops
);
2299 if (rc
!= X86EMUL_CONTINUE
)
2302 case 0xd0 ... 0xd1: /* Grp2 */
2306 case 0xd2 ... 0xd3: /* Grp2 */
2307 c
->src
.val
= c
->regs
[VCPU_REGS_RCX
];
2310 case 0xe4: /* inb */
2315 case 0xe6: /* outb */
2316 case 0xe7: /* out */
2320 case 0xe8: /* call (near) */ {
2321 long int rel
= c
->src
.val
;
2322 c
->src
.val
= (unsigned long) c
->eip
;
2327 case 0xe9: /* jmp rel */
2329 case 0xea: /* jmp far */
2331 if (kvm_load_segment_descriptor(ctxt
->vcpu
, c
->src2
.val
,
2335 c
->eip
= c
->src
.val
;
2338 jmp
: /* jmp rel short */
2339 jmp_rel(c
, c
->src
.val
);
2340 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2342 case 0xec: /* in al,dx */
2343 case 0xed: /* in (e/r)ax,dx */
2344 port
= c
->regs
[VCPU_REGS_RDX
];
2347 case 0xee: /* out al,dx */
2348 case 0xef: /* out (e/r)ax,dx */
2349 port
= c
->regs
[VCPU_REGS_RDX
];
2352 if (!emulator_io_permited(ctxt
, ops
, port
,
2353 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
)) {
2354 kvm_inject_gp(ctxt
->vcpu
, 0);
2357 if (kvm_emulate_pio(ctxt
->vcpu
, io_dir_in
,
2358 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2361 goto cannot_emulate
;
2364 case 0xf4: /* hlt */
2365 ctxt
->vcpu
->arch
.halt_request
= 1;
2367 case 0xf5: /* cmc */
2368 /* complement carry flag from eflags reg */
2369 ctxt
->eflags
^= EFLG_CF
;
2370 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2372 case 0xf6 ... 0xf7: /* Grp3 */
2373 rc
= emulate_grp3(ctxt
, ops
);
2374 if (rc
!= X86EMUL_CONTINUE
)
2377 case 0xf8: /* clc */
2378 ctxt
->eflags
&= ~EFLG_CF
;
2379 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2381 case 0xfa: /* cli */
2382 if (emulator_bad_iopl(ctxt
, ops
))
2383 kvm_inject_gp(ctxt
->vcpu
, 0);
2385 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
2386 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2389 case 0xfb: /* sti */
2390 if (emulator_bad_iopl(ctxt
, ops
))
2391 kvm_inject_gp(ctxt
->vcpu
, 0);
2393 toggle_interruptibility(ctxt
, KVM_X86_SHADOW_INT_STI
);
2394 ctxt
->eflags
|= X86_EFLAGS_IF
;
2395 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2398 case 0xfc: /* cld */
2399 ctxt
->eflags
&= ~EFLG_DF
;
2400 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2402 case 0xfd: /* std */
2403 ctxt
->eflags
|= EFLG_DF
;
2404 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2406 case 0xfe: /* Grp4 */
2408 rc
= emulate_grp45(ctxt
, ops
);
2409 if (rc
!= X86EMUL_CONTINUE
)
2412 case 0xff: /* Grp5 */
2413 if (c
->modrm_reg
== 5)
2419 rc
= writeback(ctxt
, ops
);
2420 if (rc
!= X86EMUL_CONTINUE
)
2423 /* Commit shadow register state. */
2424 memcpy(ctxt
->vcpu
->arch
.regs
, c
->regs
, sizeof c
->regs
);
2425 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
2428 if (rc
== X86EMUL_UNHANDLEABLE
) {
2436 case 0x01: /* lgdt, lidt, lmsw */
2437 switch (c
->modrm_reg
) {
2439 unsigned long address
;
2441 case 0: /* vmcall */
2442 if (c
->modrm_mod
!= 3 || c
->modrm_rm
!= 1)
2443 goto cannot_emulate
;
2445 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2446 if (rc
!= X86EMUL_CONTINUE
)
2449 /* Let the processor re-execute the fixed hypercall */
2450 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2451 /* Disable writeback. */
2452 c
->dst
.type
= OP_NONE
;
2455 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2456 &size
, &address
, c
->op_bytes
);
2457 if (rc
!= X86EMUL_CONTINUE
)
2459 realmode_lgdt(ctxt
->vcpu
, size
, address
);
2460 /* Disable writeback. */
2461 c
->dst
.type
= OP_NONE
;
2463 case 3: /* lidt/vmmcall */
2464 if (c
->modrm_mod
== 3) {
2465 switch (c
->modrm_rm
) {
2467 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2468 if (rc
!= X86EMUL_CONTINUE
)
2472 goto cannot_emulate
;
2475 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2478 if (rc
!= X86EMUL_CONTINUE
)
2480 realmode_lidt(ctxt
->vcpu
, size
, address
);
2482 /* Disable writeback. */
2483 c
->dst
.type
= OP_NONE
;
2487 c
->dst
.val
= ops
->get_cr(0, ctxt
->vcpu
);
2490 ops
->set_cr(0, (ops
->get_cr(0, ctxt
->vcpu
) & ~0x0ful
) |
2491 (c
->src
.val
& 0x0f), ctxt
->vcpu
);
2492 c
->dst
.type
= OP_NONE
;
2495 emulate_invlpg(ctxt
->vcpu
, memop
);
2496 /* Disable writeback. */
2497 c
->dst
.type
= OP_NONE
;
2500 goto cannot_emulate
;
2503 case 0x05: /* syscall */
2504 rc
= emulate_syscall(ctxt
);
2505 if (rc
!= X86EMUL_CONTINUE
)
2511 emulate_clts(ctxt
->vcpu
);
2512 c
->dst
.type
= OP_NONE
;
2514 case 0x08: /* invd */
2515 case 0x09: /* wbinvd */
2516 case 0x0d: /* GrpP (prefetch) */
2517 case 0x18: /* Grp16 (prefetch/nop) */
2518 c
->dst
.type
= OP_NONE
;
2520 case 0x20: /* mov cr, reg */
2521 if (c
->modrm_mod
!= 3)
2522 goto cannot_emulate
;
2523 c
->regs
[c
->modrm_rm
] = ops
->get_cr(c
->modrm_reg
, ctxt
->vcpu
);
2524 c
->dst
.type
= OP_NONE
; /* no writeback */
2526 case 0x21: /* mov from dr to reg */
2527 if (c
->modrm_mod
!= 3)
2528 goto cannot_emulate
;
2529 if (emulator_get_dr(ctxt
, c
->modrm_reg
, &c
->regs
[c
->modrm_rm
]))
2530 goto cannot_emulate
;
2531 rc
= X86EMUL_CONTINUE
;
2532 c
->dst
.type
= OP_NONE
; /* no writeback */
2534 case 0x22: /* mov reg, cr */
2535 if (c
->modrm_mod
!= 3)
2536 goto cannot_emulate
;
2537 ops
->set_cr(c
->modrm_reg
, c
->modrm_val
, ctxt
->vcpu
);
2538 c
->dst
.type
= OP_NONE
;
2540 case 0x23: /* mov from reg to dr */
2541 if (c
->modrm_mod
!= 3)
2542 goto cannot_emulate
;
2543 if (emulator_set_dr(ctxt
, c
->modrm_reg
, c
->regs
[c
->modrm_rm
]))
2544 goto cannot_emulate
;
2545 rc
= X86EMUL_CONTINUE
;
2546 c
->dst
.type
= OP_NONE
; /* no writeback */
2550 msr_data
= (u32
)c
->regs
[VCPU_REGS_RAX
]
2551 | ((u64
)c
->regs
[VCPU_REGS_RDX
] << 32);
2552 if (kvm_set_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], msr_data
)) {
2553 kvm_inject_gp(ctxt
->vcpu
, 0);
2554 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2556 rc
= X86EMUL_CONTINUE
;
2557 c
->dst
.type
= OP_NONE
;
2561 if (kvm_get_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], &msr_data
)) {
2562 kvm_inject_gp(ctxt
->vcpu
, 0);
2563 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2565 c
->regs
[VCPU_REGS_RAX
] = (u32
)msr_data
;
2566 c
->regs
[VCPU_REGS_RDX
] = msr_data
>> 32;
2568 rc
= X86EMUL_CONTINUE
;
2569 c
->dst
.type
= OP_NONE
;
2571 case 0x34: /* sysenter */
2572 rc
= emulate_sysenter(ctxt
);
2573 if (rc
!= X86EMUL_CONTINUE
)
2578 case 0x35: /* sysexit */
2579 rc
= emulate_sysexit(ctxt
);
2580 if (rc
!= X86EMUL_CONTINUE
)
2585 case 0x40 ... 0x4f: /* cmov */
2586 c
->dst
.val
= c
->dst
.orig_val
= c
->src
.val
;
2587 if (!test_cc(c
->b
, ctxt
->eflags
))
2588 c
->dst
.type
= OP_NONE
; /* no writeback */
2590 case 0x80 ... 0x8f: /* jnz rel, etc*/
2591 if (test_cc(c
->b
, ctxt
->eflags
))
2592 jmp_rel(c
, c
->src
.val
);
2593 c
->dst
.type
= OP_NONE
;
2595 case 0xa0: /* push fs */
2596 emulate_push_sreg(ctxt
, VCPU_SREG_FS
);
2598 case 0xa1: /* pop fs */
2599 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_FS
);
2600 if (rc
!= X86EMUL_CONTINUE
)
2605 c
->dst
.type
= OP_NONE
;
2606 /* only subword offset */
2607 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2608 emulate_2op_SrcV_nobyte("bt", c
->src
, c
->dst
, ctxt
->eflags
);
2610 case 0xa4: /* shld imm8, r, r/m */
2611 case 0xa5: /* shld cl, r, r/m */
2612 emulate_2op_cl("shld", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2614 case 0xa8: /* push gs */
2615 emulate_push_sreg(ctxt
, VCPU_SREG_GS
);
2617 case 0xa9: /* pop gs */
2618 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_GS
);
2619 if (rc
!= X86EMUL_CONTINUE
)
2624 /* only subword offset */
2625 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2626 emulate_2op_SrcV_nobyte("bts", c
->src
, c
->dst
, ctxt
->eflags
);
2628 case 0xac: /* shrd imm8, r, r/m */
2629 case 0xad: /* shrd cl, r, r/m */
2630 emulate_2op_cl("shrd", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2632 case 0xae: /* clflush */
2634 case 0xb0 ... 0xb1: /* cmpxchg */
2636 * Save real source value, then compare EAX against
2639 c
->src
.orig_val
= c
->src
.val
;
2640 c
->src
.val
= c
->regs
[VCPU_REGS_RAX
];
2641 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2642 if (ctxt
->eflags
& EFLG_ZF
) {
2643 /* Success: write back to memory. */
2644 c
->dst
.val
= c
->src
.orig_val
;
2646 /* Failure: write the value we saw to EAX. */
2647 c
->dst
.type
= OP_REG
;
2648 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2653 /* only subword offset */
2654 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2655 emulate_2op_SrcV_nobyte("btr", c
->src
, c
->dst
, ctxt
->eflags
);
2657 case 0xb6 ... 0xb7: /* movzx */
2658 c
->dst
.bytes
= c
->op_bytes
;
2659 c
->dst
.val
= (c
->d
& ByteOp
) ? (u8
) c
->src
.val
2662 case 0xba: /* Grp8 */
2663 switch (c
->modrm_reg
& 3) {
2676 /* only subword offset */
2677 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2678 emulate_2op_SrcV_nobyte("btc", c
->src
, c
->dst
, ctxt
->eflags
);
2680 case 0xbe ... 0xbf: /* movsx */
2681 c
->dst
.bytes
= c
->op_bytes
;
2682 c
->dst
.val
= (c
->d
& ByteOp
) ? (s8
) c
->src
.val
:
2685 case 0xc3: /* movnti */
2686 c
->dst
.bytes
= c
->op_bytes
;
2687 c
->dst
.val
= (c
->op_bytes
== 4) ? (u32
) c
->src
.val
:
2690 case 0xc7: /* Grp9 (cmpxchg8b) */
2691 rc
= emulate_grp9(ctxt
, ops
, memop
);
2692 if (rc
!= X86EMUL_CONTINUE
)
2694 c
->dst
.type
= OP_NONE
;
2700 DPRINTF("Cannot emulate %02x\n", c
->b
);