/******************************************************************************
 * x86_emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
/* Misc flags */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm16   (4<<29)
#define Src2Mem16   (5<<29)	/* Used for Ep encoding. First argument has to be
				   in memory and second argument is located
				   immediately after the first one in memory. */
#define Src2Mask    (7<<29)
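
/*
 * For illustration: a table entry such as
 * ByteOp | DstMem | SrcReg | ModRM | Lock packs several decode fields
 * into one u32: bit 0 selects byte-sized operands, bits 1:3 say the
 * destination is memory, bits 4:7 say the source is a register, bit 8
 * requests generic ModRM decode, and bit 26 permits a LOCK prefix.
 * The decoder reads each field back with the *Mask constants, e.g.
 * (c->d & SrcMask) == SrcReg.
 */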
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	DstReg | SrcMem | ModRM | Mov, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImm | Src2Imm16 | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
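
/*
 * Worked example: opcode 0x88 is "mov r/m8, r8", so opcode_table[0x88]
 * above reads ByteOp | DstMem | SrcReg | ModRM | Mov -- a byte-sized
 * move from a register source to a memory destination, decoded via
 * ModRM, with no flags update (Mov means the destination is only
 * written, never read).
 */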
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xC7 */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	/* 0xC8 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
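
/*
 * Layout note: each EFLG_* constant is that flag's bit position in
 * EFLAGS, e.g. EFLG_ZF is bit 6. EFLG_IOPL is a two-bit field at bits
 * 12-13, hence (3<<12); an IOPL of 3 reads back as
 * (eflags & EFLG_IOPL) >> 12 == 3.
 */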
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)(_x);							\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
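
/*
 * Worked example: with 16-bit addressing, c->ad_bytes == 2, so
 * ad_mask() returns (1UL << 16) - 1 == 0xffff; with ad_bytes == 4 it
 * returns 0xffffffff.
 */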
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
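
/*
 * Worked example: with ad_bytes == 2, incrementing a register holding
 * 0xffff by 1 wraps the low 16 bits to 0x0000 while any bits above the
 * address mask are preserved -- matching 16-bit address arithmetic.
 */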
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}
static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, c->seg_override);
}
static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_SS);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size;

	if (linear < fc->start || linear >= fc->end) {
		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->start = linear;
		fc->end = linear + size;
	}
	*dest = fc->data[linear - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	eip += ctxt->cs_base;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
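
/*
 * Example: if ctxt->eip marks the start of the instruction and 14 bytes
 * of prefixes/opcode/immediates have already been consumed, a further
 * 2-byte fetch makes eip + size - ctxt->eip == 16 > 15, so the fetch is
 * refused as an over-long (hence invalid) instruction.
 */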
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
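
/*
 * Worked example: in 32-bit mode, ModRM byte 0x44 (mod=01 reg=000
 * rm=100) means "rm encodes a SIB byte plus an 8-bit displacement", so
 * the code above fetches a SIB byte, adds the base and scaled index
 * registers, and then adds the disp8 in the mod == 1 case.
 */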
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;

	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	/* Shadow copy of register state. Committed on successful emulation. */
	memset(c, 0, sizeof(struct decode_cache));
	c->eip = ctxt->eip;
	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix)
		if (c->rex_prefix & 8)
			c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.val = 0;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c, seg_override_base(ctxt, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2Imm16:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 2;
		c->src2.val = insn_fetch(u16, 2, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Mem16:
		c->src2.type = OP_MEM;
		c->src2.bytes = 2;
		c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
		c->src2.val = 0;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
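
/*
 * Example: for "rep insb" with ECX == 100 and a one-byte element size,
 * the first call fetches up to min(bytes left in the RDI page,
 * sizeof(rc->data)) elements, capped at the rep count, in a single
 * ops->pio_in_emulated() call; subsequent iterations are then satisfied
 * from rc->data instead of performing one I/O exit per element.
 */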
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
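
/*
 * Worked example: with the granularity bit set (desc->g == 1) and a raw
 * limit of 0xfffff, the scaled limit is (0xfffff << 12) | 0xfff ==
 * 0xffffffff, i.e. a full 4GB segment.
 */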
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
	return X86EMUL_PROPAGATE_FAULT;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
					       c->regs[VCPU_REGS_RSP]);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = ops->read_emulated(register_address(c, ss_base(ctxt),
						 c->regs[VCPU_REGS_RSP]),
				dest, len, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment segment;

	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);

	c->src.val = segment.selector;
	emulate_push(ctxt);
}
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt);
		++reg;
	}
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old, new;
	int rc;

	rc = ops->read_emulated(c->modrm_ea, &old, 8, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;

	} else {
		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
		       (u32) c->regs[VCPU_REGS_RBX];

		rc = ops->cmpxchg_emulated(c->modrm_ea, &old, &new, 8, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
	/*
	 * an sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
	if (!(int_shadow & mask))
		ctxt->interruptibility = mask;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct kvm_segment *cs, struct kvm_segment *ss)
{
	memset(cs, 0, sizeof(struct kvm_segment));
	kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct kvm_segment));

	cs->l = 0;		/* will be adjusted later */
	cs->base = 0;		/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	cs->limit = 0xffffffff;	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->present = 1;
	cs->db = 1;

	ss->unusable = 0;
	ss->base = 0;		/* flat segment */
	ss->limit = 0xffffffff;	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->db = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->present = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs.selector = (u16)(msr_data & 0xfffc);
	ss.selector = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}
	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		kvm_x86_ops->get_msr(ctxt->vcpu,
				     ctxt->mode == X86EMUL_MODE_PROT64 ?
				     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs.selector = (u16)msr_data;
	cs.selector &= ~SELECTOR_RPL_MASK;
	ss.selector = cs.selector + 8;
	ss.selector &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;
	int usermode;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs.selector = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs.selector = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = cs.selector + 8;
		cs.db = 0;
		cs.l = 1;
		break;
	}
	cs.selector |= SELECTOR_RPL_MASK;
	ss.selector |= SELECTOR_RPL_MASK;

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct kvm_segment tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
	if (tr_seg.unusable)
		return false;
	if (tr_seg.limit < 103)
		return false;
	r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > tr_seg.limit)
		return false;
	r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
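
/*
 * Worked example: for a 2-byte access to port 0x3f9, the permission
 * byte lives at io_bitmap_ptr + 0x3f9/8 == io_bitmap_ptr + 0x7f,
 * bit_idx == 1 and mask == 0x3, so the access is allowed only if bits
 * 1 and 2 of that byte are both clear.
 */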
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}
static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
				      struct x86_emulate_ops *ops,
				      int seg)
{
	struct desc_struct desc;
	if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
		return get_desc_base(&desc);
	else
		return ~0;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
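
/*
 * Unlike its 16-bit counterpart, the 32-bit TSS image additionally carries
 * CR3, FS and GS, and all register fields are 32 bits wide; the save and
 * load helpers here otherwise mirror the tss_segment_16 pair field for
 * field.
 */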
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	ops->set_cr(3, tss->cr3, ctxt->vcpu);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
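	/*
	 * Bit 1 of EFLAGS is architecturally reserved and always reads as
	 * one, which is why the value taken from the TSS is OR'ed with 2.
	 */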
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
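
/*
 * task_switch_16() and task_switch_32() are deliberately parallel: they
 * differ only in the in-memory TSS layout (struct tss_segment_16 vs.
 * struct tss_segment_32) that the guest state is shuttled through.
 */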
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
				      tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
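	/*
	 * The magic limits above come from the architectural minimum TSS
	 * sizes: a 32-bit TSS (type bit 3 set) is 104 bytes, so its limit
	 * must be at least 0x67; a 16-bit TSS is 44 bytes, hence 0x2b.
	 */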

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	return ret;
}
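
/*
 * Setting CR0.TS above matches what hardware does on every task switch:
 * the first FPU/SSE instruction executed by the new task will then raise
 * #NM, giving the OS a chance to save and restore FPU state lazily.
 */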
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	memset(c, 0, sizeof(struct decode_cache));
	c->eip = ctxt->eip;
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason);

	if (rc == X86EMUL_CONTINUE) {
		memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
		kvm_rip_write(ctxt->vcpu, c->eip);
	}

	return rc;
}
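
/*
 * Note the shadow-register pattern above: emulator_do_task_switch() works
 * on the decode cache copy of the registers, and only a successful switch
 * (X86EMUL_CONTINUE) is committed back to vcpu->arch.regs and RIP.
 */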
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
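
/*
 * Example: for a 32-bit movsd iteration with EFLAGS.DF clear, df is +1 and
 * (R/E)SI or (R/E)DI advances by op->bytes == 4; with DF set it moves back
 * by 4 instead.
 */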
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->interruptibility = 0;

	/* Shadow copy of register state. Committed on successful emulation.
	 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
	 * modify them.
	 */
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* The LOCK prefix is allowed only with certain instructions. */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL 0. */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			kvm_rip_write(ctxt->vcpu, c->eip);
			goto done;
		}
		/* The second termination condition only applies to REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and, if it is, test the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}
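	/*
	 * For example, "rep movsb" with (E)CX == 0 performs no iterations at
	 * all, while "repe cmpsb" stops as soon as a comparison clears ZF;
	 * both exits are taken through string_done above.
	 */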

	if (c->src.type == OP_MEM) {
		rc = ops->read_emulated((unsigned long)c->src.ptr,
					&c->src.val,
					c->src.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		rc = ops->read_emulated((unsigned long)c->src2.ptr,
					&c->src2.val,
					c->src2.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val,
					c->dst.bytes, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
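	/*
	 * The destination is pre-read above so that read-modify-write
	 * instructions (add, or, ...) see the old value in c->dst.orig_val;
	 * Mov-flagged instructions overwrite it completely, so the slow
	 * emulated read is skipped for them.
	 */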
2558 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2560 case 0x06: /* push es */
2561 emulate_push_sreg(ctxt
, VCPU_SREG_ES
);
2563 case 0x07: /* pop es */
2564 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2565 if (rc
!= X86EMUL_CONTINUE
)
2570 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2572 case 0x0e: /* push cs */
2573 emulate_push_sreg(ctxt
, VCPU_SREG_CS
);
2577 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2579 case 0x16: /* push ss */
2580 emulate_push_sreg(ctxt
, VCPU_SREG_SS
);
2582 case 0x17: /* pop ss */
2583 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2584 if (rc
!= X86EMUL_CONTINUE
)
2589 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2591 case 0x1e: /* push ds */
2592 emulate_push_sreg(ctxt
, VCPU_SREG_DS
);
2594 case 0x1f: /* pop ds */
2595 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2596 if (rc
!= X86EMUL_CONTINUE
)
2601 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2605 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2609 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2613 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
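	/*
	 * The emulate_2op_SrcV()/emulate_1op() helpers used throughout this
	 * switch execute the real instruction on the host via inline
	 * assembly and copy the resulting arithmetic flags into
	 * ctxt->eflags, rather than recomputing the flags by hand.
	 */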
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x50 ... 0x57:  /* push reg */
		emulate_push(ctxt);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60:	/* pusha */
		emulate_pusha(ctxt);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x68: /* push imm */
	case 0x6a: /* push imm8 */
		emulate_push(ctxt);
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
				     c->regs[VCPU_REGS_RDX], &c->dst.val))
			goto done; /* IO is needed, skip writeback */
		break;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->src.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
				      &c->src.val, 1, ctxt->vcpu);

		c->dst.type = OP_NONE; /* nothing to writeback */
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
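	/*
	 * test_cc() decodes the condition from the low nibble of the opcode;
	 * e.g. 0x74 (jz) branches when ZF is set, 0x75 (jnz) when it is
	 * clear.
	 */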
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c: { /* mov r/m, sreg */
		struct kvm_segment segreg;

		if (c->modrm_reg <= VCPU_SREG_GS)
			kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
		else {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->dst.val = segreg.selector;
		break;
	}
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (!(c->rex_prefix & 1)) { /* nop */
			c->dst.type = OP_NONE;
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = c->dst.type = OP_REG;
		c->src.bytes = c->dst.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: /* jmp far */
	jump_far:
		if (load_segment_descriptor(ctxt, ops, c->src2.val,
					    VCPU_SREG_CS))
			goto done;

		c->eip = c->src.val;
		break;
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}
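	/*
	 * The sti/cli cases above also model the one-instruction "interrupt
	 * shadow": an STI that sets IF keeps interrupts blocked until the
	 * next instruction completes, which toggle_interruptibility()
	 * records via KVM_X86_SHADOW_INT_STI (and similarly for mov ss).
	 */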

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI,
				&c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter the guest when the pio read ahead buffer is empty
		 * or, if it is not used, after every 1024 iterations.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}

	/* Commit shadow register state. */
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(ctxt->vcpu, c->eip);

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
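	/*
	 * The batching above keeps long REP sequences cheap: e.g. a
	 * "rep insb" with (E)CX == 5000 is executed here iteration by
	 * iteration, but control returns to the guest whenever the pio
	 * read-ahead buffer drains, or every 1024th iteration otherwise, so
	 * pending events can be serviced; ctxt->restart then causes the
	 * instruction to be re-entered.
	 */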

twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x09:		/* wbinvd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
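	/*
	 * Both MSR cases follow the architectural register convention:
	 * (E)CX selects the MSR, and the 64-bit value travels in EDX:EAX
	 * (wrmsr composes it, rdmsr splits it).
	 */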
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	/* push fs */
		emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1:	/* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
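	/*
	 * The "only subword offset" masking used by the bit-test cases
	 * reduces the bit offset modulo the operand width, as the CPU does
	 * for register destinations: with a 32-bit operand, "bt $35, %eax"
	 * tests bit 35 & 31 == 3.
	 */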
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
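	/*
	 * This mirrors the architectural cmpxchg: the flags come from
	 * comparing the accumulator with the destination; on ZF=1 the saved
	 * source is written to the destination, on ZF=0 the destination
	 * value is loaded into (E)AX instead.
	 */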
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val
					     : (s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val
						: (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->dst.type = OP_NONE;
		break;
	}
	goto writeback;
3253 DPRINTF("Cannot emulate %02x\n", c
->b
);