/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
/* Misc flags */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
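/*
 * Illustrative sketch (not part of the emulator): each decode-table entry
 * packs the fields above into one u32, so a hypothetical helper could pull
 * a table entry apart like this.  decode_flags_demo() is an invented name;
 * the real decoder reads the same fields inline with c->d.
 *
 *	static void decode_flags_demo(u32 d)
 *	{
 *		unsigned dst    = d & DstMask;	// e.g. DstMem for 0x00 (add r/m8,r8)
 *		unsigned src    = d & SrcMask;	// e.g. SrcReg
 *		int      byteop = !!(d & ByteOp);	// 8-bit operands
 *		int      modrm  = !!(d & ModRM);	// instruction carries a ModRM byte
 *	}
 */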
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	ImplicitOps | SrcMem | ModRM, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImmFAddr | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)	\
	/* _sav |= EFLAGS & _msk; */	\
	"pushf; "			\
	"pop  %"_LO32 _tmp"; "		\
	"andl %"_msk",%"_LO32 _tmp"; "	\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
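/*
 * Usage sketch: insn_fetch() evaluates to the fetched value and advances
 * _eip, but it also assigns to a local 'rc' and may "goto done", so it can
 * only appear in functions that provide both.  Typical call sites (the
 * 'disp' local is hypothetical, for illustration):
 *
 *	c->modrm = insn_fetch(u8, 1, c->eip);	// one ModRM byte
 *	disp     = insn_fetch(s32, 4, c->eip);	// a 32-bit displacement
 */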
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
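/*
 * Example: with ad_bytes == 2 (16-bit addressing) an address past 0xffff
 * must wrap within the low 16 bits.  address_mask(c, 0x1234f) yields
 * 0x234f, so register_address(c, base, reg) computes base + (reg & 0xffff)
 * rather than base + reg.
 */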
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;
	c->use_modrm_ea = 1;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
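/*
 * Example 16-bit decode: ModRM byte 0x46 followed by displacement 0x10 has
 * mod=1 and rm=6, so the switches above produce modrm_ea = bp + 0x10, and
 * because the base register is BP the default segment becomes SS unless an
 * explicit segment-override prefix was seen.
 */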
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;

	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix)
		if (c->rex_prefix & 8)
			c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, ops, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c, seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.bytes = c->op_bytes + 2;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
	case DstMem64:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			kvm_inject_page_fault(ctxt->vcpu, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
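/*
 * Example: a "rep insb" with a large RCX and RDI just below a page
 * boundary refills at most min(in_page, sizeof(rc->data)) / size elements
 * per refill, so pio_in_emulated() may be re-entered several times before
 * the whole rep sequence is satisfied.
 */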
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
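/*
 * Example: a descriptor with limit 0x1f and G=1 covers
 * (0x1f << 12) | 0xfff = 0x1ffff bytes; the 4KB-granular limit is scaled
 * to a byte-granular one before it is compared against offsets.
 */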
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		kvm_inject_page_fault(ctxt->vcpu, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
	return X86EMUL_PROPAGATE_FAULT;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
					       c->regs[VCPU_REGS_RSP]);
}
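/*
 * Note that emulate_push() only decrements RSP and describes the store in
 * c->dst; the actual memory write is performed later by writeback(), so a
 * fault on the stack page is raised before any guest state is committed.
 */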
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static void emulate_pusha(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);
		++reg;
	}
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			     (u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			kvm_inject_page_fault(ctxt->vcpu,
					      (unsigned long)c->dst.ptr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}
*ctxt
,
2108 struct x86_emulate_ops
*ops
,
2109 struct tss_segment_16
*tss
)
2111 struct decode_cache
*c
= &ctxt
->decode
;
2114 tss
->flag
= ctxt
->eflags
;
2115 tss
->ax
= c
->regs
[VCPU_REGS_RAX
];
2116 tss
->cx
= c
->regs
[VCPU_REGS_RCX
];
2117 tss
->dx
= c
->regs
[VCPU_REGS_RDX
];
2118 tss
->bx
= c
->regs
[VCPU_REGS_RBX
];
2119 tss
->sp
= c
->regs
[VCPU_REGS_RSP
];
2120 tss
->bp
= c
->regs
[VCPU_REGS_RBP
];
2121 tss
->si
= c
->regs
[VCPU_REGS_RSI
];
2122 tss
->di
= c
->regs
[VCPU_REGS_RDI
];
2124 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2125 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2126 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2127 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2128 tss
->ldt
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
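
/*
 * emulator_do_task_switch() performs the checks the CPU would do in
 * hardware before dispatching to the 16/32-bit helpers: DPL/CPL against
 * the new TSS descriptor (except for IRET), descriptor present and
 * limit checks (at least 0x67 for a 32-bit TSS, 0x2b for a 16-bit one),
 * busy-flag bookkeeping in the descriptors, and NT handling in EFLAGS.
 */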
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
				      tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
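
/*
 * External entry point for task-switch emulation.  The decode cache is
 * seeded from the context, the switch itself runs above, and on success
 * the side effects (e.g. a pushed error code) are committed through
 * writeback().
 */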
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			kvm_rip_write(ctxt->vcpu, c->eip);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
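
/*
 * Illustration: for a dword string op with EFLG_DF clear, each call
 * above steps the index register by +4 and recomputes the operand
 * pointer; with EFLG_DF set the step is -4, matching the hardware
 * direction-flag semantics.
 */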
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			kvm_rip_write(ctxt->vcpu, c->eip);
			goto done;
		}
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}
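
	/*
	 * Example: "repe cmpsb" keeps iterating while RCX != 0 and ZF
	 * stays set; the first mismatching byte clears ZF, and the REPE
	 * test above terminates the loop.
	 */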
	if (c->src.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
2588 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2590 case 0x06: /* push es */
2591 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2593 case 0x07: /* pop es */
2594 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2595 if (rc
!= X86EMUL_CONTINUE
)
2600 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2602 case 0x0e: /* push cs */
2603 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
2607 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2609 case 0x16: /* push ss */
2610 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2612 case 0x17: /* pop ss */
2613 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2614 if (rc
!= X86EMUL_CONTINUE
)
2619 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2621 case 0x1e: /* push ds */
2622 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2624 case 0x1f: /* pop ds */
2625 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2626 if (rc
!= X86EMUL_CONTINUE
)
2631 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2635 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2639 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2643 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x50 ... 0x57:  /* push reg */
		emulate_push(ctxt, ops);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60:	/* pusha */
		emulate_pusha(ctxt, ops);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x68: /* push imm */
	case 0x6a: /* push imm8 */
		emulate_push(ctxt, ops);
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
				     c->regs[VCPU_REGS_RDX], &c->dst.val))
			goto done; /* IO is needed, skip writeback */
		break;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->src.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
				      &c->src.val, 1, ctxt->vcpu);

		c->dst.type = OP_NONE; /* nothing to writeback */
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
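	/*
	 * Grp1 (0x80-0x83) encodes the arithmetic operation in the
	 * ModRM reg field and reuses the labelled cases above; e.g.
	 * "81 /0" is add r/m32, imm32 and lands on the "add" label.
	 */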
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
			c->dst.type = OP_NONE;  /* nop */
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = OP_REG;
		c->src.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4: 	/* inb */
	case 0xe5: 	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
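	/*
	 * The immediate I/O forms (0xe4-0xe7) above jump to do_io_in /
	 * do_io_out below, so both encodings share the same IOPL
	 * permission check and the same PIO plumbing.
	 */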
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter guest when pio read ahead buffer is empty or,
		 * if it is not used, after every 1024 iterations.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}
	/*
	 * reset read cache here in case string instruction is restarted
	 * without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(ctxt->vcpu, c->eip);
done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
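
/*
 * Two-byte (0x0f-prefixed) opcodes are handled below.  Decode jumps
 * here via the c->twobyte check at special_insn; after the switch,
 * control rejoins the common writeback path above.
 */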
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: 	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x09:		/* wbinvd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
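	/*
	 * For all the bit-test ops (bt/bts/btr/btc) the mask
	 * "(c->dst.bytes << 3) - 1" keeps only the subword bit offset:
	 * e.g. with a 4-byte destination the source bit index is
	 * reduced mod 32 before the test.
	 */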
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	return -1;
}