KVM: x86 emulator: introduce struct x86_exception to communicate faults
arch/x86/kvm/emulate.c
1 /******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27
28 #include "x86.h"
29 #include "tss.h"
30
31 /*
32 * Opcode effective-address decode tables.
33 * Note that we only emulate instructions that have at least one memory
34 * operand (excluding implicit stack references). We assume that stack
35 * references and instruction fetches will never occur in special memory
36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
37 * not be handled.
38 */
39
40 /* Operand sizes: 8-bit operands or specified/overridden size. */
41 #define ByteOp (1<<0) /* 8-bit operands. */
42 /* Destination operand type. */
43 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
44 #define DstReg (2<<1) /* Register operand. */
45 #define DstMem (3<<1) /* Memory operand. */
46 #define DstAcc (4<<1) /* Destination Accumulator */
47 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
48 #define DstMem64 (6<<1) /* 64bit memory operand */
49 #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50 #define DstMask (7<<1)
51 /* Source operand type. */
52 #define SrcNone (0<<4) /* No source operand. */
53 #define SrcReg (1<<4) /* Register operand. */
54 #define SrcMem (2<<4) /* Memory operand. */
55 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
56 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
57 #define SrcImm (5<<4) /* Immediate operand. */
58 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
59 #define SrcOne (7<<4) /* Implied '1' */
60 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
61 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
62 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
63 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
64 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
65 #define SrcAcc (0xd<<4) /* Source Accumulator */
66 #define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
67 #define SrcMask (0xf<<4)
68 /* Generic ModRM decode. */
69 #define ModRM (1<<8)
70 /* Destination is only written; never read. */
71 #define Mov (1<<9)
72 #define BitOp (1<<10)
73 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
74 #define String (1<<12) /* String instruction (rep capable) */
75 #define Stack (1<<13) /* Stack instruction (push/pop) */
76 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
77 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
78 /* Misc flags */
79 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
80 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
81 #define Undefined (1<<25) /* No Such Instruction */
82 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
83 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
84 #define No64 (1<<28)
85 /* Source 2 operand type */
86 #define Src2None (0<<29)
87 #define Src2CL (1<<29)
88 #define Src2ImmByte (2<<29)
89 #define Src2One (3<<29)
90 #define Src2Imm (4<<29)
91 #define Src2Mask (7<<29)
92
93 #define X2(x...) x, x
94 #define X3(x...) X2(x), x
95 #define X4(x...) X2(x), X2(x)
96 #define X5(x...) X4(x), x
97 #define X6(x...) X4(x), X2(x)
98 #define X7(x...) X4(x), X3(x)
99 #define X8(x...) X4(x), X4(x)
100 #define X16(x...) X8(x), X8(x)
101
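/*
 * Illustrative sketch (not part of the original file): the X-macros above
 * simply replicate their argument list, letting one opcode-table row cover
 * a run of opcodes.  For example, with these definitions:
 *
 *	static const int demo[] = { X4(1), X2(2) };
 *
 * demo expands to { 1, 1, 1, 1, 2, 2 }.
 */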
102 struct opcode {
103 u32 flags;
104 union {
105 int (*execute)(struct x86_emulate_ctxt *ctxt);
106 struct opcode *group;
107 struct group_dual *gdual;
108 } u;
109 };
110
111 struct group_dual {
112 struct opcode mod012[8];
113 struct opcode mod3[8];
114 };
115
116 /* EFLAGS bit definitions. */
117 #define EFLG_ID (1<<21)
118 #define EFLG_VIP (1<<20)
119 #define EFLG_VIF (1<<19)
120 #define EFLG_AC (1<<18)
121 #define EFLG_VM (1<<17)
122 #define EFLG_RF (1<<16)
123 #define EFLG_IOPL (3<<12)
124 #define EFLG_NT (1<<14)
125 #define EFLG_OF (1<<11)
126 #define EFLG_DF (1<<10)
127 #define EFLG_IF (1<<9)
128 #define EFLG_TF (1<<8)
129 #define EFLG_SF (1<<7)
130 #define EFLG_ZF (1<<6)
131 #define EFLG_AF (1<<4)
132 #define EFLG_PF (1<<2)
133 #define EFLG_CF (1<<0)
134
135 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
136 #define EFLG_RESERVED_ONE_MASK 2
137
138 /*
139 * Instruction emulation:
140 * Most instructions are emulated directly via a fragment of inline assembly
141 * code. This allows us to save/restore EFLAGS and thus very easily pick up
142 * any modified flags.
143 */
144
145 #if defined(CONFIG_X86_64)
146 #define _LO32 "k" /* force 32-bit operand */
147 #define _STK "%%rsp" /* stack pointer */
148 #elif defined(__i386__)
149 #define _LO32 "" /* force 32-bit operand */
150 #define _STK "%%esp" /* stack pointer */
151 #endif
152
153 /*
154 * These EFLAGS bits are restored from saved value during emulation, and
155 * any changes are written back to the saved value after emulation.
156 */
157 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
158
159 /* Before executing instruction: restore necessary bits in EFLAGS. */
160 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
161 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
162 "movl %"_sav",%"_LO32 _tmp"; " \
163 "push %"_tmp"; " \
164 "push %"_tmp"; " \
165 "movl %"_msk",%"_LO32 _tmp"; " \
166 "andl %"_LO32 _tmp",("_STK"); " \
167 "pushf; " \
168 "notl %"_LO32 _tmp"; " \
169 "andl %"_LO32 _tmp",("_STK"); " \
170 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
171 "pop %"_tmp"; " \
172 "orl %"_LO32 _tmp",("_STK"); " \
173 "popf; " \
174 "pop %"_sav"; "
175
176 /* After executing instruction: write-back necessary bits in EFLAGS. */
177 #define _POST_EFLAGS(_sav, _msk, _tmp) \
178 /* _sav |= EFLAGS & _msk; */ \
179 "pushf; " \
180 "pop %"_tmp"; " \
181 "andl %"_msk",%"_LO32 _tmp"; " \
182 "orl %"_LO32 _tmp",%"_sav"; "
183
184 #ifdef CONFIG_X86_64
185 #define ON64(x) x
186 #else
187 #define ON64(x)
188 #endif
189
190 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
191 do { \
192 __asm__ __volatile__ ( \
193 _PRE_EFLAGS("0", "4", "2") \
194 _op _suffix " %"_x"3,%1; " \
195 _POST_EFLAGS("0", "4", "2") \
196 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
197 "=&r" (_tmp) \
198 : _y ((_src).val), "i" (EFLAGS_MASK)); \
199 } while (0)
200
201
202 /* Raw emulation: instruction has two explicit operands. */
203 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
204 do { \
205 unsigned long _tmp; \
206 \
207 switch ((_dst).bytes) { \
208 case 2: \
209 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
210 break; \
211 case 4: \
212 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
213 break; \
214 case 8: \
215 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
216 break; \
217 } \
218 } while (0)
219
220 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
221 do { \
222 unsigned long _tmp; \
223 switch ((_dst).bytes) { \
224 case 1: \
225 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
226 break; \
227 default: \
228 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
229 _wx, _wy, _lx, _ly, _qx, _qy); \
230 break; \
231 } \
232 } while (0)
233
234 /* Source operand is byte-sized and may be restricted to just %cl. */
235 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
236 __emulate_2op(_op, _src, _dst, _eflags, \
237 "b", "c", "b", "c", "b", "c", "b", "c")
238
239 /* Source operand is byte, word, long or quad sized. */
240 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
241 __emulate_2op(_op, _src, _dst, _eflags, \
242 "b", "q", "w", "r", _LO32, "r", "", "r")
243
244 /* Source operand is word, long or quad sized. */
245 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
246 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
247 "w", "r", _LO32, "r", "", "r")
248
249 /* Instruction has three operands and one operand is stored in ECX register */
250 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
251 do { \
252 unsigned long _tmp; \
253 _type _clv = (_cl).val; \
254 _type _srcv = (_src).val; \
255 _type _dstv = (_dst).val; \
256 \
257 __asm__ __volatile__ ( \
258 _PRE_EFLAGS("0", "5", "2") \
259 _op _suffix " %4,%1 \n" \
260 _POST_EFLAGS("0", "5", "2") \
261 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
262 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
263 ); \
264 \
265 (_cl).val = (unsigned long) _clv; \
266 (_src).val = (unsigned long) _srcv; \
267 (_dst).val = (unsigned long) _dstv; \
268 } while (0)
269
270 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
271 do { \
272 switch ((_dst).bytes) { \
273 case 2: \
274 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
275 "w", unsigned short); \
276 break; \
277 case 4: \
278 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
279 "l", unsigned int); \
280 break; \
281 case 8: \
282 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
283 "q", unsigned long)); \
284 break; \
285 } \
286 } while (0)
287
288 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
289 do { \
290 unsigned long _tmp; \
291 \
292 __asm__ __volatile__ ( \
293 _PRE_EFLAGS("0", "3", "2") \
294 _op _suffix " %1; " \
295 _POST_EFLAGS("0", "3", "2") \
296 : "=m" (_eflags), "+m" ((_dst).val), \
297 "=&r" (_tmp) \
298 : "i" (EFLAGS_MASK)); \
299 } while (0)
300
301 /* Instruction has only one explicit operand (no source operand). */
302 #define emulate_1op(_op, _dst, _eflags) \
303 do { \
304 switch ((_dst).bytes) { \
305 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
306 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
307 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
308 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
309 } \
310 } while (0)
311
312 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
313 do { \
314 unsigned long _tmp; \
315 \
316 __asm__ __volatile__ ( \
317 _PRE_EFLAGS("0", "4", "1") \
318 _op _suffix " %5; " \
319 _POST_EFLAGS("0", "4", "1") \
320 : "=m" (_eflags), "=&r" (_tmp), \
321 "+a" (_rax), "+d" (_rdx) \
322 : "i" (EFLAGS_MASK), "m" ((_src).val), \
323 "a" (_rax), "d" (_rdx)); \
324 } while (0)
325
326 #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
327 do { \
328 unsigned long _tmp; \
329 \
330 __asm__ __volatile__ ( \
331 _PRE_EFLAGS("0", "5", "1") \
332 "1: \n\t" \
333 _op _suffix " %6; " \
334 "2: \n\t" \
335 _POST_EFLAGS("0", "5", "1") \
336 ".pushsection .fixup,\"ax\" \n\t" \
337 "3: movb $1, %4 \n\t" \
338 "jmp 2b \n\t" \
339 ".popsection \n\t" \
340 _ASM_EXTABLE(1b, 3b) \
341 : "=m" (_eflags), "=&r" (_tmp), \
342 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
343 : "i" (EFLAGS_MASK), "m" ((_src).val), \
344 "a" (_rax), "d" (_rdx)); \
345 } while (0)
346
347 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
348 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
349 do { \
350 switch((_src).bytes) { \
351 case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
352 case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
353 case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
354 case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
355 } \
356 } while (0)
357
358 #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
359 do { \
360 switch((_src).bytes) { \
361 case 1: \
362 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
363 _eflags, "b", _ex); \
364 break; \
365 case 2: \
366 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
367 _eflags, "w", _ex); \
368 break; \
369 case 4: \
370 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
371 _eflags, "l", _ex); \
372 break; \
373 case 8: ON64( \
374 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
375 _eflags, "q", _ex)); \
376 break; \
377 } \
378 } while (0)
379
380 /* Fetch next part of the instruction being emulated. */
381 #define insn_fetch(_type, _size, _eip) \
382 ({ unsigned long _x; \
383 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
384 if (rc != X86EMUL_CONTINUE) \
385 goto done; \
386 (_eip) += (_size); \
387 (_type)_x; \
388 })
389
390 #define insn_fetch_arr(_arr, _size, _eip) \
391 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
392 if (rc != X86EMUL_CONTINUE) \
393 goto done; \
394 (_eip) += (_size); \
395 })
396
397 static inline unsigned long ad_mask(struct decode_cache *c)
398 {
399 return (1UL << (c->ad_bytes << 3)) - 1;
400 }
401
402 /* Access/update address held in a register, based on addressing mode. */
403 static inline unsigned long
404 address_mask(struct decode_cache *c, unsigned long reg)
405 {
406 if (c->ad_bytes == sizeof(unsigned long))
407 return reg;
408 else
409 return reg & ad_mask(c);
410 }
411
412 static inline unsigned long
413 register_address(struct decode_cache *c, unsigned long reg)
414 {
415 return address_mask(c, reg);
416 }
417
418 static inline void
419 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
420 {
421 if (c->ad_bytes == sizeof(unsigned long))
422 *reg += inc;
423 else
424 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
425 }
426
427 static inline void jmp_rel(struct decode_cache *c, int rel)
428 {
429 register_address_increment(c, &c->eip, rel);
430 }
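/*
 * Illustrative sketch: with 2-byte addressing (c->ad_bytes == 2) the
 * helpers above wrap register increments at 64K while preserving the
 * untouched upper bits, e.g. demo_wrap16(0x1ffff, 1) == 0x10000.
 */
static unsigned long demo_wrap16(unsigned long reg, int inc)
{
	unsigned long mask = (1UL << 16) - 1;	/* ad_mask() for ad_bytes == 2 */

	return (reg & ~mask) | ((reg + inc) & mask);
}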
431
432 static void set_seg_override(struct decode_cache *c, int seg)
433 {
434 c->has_seg_override = true;
435 c->seg_override = seg;
436 }
437
438 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
439 struct x86_emulate_ops *ops, int seg)
440 {
441 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
442 return 0;
443
444 return ops->get_cached_segment_base(seg, ctxt->vcpu);
445 }
446
447 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
448 struct x86_emulate_ops *ops,
449 struct decode_cache *c)
450 {
451 if (!c->has_seg_override)
452 return 0;
453
454 return c->seg_override;
455 }
456
457 static ulong linear(struct x86_emulate_ctxt *ctxt,
458 struct segmented_address addr)
459 {
460 struct decode_cache *c = &ctxt->decode;
461 ulong la;
462
463 la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
464 if (c->ad_bytes != 8)
465 la &= (u32)-1;
466 return la;
467 }
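/*
 * Illustrative sketch: in real mode the cached segment base is just
 * selector << 4 (see load_segment_descriptor() below), so linear()
 * reduces to the classic segment:offset sum.
 */
static unsigned long demo_real_mode_linear(unsigned short seg,
					   unsigned short off)
{
	return ((unsigned long)seg << 4) + off;
}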
468
469 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
470 u32 error, bool valid)
471 {
472 ctxt->exception.vector = vec;
473 ctxt->exception.error_code = error;
474 ctxt->exception.error_code_valid = valid;
475 }
476
477 static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
478 {
479 emulate_exception(ctxt, GP_VECTOR, err, true);
480 }
481
482 static void emulate_pf(struct x86_emulate_ctxt *ctxt)
483 {
484 emulate_exception(ctxt, PF_VECTOR, 0, true);
485 }
486
487 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
488 {
489 emulate_exception(ctxt, UD_VECTOR, 0, false);
490 }
491
492 static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
493 {
494 emulate_exception(ctxt, TS_VECTOR, err, true);
495 }
496
497 static int emulate_de(struct x86_emulate_ctxt *ctxt)
498 {
499 emulate_exception(ctxt, DE_VECTOR, 0, false);
500 return X86EMUL_PROPAGATE_FAULT;
501 }
502
503 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
504 struct x86_emulate_ops *ops,
505 unsigned long eip, u8 *dest)
506 {
507 struct fetch_cache *fc = &ctxt->decode.fetch;
508 int rc;
509 int size, cur_size;
510
511 if (eip == fc->end) {
512 cur_size = fc->end - fc->start;
513 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
514 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
515 size, ctxt->vcpu, NULL);
516 if (rc != X86EMUL_CONTINUE)
517 return rc;
518 fc->end += size;
519 }
520 *dest = fc->data[eip - fc->start];
521 return X86EMUL_CONTINUE;
522 }
523
524 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
525 struct x86_emulate_ops *ops,
526 unsigned long eip, void *dest, unsigned size)
527 {
528 int rc;
529
530 /* x86 instructions are limited to 15 bytes. */
531 if (eip + size - ctxt->eip > 15)
532 return X86EMUL_UNHANDLEABLE;
533 while (size--) {
534 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
535 if (rc != X86EMUL_CONTINUE)
536 return rc;
537 }
538 return X86EMUL_CONTINUE;
539 }
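/*
 * Illustrative stand-alone sketch (assumption: guest memory is a plain
 * buffer, with no page-boundary handling): the refill-then-serve shape of
 * do_fetch_insn_byte(), bounded by the 15-byte instruction-length limit.
 */
struct demo_fetch_cache {
	unsigned char data[15];
	unsigned long start, end;	/* [start, end) already fetched */
};

static unsigned char demo_fetch_byte(struct demo_fetch_cache *fc,
				     const unsigned char *guest,
				     unsigned long ip)
{
	if (ip == fc->end) {
		unsigned long cur = fc->end - fc->start;
		unsigned long i;

		for (i = 0; i < 15 - cur; i++)	/* refill up to the limit */
			fc->data[cur + i] = guest[ip + i];
		fc->end += 15 - cur;
	}
	return fc->data[ip - fc->start];
}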
540
541 /*
542 * Given the 'reg' portion of a ModRM byte, and a register block, return a
543 * pointer into the block that addresses the relevant register.
544 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
545 */
546 static void *decode_register(u8 modrm_reg, unsigned long *regs,
547 int highbyte_regs)
548 {
549 void *p;
550
551 p = &regs[modrm_reg];
552 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
553 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
554 return p;
555 }
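/*
 * Illustrative sketch (assumes the little-endian x86 register layout):
 * without a REX prefix, byte-register numbers 4-7 name AH/CH/DH/BH,
 * i.e. byte 1 of RAX/RCX/RDX/RBX - the remapping decode_register()
 * performs.
 */
static unsigned char *demo_byte_reg(unsigned long *regs, unsigned int nr)
{
	if (nr >= 4 && nr < 8)			/* AH, CH, DH, BH */
		return (unsigned char *)&regs[nr & 3] + 1;
	return (unsigned char *)&regs[nr];	/* AL, CL, DL, BL */
}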
556
557 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
558 struct x86_emulate_ops *ops,
559 struct segmented_address addr,
560 u16 *size, unsigned long *address, int op_bytes)
561 {
562 int rc;
563
564 if (op_bytes == 2)
565 op_bytes = 3;
566 *address = 0;
567 rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
568 ctxt->vcpu, NULL);
569 if (rc != X86EMUL_CONTINUE)
570 return rc;
571 addr.ea += 2;
572 rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
573 ctxt->vcpu, NULL);
574 return rc;
575 }
576
577 static int test_cc(unsigned int condition, unsigned int flags)
578 {
579 int rc = 0;
580
581 switch ((condition & 15) >> 1) {
582 case 0: /* o */
583 rc |= (flags & EFLG_OF);
584 break;
585 case 1: /* b/c/nae */
586 rc |= (flags & EFLG_CF);
587 break;
588 case 2: /* z/e */
589 rc |= (flags & EFLG_ZF);
590 break;
591 case 3: /* be/na */
592 rc |= (flags & (EFLG_CF|EFLG_ZF));
593 break;
594 case 4: /* s */
595 rc |= (flags & EFLG_SF);
596 break;
597 case 5: /* p/pe */
598 rc |= (flags & EFLG_PF);
599 break;
600 case 7: /* le/ng */
601 rc |= (flags & EFLG_ZF);
602 /* fall through */
603 case 6: /* l/nge */
604 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
605 break;
606 }
607
608 /* Odd condition identifiers (lsb == 1) have inverted sense. */
609 return (!!rc ^ (condition & 1));
610 }
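/*
 * Usage sketch: the low nibble of a Jcc/SETcc/CMOVcc opcode is the
 * condition code that test_cc() evaluates; bit 0 inverts the sense, so
 * 0x4 is "z/e" (JZ) and 0x5 is its negation (JNZ).
 */
static int demo_jz_taken(unsigned int eflags)
{
	return test_cc(0x4, eflags);
}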
611
612 static void fetch_register_operand(struct operand *op)
613 {
614 switch (op->bytes) {
615 case 1:
616 op->val = *(u8 *)op->addr.reg;
617 break;
618 case 2:
619 op->val = *(u16 *)op->addr.reg;
620 break;
621 case 4:
622 op->val = *(u32 *)op->addr.reg;
623 break;
624 case 8:
625 op->val = *(u64 *)op->addr.reg;
626 break;
627 }
628 }
629
630 static void decode_register_operand(struct operand *op,
631 struct decode_cache *c,
632 int inhibit_bytereg)
633 {
634 unsigned reg = c->modrm_reg;
635 int highbyte_regs = c->rex_prefix == 0;
636
637 if (!(c->d & ModRM))
638 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
639 op->type = OP_REG;
640 if ((c->d & ByteOp) && !inhibit_bytereg) {
641 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
642 op->bytes = 1;
643 } else {
644 op->addr.reg = decode_register(reg, c->regs, 0);
645 op->bytes = c->op_bytes;
646 }
647 fetch_register_operand(op);
648 op->orig_val = op->val;
649 }
650
651 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
652 struct x86_emulate_ops *ops,
653 struct operand *op)
654 {
655 struct decode_cache *c = &ctxt->decode;
656 u8 sib;
657 int index_reg = 0, base_reg = 0, scale;
658 int rc = X86EMUL_CONTINUE;
659 ulong modrm_ea = 0;
660
661 if (c->rex_prefix) {
662 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
663 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
664 	c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
665 }
666
667 c->modrm = insn_fetch(u8, 1, c->eip);
668 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
669 c->modrm_reg |= (c->modrm & 0x38) >> 3;
670 c->modrm_rm |= (c->modrm & 0x07);
671 c->modrm_seg = VCPU_SREG_DS;
672
673 if (c->modrm_mod == 3) {
674 op->type = OP_REG;
675 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
676 op->addr.reg = decode_register(c->modrm_rm,
677 c->regs, c->d & ByteOp);
678 fetch_register_operand(op);
679 return rc;
680 }
681
682 op->type = OP_MEM;
683
684 if (c->ad_bytes == 2) {
685 unsigned bx = c->regs[VCPU_REGS_RBX];
686 unsigned bp = c->regs[VCPU_REGS_RBP];
687 unsigned si = c->regs[VCPU_REGS_RSI];
688 unsigned di = c->regs[VCPU_REGS_RDI];
689
690 /* 16-bit ModR/M decode. */
691 switch (c->modrm_mod) {
692 case 0:
693 if (c->modrm_rm == 6)
694 modrm_ea += insn_fetch(u16, 2, c->eip);
695 break;
696 case 1:
697 modrm_ea += insn_fetch(s8, 1, c->eip);
698 break;
699 case 2:
700 modrm_ea += insn_fetch(u16, 2, c->eip);
701 break;
702 }
703 switch (c->modrm_rm) {
704 case 0:
705 modrm_ea += bx + si;
706 break;
707 case 1:
708 modrm_ea += bx + di;
709 break;
710 case 2:
711 modrm_ea += bp + si;
712 break;
713 case 3:
714 modrm_ea += bp + di;
715 break;
716 case 4:
717 modrm_ea += si;
718 break;
719 case 5:
720 modrm_ea += di;
721 break;
722 case 6:
723 if (c->modrm_mod != 0)
724 modrm_ea += bp;
725 break;
726 case 7:
727 modrm_ea += bx;
728 break;
729 }
730 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
731 (c->modrm_rm == 6 && c->modrm_mod != 0))
732 c->modrm_seg = VCPU_SREG_SS;
733 modrm_ea = (u16)modrm_ea;
734 } else {
735 /* 32/64-bit ModR/M decode. */
736 if ((c->modrm_rm & 7) == 4) {
737 sib = insn_fetch(u8, 1, c->eip);
738 index_reg |= (sib >> 3) & 7;
739 base_reg |= sib & 7;
740 scale = sib >> 6;
741
742 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
743 modrm_ea += insn_fetch(s32, 4, c->eip);
744 else
745 modrm_ea += c->regs[base_reg];
746 if (index_reg != 4)
747 modrm_ea += c->regs[index_reg] << scale;
748 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
749 if (ctxt->mode == X86EMUL_MODE_PROT64)
750 c->rip_relative = 1;
751 } else
752 modrm_ea += c->regs[c->modrm_rm];
753 switch (c->modrm_mod) {
754 case 0:
755 if (c->modrm_rm == 5)
756 modrm_ea += insn_fetch(s32, 4, c->eip);
757 break;
758 case 1:
759 modrm_ea += insn_fetch(s8, 1, c->eip);
760 break;
761 case 2:
762 modrm_ea += insn_fetch(s32, 4, c->eip);
763 break;
764 }
765 }
766 op->addr.mem.ea = modrm_ea;
767 done:
768 return rc;
769 }
770
771 static int decode_abs(struct x86_emulate_ctxt *ctxt,
772 struct x86_emulate_ops *ops,
773 struct operand *op)
774 {
775 struct decode_cache *c = &ctxt->decode;
776 int rc = X86EMUL_CONTINUE;
777
778 op->type = OP_MEM;
779 switch (c->ad_bytes) {
780 case 2:
781 op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
782 break;
783 case 4:
784 op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
785 break;
786 case 8:
787 op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
788 break;
789 }
790 done:
791 return rc;
792 }
793
794 static void fetch_bit_operand(struct decode_cache *c)
795 {
796 long sv = 0, mask;
797
798 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
799 mask = ~(c->dst.bytes * 8 - 1);
800
801 if (c->src.bytes == 2)
802 sv = (s16)c->src.val & (s16)mask;
803 else if (c->src.bytes == 4)
804 sv = (s32)c->src.val & (s32)mask;
805
806 c->dst.addr.mem.ea += (sv >> 3);
807 }
808
809 /* only subword offset */
810 c->src.val &= (c->dst.bytes << 3) - 1;
811 }
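/*
 * Illustrative sketch of the split above: for a 32-bit "bt [mem], reg"
 * with a register bit offset of 100, the effective address moves forward
 * by (100 & ~31) >> 3 == 12 bytes and bit 100 & 31 == 4 is tested.
 */
static void demo_bit_split(unsigned long *ea, long *bit)
{
	*ea += (*bit & ~31L) >> 3;	/* byte displacement (sign-extends) */
	*bit &= 31;			/* sub-dword bit index */
}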
812
813 static int read_emulated(struct x86_emulate_ctxt *ctxt,
814 struct x86_emulate_ops *ops,
815 unsigned long addr, void *dest, unsigned size)
816 {
817 int rc;
818 struct read_cache *mc = &ctxt->decode.mem_read;
819 u32 err;
820
821 while (size) {
822 int n = min(size, 8u);
823 size -= n;
824 if (mc->pos < mc->end)
825 goto read_cached;
826
827 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
828 ctxt->vcpu);
829 if (rc == X86EMUL_PROPAGATE_FAULT)
830 emulate_pf(ctxt);
831 if (rc != X86EMUL_CONTINUE)
832 return rc;
833 mc->end += n;
834
835 read_cached:
836 memcpy(dest, mc->data + mc->pos, n);
837 mc->pos += n;
838 dest += n;
839 addr += n;
840 }
841 return X86EMUL_CONTINUE;
842 }
843
844 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
845 struct x86_emulate_ops *ops,
846 unsigned int size, unsigned short port,
847 void *dest)
848 {
849 struct read_cache *rc = &ctxt->decode.io_read;
850
851 if (rc->pos == rc->end) { /* refill pio read ahead */
852 struct decode_cache *c = &ctxt->decode;
853 unsigned int in_page, n;
854 unsigned int count = c->rep_prefix ?
855 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
856 in_page = (ctxt->eflags & EFLG_DF) ?
857 offset_in_page(c->regs[VCPU_REGS_RDI]) :
858 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
859 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
860 count);
861 if (n == 0)
862 n = 1;
863 rc->pos = rc->end = 0;
864 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
865 return 0;
866 rc->end = n * size;
867 }
868
869 memcpy(dest, rc->data + rc->pos, size);
870 rc->pos += size;
871 return 1;
872 }
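/*
 * Illustrative sketch: the read-ahead above batches as many "rep in"
 * iterations as fit in both the current page and the io_read buffer, so
 * a long rep string costs one VM exit per batch rather than per byte.
 */
static unsigned int demo_pio_batch(unsigned int in_page, unsigned int size,
				   unsigned int count, unsigned int bufsz)
{
	unsigned int n = (in_page < bufsz ? in_page : bufsz) / size;

	if (n > count)
		n = count;
	return n ? n : 1;	/* always make forward progress */
}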
873
874 static u32 desc_limit_scaled(struct desc_struct *desc)
875 {
876 u32 limit = get_desc_limit(desc);
877
878 return desc->g ? (limit << 12) | 0xfff : limit;
879 }
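/*
 * Worked example: with the granularity bit set, the 20-bit descriptor
 * limit is in 4K pages, so a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff - the full 4GB flat segment the
 * syscall helpers below set up.
 */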
880
881 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
882 struct x86_emulate_ops *ops,
883 u16 selector, struct desc_ptr *dt)
884 {
885 if (selector & 1 << 2) {
886 struct desc_struct desc;
887 		memset(dt, 0, sizeof *dt);
888 if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
889 return;
890
891 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
892 dt->address = get_desc_base(&desc);
893 } else
894 ops->get_gdt(dt, ctxt->vcpu);
895 }
896
897 /* allowed just for 8-byte segments */
898 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
899 struct x86_emulate_ops *ops,
900 u16 selector, struct desc_struct *desc)
901 {
902 struct desc_ptr dt;
903 u16 index = selector >> 3;
904 int ret;
905 u32 err;
906 ulong addr;
907
908 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
909
910 if (dt.size < index * 8 + 7) {
911 emulate_gp(ctxt, selector & 0xfffc);
912 return X86EMUL_PROPAGATE_FAULT;
913 }
914 addr = dt.address + index * 8;
915 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
916 if (ret == X86EMUL_PROPAGATE_FAULT)
917 emulate_pf(ctxt);
918
919 return ret;
920 }
921
922 /* allowed just for 8-byte segments */
923 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
924 struct x86_emulate_ops *ops,
925 u16 selector, struct desc_struct *desc)
926 {
927 struct desc_ptr dt;
928 u16 index = selector >> 3;
929 u32 err;
930 ulong addr;
931 int ret;
932
933 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
934
935 if (dt.size < index * 8 + 7) {
936 emulate_gp(ctxt, selector & 0xfffc);
937 return X86EMUL_PROPAGATE_FAULT;
938 }
939
940 addr = dt.address + index * 8;
941 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
942 if (ret == X86EMUL_PROPAGATE_FAULT)
943 emulate_pf(ctxt);
944
945 return ret;
946 }
947
948 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
949 struct x86_emulate_ops *ops,
950 u16 selector, int seg)
951 {
952 struct desc_struct seg_desc;
953 u8 dpl, rpl, cpl;
954 unsigned err_vec = GP_VECTOR;
955 u32 err_code = 0;
956 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
957 int ret;
958
959 memset(&seg_desc, 0, sizeof seg_desc);
960
961 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
962 || ctxt->mode == X86EMUL_MODE_REAL) {
963 /* set real mode segment descriptor */
964 set_desc_base(&seg_desc, selector << 4);
965 set_desc_limit(&seg_desc, 0xffff);
966 seg_desc.type = 3;
967 seg_desc.p = 1;
968 seg_desc.s = 1;
969 goto load;
970 }
971
972 /* NULL selector is not valid for TR, CS and SS */
973 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
974 && null_selector)
975 goto exception;
976
977 /* TR should be in GDT only */
978 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
979 goto exception;
980
981 if (null_selector) /* for NULL selector skip all following checks */
982 goto load;
983
984 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
985 if (ret != X86EMUL_CONTINUE)
986 return ret;
987
988 err_code = selector & 0xfffc;
989 err_vec = GP_VECTOR;
990
991 	/* can't load a system descriptor into a segment selector */
992 if (seg <= VCPU_SREG_GS && !seg_desc.s)
993 goto exception;
994
995 if (!seg_desc.p) {
996 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
997 goto exception;
998 }
999
1000 rpl = selector & 3;
1001 dpl = seg_desc.dpl;
1002 cpl = ops->cpl(ctxt->vcpu);
1003
1004 switch (seg) {
1005 case VCPU_SREG_SS:
1006 /*
1007 		 * segment is not a writable data segment, or the segment
1008 		 * selector's RPL != CPL, or the segment's DPL != CPL
1009 */
1010 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1011 goto exception;
1012 break;
1013 case VCPU_SREG_CS:
1014 if (!(seg_desc.type & 8))
1015 goto exception;
1016
1017 if (seg_desc.type & 4) {
1018 /* conforming */
1019 if (dpl > cpl)
1020 goto exception;
1021 } else {
1022 /* nonconforming */
1023 if (rpl > cpl || dpl != cpl)
1024 goto exception;
1025 }
1026 /* CS(RPL) <- CPL */
1027 selector = (selector & 0xfffc) | cpl;
1028 break;
1029 case VCPU_SREG_TR:
1030 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1031 goto exception;
1032 break;
1033 case VCPU_SREG_LDTR:
1034 if (seg_desc.s || seg_desc.type != 2)
1035 goto exception;
1036 break;
1037 default: /* DS, ES, FS, or GS */
1038 /*
1039 * segment is not a data or readable code segment or
1040 * ((segment is a data or nonconforming code segment)
1041 * and (both RPL and CPL > DPL))
1042 */
1043 if ((seg_desc.type & 0xa) == 0x8 ||
1044 (((seg_desc.type & 0xc) != 0xc) &&
1045 (rpl > dpl && cpl > dpl)))
1046 goto exception;
1047 break;
1048 }
1049
1050 if (seg_desc.s) {
1051 /* mark segment as accessed */
1052 seg_desc.type |= 1;
1053 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1054 if (ret != X86EMUL_CONTINUE)
1055 return ret;
1056 }
1057 load:
1058 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1059 ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
1060 return X86EMUL_CONTINUE;
1061 exception:
1062 emulate_exception(ctxt, err_vec, err_code, true);
1063 return X86EMUL_PROPAGATE_FAULT;
1064 }
1065
1066 static void write_register_operand(struct operand *op)
1067 {
1068 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1069 switch (op->bytes) {
1070 case 1:
1071 *(u8 *)op->addr.reg = (u8)op->val;
1072 break;
1073 case 2:
1074 *(u16 *)op->addr.reg = (u16)op->val;
1075 break;
1076 case 4:
1077 *op->addr.reg = (u32)op->val;
1078 break; /* 64b: zero-extend */
1079 case 8:
1080 *op->addr.reg = op->val;
1081 break;
1082 }
1083 }
1084
1085 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1086 struct x86_emulate_ops *ops)
1087 {
1088 int rc;
1089 struct decode_cache *c = &ctxt->decode;
1090 u32 err;
1091
1092 switch (c->dst.type) {
1093 case OP_REG:
1094 write_register_operand(&c->dst);
1095 break;
1096 case OP_MEM:
1097 if (c->lock_prefix)
1098 rc = ops->cmpxchg_emulated(
1099 linear(ctxt, c->dst.addr.mem),
1100 &c->dst.orig_val,
1101 &c->dst.val,
1102 c->dst.bytes,
1103 &err,
1104 ctxt->vcpu);
1105 else
1106 rc = ops->write_emulated(
1107 linear(ctxt, c->dst.addr.mem),
1108 &c->dst.val,
1109 c->dst.bytes,
1110 &err,
1111 ctxt->vcpu);
1112 if (rc == X86EMUL_PROPAGATE_FAULT)
1113 emulate_pf(ctxt);
1114 if (rc != X86EMUL_CONTINUE)
1115 return rc;
1116 break;
1117 case OP_NONE:
1118 /* no writeback */
1119 break;
1120 default:
1121 break;
1122 }
1123 return X86EMUL_CONTINUE;
1124 }
1125
1126 static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1127 struct x86_emulate_ops *ops)
1128 {
1129 struct decode_cache *c = &ctxt->decode;
1130
1131 c->dst.type = OP_MEM;
1132 c->dst.bytes = c->op_bytes;
1133 c->dst.val = c->src.val;
1134 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1135 c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1136 c->dst.addr.mem.seg = VCPU_SREG_SS;
1137 }
1138
1139 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1140 struct x86_emulate_ops *ops,
1141 void *dest, int len)
1142 {
1143 struct decode_cache *c = &ctxt->decode;
1144 int rc;
1145 struct segmented_address addr;
1146
1147 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1148 addr.seg = VCPU_SREG_SS;
1149 rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
1150 if (rc != X86EMUL_CONTINUE)
1151 return rc;
1152
1153 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1154 return rc;
1155 }
1156
1157 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1158 struct x86_emulate_ops *ops,
1159 void *dest, int len)
1160 {
1161 int rc;
1162 unsigned long val, change_mask;
1163 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1164 int cpl = ops->cpl(ctxt->vcpu);
1165
1166 rc = emulate_pop(ctxt, ops, &val, len);
1167 if (rc != X86EMUL_CONTINUE)
1168 return rc;
1169
1170 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1171 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1172
1173 switch(ctxt->mode) {
1174 case X86EMUL_MODE_PROT64:
1175 case X86EMUL_MODE_PROT32:
1176 case X86EMUL_MODE_PROT16:
1177 if (cpl == 0)
1178 change_mask |= EFLG_IOPL;
1179 if (cpl <= iopl)
1180 change_mask |= EFLG_IF;
1181 break;
1182 case X86EMUL_MODE_VM86:
1183 if (iopl < 3) {
1184 emulate_gp(ctxt, 0);
1185 return X86EMUL_PROPAGATE_FAULT;
1186 }
1187 change_mask |= EFLG_IF;
1188 break;
1189 default: /* real mode */
1190 change_mask |= (EFLG_IOPL | EFLG_IF);
1191 break;
1192 }
1193
1194 *(unsigned long *)dest =
1195 (ctxt->eflags & ~change_mask) | (val & change_mask);
1196
1197 if (rc == X86EMUL_PROPAGATE_FAULT)
1198 emulate_pf(ctxt);
1199
1200 return rc;
1201 }
1202
1203 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1204 struct x86_emulate_ops *ops, int seg)
1205 {
1206 struct decode_cache *c = &ctxt->decode;
1207
1208 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1209
1210 emulate_push(ctxt, ops);
1211 }
1212
1213 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1214 struct x86_emulate_ops *ops, int seg)
1215 {
1216 struct decode_cache *c = &ctxt->decode;
1217 unsigned long selector;
1218 int rc;
1219
1220 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1221 if (rc != X86EMUL_CONTINUE)
1222 return rc;
1223
1224 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1225 return rc;
1226 }
1227
1228 static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
1229 struct x86_emulate_ops *ops)
1230 {
1231 struct decode_cache *c = &ctxt->decode;
1232 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1233 int rc = X86EMUL_CONTINUE;
1234 int reg = VCPU_REGS_RAX;
1235
1236 while (reg <= VCPU_REGS_RDI) {
1237 (reg == VCPU_REGS_RSP) ?
1238 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1239
1240 emulate_push(ctxt, ops);
1241
1242 rc = writeback(ctxt, ops);
1243 if (rc != X86EMUL_CONTINUE)
1244 return rc;
1245
1246 ++reg;
1247 }
1248
1249 /* Disable writeback. */
1250 c->dst.type = OP_NONE;
1251
1252 return rc;
1253 }
1254
1255 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1256 struct x86_emulate_ops *ops)
1257 {
1258 struct decode_cache *c = &ctxt->decode;
1259 int rc = X86EMUL_CONTINUE;
1260 int reg = VCPU_REGS_RDI;
1261
1262 while (reg >= VCPU_REGS_RAX) {
1263 if (reg == VCPU_REGS_RSP) {
1264 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1265 c->op_bytes);
1266 --reg;
1267 }
1268
1269 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1270 if (rc != X86EMUL_CONTINUE)
1271 break;
1272 --reg;
1273 }
1274 return rc;
1275 }
1276
1277 int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1278 struct x86_emulate_ops *ops, int irq)
1279 {
1280 struct decode_cache *c = &ctxt->decode;
1281 int rc;
1282 struct desc_ptr dt;
1283 gva_t cs_addr;
1284 gva_t eip_addr;
1285 u16 cs, eip;
1286 u32 err;
1287
1288 /* TODO: Add limit checks */
1289 c->src.val = ctxt->eflags;
1290 emulate_push(ctxt, ops);
1291 rc = writeback(ctxt, ops);
1292 if (rc != X86EMUL_CONTINUE)
1293 return rc;
1294
1295 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1296
1297 c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1298 emulate_push(ctxt, ops);
1299 rc = writeback(ctxt, ops);
1300 if (rc != X86EMUL_CONTINUE)
1301 return rc;
1302
1303 c->src.val = c->eip;
1304 emulate_push(ctxt, ops);
1305 rc = writeback(ctxt, ops);
1306 if (rc != X86EMUL_CONTINUE)
1307 return rc;
1308
1309 c->dst.type = OP_NONE;
1310
1311 ops->get_idt(&dt, ctxt->vcpu);
1312
1313 eip_addr = dt.address + (irq << 2);
1314 cs_addr = dt.address + (irq << 2) + 2;
1315
1316 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
1317 if (rc != X86EMUL_CONTINUE)
1318 return rc;
1319
1320 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
1321 if (rc != X86EMUL_CONTINUE)
1322 return rc;
1323
1324 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1325 if (rc != X86EMUL_CONTINUE)
1326 return rc;
1327
1328 c->eip = eip;
1329
1330 return rc;
1331 }
1332
1333 static int emulate_int(struct x86_emulate_ctxt *ctxt,
1334 struct x86_emulate_ops *ops, int irq)
1335 {
1336 switch(ctxt->mode) {
1337 case X86EMUL_MODE_REAL:
1338 return emulate_int_real(ctxt, ops, irq);
1339 case X86EMUL_MODE_VM86:
1340 case X86EMUL_MODE_PROT16:
1341 case X86EMUL_MODE_PROT32:
1342 case X86EMUL_MODE_PROT64:
1343 default:
1344 		/* Protected-mode interrupts are not implemented yet */
1345 return X86EMUL_UNHANDLEABLE;
1346 }
1347 }
1348
1349 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1350 struct x86_emulate_ops *ops)
1351 {
1352 struct decode_cache *c = &ctxt->decode;
1353 int rc = X86EMUL_CONTINUE;
1354 unsigned long temp_eip = 0;
1355 unsigned long temp_eflags = 0;
1356 unsigned long cs = 0;
1357 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1358 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1359 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1360 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1361
1362 /* TODO: Add stack limit check */
1363
1364 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1365
1366 if (rc != X86EMUL_CONTINUE)
1367 return rc;
1368
1369 if (temp_eip & ~0xffff) {
1370 emulate_gp(ctxt, 0);
1371 return X86EMUL_PROPAGATE_FAULT;
1372 }
1373
1374 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1375
1376 if (rc != X86EMUL_CONTINUE)
1377 return rc;
1378
1379 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1380
1381 if (rc != X86EMUL_CONTINUE)
1382 return rc;
1383
1384 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1385
1386 if (rc != X86EMUL_CONTINUE)
1387 return rc;
1388
1389 c->eip = temp_eip;
1390
1391
1392 if (c->op_bytes == 4)
1393 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1394 else if (c->op_bytes == 2) {
1395 ctxt->eflags &= ~0xffff;
1396 ctxt->eflags |= temp_eflags;
1397 }
1398
1399 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1400 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1401
1402 return rc;
1403 }
1404
1405 static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1406 			       struct x86_emulate_ops *ops)
1407 {
1408 switch(ctxt->mode) {
1409 case X86EMUL_MODE_REAL:
1410 return emulate_iret_real(ctxt, ops);
1411 case X86EMUL_MODE_VM86:
1412 case X86EMUL_MODE_PROT16:
1413 case X86EMUL_MODE_PROT32:
1414 case X86EMUL_MODE_PROT64:
1415 default:
1416 		/* iret from protected mode is not implemented yet */
1417 return X86EMUL_UNHANDLEABLE;
1418 }
1419 }
1420
1421 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1422 struct x86_emulate_ops *ops)
1423 {
1424 struct decode_cache *c = &ctxt->decode;
1425
1426 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1427 }
1428
1429 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1430 {
1431 struct decode_cache *c = &ctxt->decode;
1432 switch (c->modrm_reg) {
1433 case 0: /* rol */
1434 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1435 break;
1436 case 1: /* ror */
1437 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1438 break;
1439 case 2: /* rcl */
1440 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1441 break;
1442 case 3: /* rcr */
1443 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1444 break;
1445 case 4: /* sal/shl */
1446 case 6: /* sal/shl */
1447 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1448 break;
1449 case 5: /* shr */
1450 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1451 break;
1452 case 7: /* sar */
1453 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1454 break;
1455 }
1456 }
1457
1458 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1459 struct x86_emulate_ops *ops)
1460 {
1461 struct decode_cache *c = &ctxt->decode;
1462 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1463 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1464 u8 de = 0;
1465
1466 switch (c->modrm_reg) {
1467 case 0 ... 1: /* test */
1468 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1469 break;
1470 case 2: /* not */
1471 c->dst.val = ~c->dst.val;
1472 break;
1473 case 3: /* neg */
1474 emulate_1op("neg", c->dst, ctxt->eflags);
1475 break;
1476 case 4: /* mul */
1477 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1478 break;
1479 case 5: /* imul */
1480 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1481 break;
1482 case 6: /* div */
1483 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1484 ctxt->eflags, de);
1485 break;
1486 case 7: /* idiv */
1487 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1488 ctxt->eflags, de);
1489 break;
1490 default:
1491 return X86EMUL_UNHANDLEABLE;
1492 }
1493 if (de)
1494 return emulate_de(ctxt);
1495 return X86EMUL_CONTINUE;
1496 }
1497
1498 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1499 struct x86_emulate_ops *ops)
1500 {
1501 struct decode_cache *c = &ctxt->decode;
1502
1503 switch (c->modrm_reg) {
1504 case 0: /* inc */
1505 emulate_1op("inc", c->dst, ctxt->eflags);
1506 break;
1507 case 1: /* dec */
1508 emulate_1op("dec", c->dst, ctxt->eflags);
1509 break;
1510 case 2: /* call near abs */ {
1511 long int old_eip;
1512 old_eip = c->eip;
1513 c->eip = c->src.val;
1514 c->src.val = old_eip;
1515 emulate_push(ctxt, ops);
1516 break;
1517 }
1518 case 4: /* jmp abs */
1519 c->eip = c->src.val;
1520 break;
1521 case 6: /* push */
1522 emulate_push(ctxt, ops);
1523 break;
1524 }
1525 return X86EMUL_CONTINUE;
1526 }
1527
1528 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1529 struct x86_emulate_ops *ops)
1530 {
1531 struct decode_cache *c = &ctxt->decode;
1532 u64 old = c->dst.orig_val64;
1533
1534 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1535 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1536 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1537 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1538 ctxt->eflags &= ~EFLG_ZF;
1539 } else {
1540 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1541 (u32) c->regs[VCPU_REGS_RBX];
1542
1543 ctxt->eflags |= EFLG_ZF;
1544 }
1545 return X86EMUL_CONTINUE;
1546 }
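/*
 * Illustrative note: this is cmpxchg8b - EDX:EAX is compared with the
 * 64-bit destination; on mismatch the old value is loaded into EDX:EAX
 * and ZF is cleared, on match ECX:EBX is stored back and ZF is set.
 */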
1547
1548 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1549 struct x86_emulate_ops *ops)
1550 {
1551 struct decode_cache *c = &ctxt->decode;
1552 int rc;
1553 unsigned long cs;
1554
1555 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1556 if (rc != X86EMUL_CONTINUE)
1557 return rc;
1558 if (c->op_bytes == 4)
1559 c->eip = (u32)c->eip;
1560 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1561 if (rc != X86EMUL_CONTINUE)
1562 return rc;
1563 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1564 return rc;
1565 }
1566
1567 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1568 struct x86_emulate_ops *ops, int seg)
1569 {
1570 struct decode_cache *c = &ctxt->decode;
1571 unsigned short sel;
1572 int rc;
1573
1574 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1575
1576 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1577 if (rc != X86EMUL_CONTINUE)
1578 return rc;
1579
1580 c->dst.val = c->src.val;
1581 return rc;
1582 }
1583
1584 static inline void
1585 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1586 struct x86_emulate_ops *ops, struct desc_struct *cs,
1587 struct desc_struct *ss)
1588 {
1589 memset(cs, 0, sizeof(struct desc_struct));
1590 ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
1591 memset(ss, 0, sizeof(struct desc_struct));
1592
1593 cs->l = 0; /* will be adjusted later */
1594 set_desc_base(cs, 0); /* flat segment */
1595 cs->g = 1; /* 4kb granularity */
1596 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1597 cs->type = 0x0b; /* Read, Execute, Accessed */
1598 cs->s = 1;
1599 cs->dpl = 0; /* will be adjusted later */
1600 cs->p = 1;
1601 cs->d = 1;
1602
1603 set_desc_base(ss, 0); /* flat segment */
1604 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1605 ss->g = 1; /* 4kb granularity */
1606 ss->s = 1;
1607 ss->type = 0x03; /* Read/Write, Accessed */
1608 ss->d = 1; /* 32bit stack segment */
1609 ss->dpl = 0;
1610 ss->p = 1;
1611 }
1612
1613 static int
1614 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1615 {
1616 struct decode_cache *c = &ctxt->decode;
1617 struct desc_struct cs, ss;
1618 u64 msr_data;
1619 u16 cs_sel, ss_sel;
1620
1621 /* syscall is not available in real mode */
1622 if (ctxt->mode == X86EMUL_MODE_REAL ||
1623 ctxt->mode == X86EMUL_MODE_VM86) {
1624 emulate_ud(ctxt);
1625 return X86EMUL_PROPAGATE_FAULT;
1626 }
1627
1628 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1629
1630 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1631 msr_data >>= 32;
1632 cs_sel = (u16)(msr_data & 0xfffc);
1633 ss_sel = (u16)(msr_data + 8);
1634
1635 if (is_long_mode(ctxt->vcpu)) {
1636 cs.d = 0;
1637 cs.l = 1;
1638 }
1639 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1640 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1641 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1642 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1643
1644 c->regs[VCPU_REGS_RCX] = c->eip;
1645 if (is_long_mode(ctxt->vcpu)) {
1646 #ifdef CONFIG_X86_64
1647 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1648
1649 ops->get_msr(ctxt->vcpu,
1650 ctxt->mode == X86EMUL_MODE_PROT64 ?
1651 MSR_LSTAR : MSR_CSTAR, &msr_data);
1652 c->eip = msr_data;
1653
1654 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1655 ctxt->eflags &= ~(msr_data | EFLG_RF);
1656 #endif
1657 } else {
1658 /* legacy mode */
1659 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1660 c->eip = (u32)msr_data;
1661
1662 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1663 }
1664
1665 return X86EMUL_CONTINUE;
1666 }
1667
1668 static int
1669 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1670 {
1671 struct decode_cache *c = &ctxt->decode;
1672 struct desc_struct cs, ss;
1673 u64 msr_data;
1674 u16 cs_sel, ss_sel;
1675
1676 /* inject #GP if in real mode */
1677 if (ctxt->mode == X86EMUL_MODE_REAL) {
1678 emulate_gp(ctxt, 0);
1679 return X86EMUL_PROPAGATE_FAULT;
1680 }
1681
1682 	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
1683 * Therefore, we inject an #UD.
1684 */
1685 if (ctxt->mode == X86EMUL_MODE_PROT64) {
1686 emulate_ud(ctxt);
1687 return X86EMUL_PROPAGATE_FAULT;
1688 }
1689
1690 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1691
1692 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1693 switch (ctxt->mode) {
1694 case X86EMUL_MODE_PROT32:
1695 if ((msr_data & 0xfffc) == 0x0) {
1696 emulate_gp(ctxt, 0);
1697 return X86EMUL_PROPAGATE_FAULT;
1698 }
1699 break;
1700 case X86EMUL_MODE_PROT64:
1701 if (msr_data == 0x0) {
1702 emulate_gp(ctxt, 0);
1703 return X86EMUL_PROPAGATE_FAULT;
1704 }
1705 break;
1706 }
1707
1708 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1709 cs_sel = (u16)msr_data;
1710 cs_sel &= ~SELECTOR_RPL_MASK;
1711 ss_sel = cs_sel + 8;
1712 ss_sel &= ~SELECTOR_RPL_MASK;
1713 if (ctxt->mode == X86EMUL_MODE_PROT64
1714 || is_long_mode(ctxt->vcpu)) {
1715 cs.d = 0;
1716 cs.l = 1;
1717 }
1718
1719 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1720 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1721 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1722 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1723
1724 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1725 c->eip = msr_data;
1726
1727 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1728 c->regs[VCPU_REGS_RSP] = msr_data;
1729
1730 return X86EMUL_CONTINUE;
1731 }
1732
1733 static int
1734 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1735 {
1736 struct decode_cache *c = &ctxt->decode;
1737 struct desc_struct cs, ss;
1738 u64 msr_data;
1739 int usermode;
1740 u16 cs_sel, ss_sel;
1741
1742 /* inject #GP if in real mode or Virtual 8086 mode */
1743 if (ctxt->mode == X86EMUL_MODE_REAL ||
1744 ctxt->mode == X86EMUL_MODE_VM86) {
1745 emulate_gp(ctxt, 0);
1746 return X86EMUL_PROPAGATE_FAULT;
1747 }
1748
1749 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1750
1751 if ((c->rex_prefix & 0x8) != 0x0)
1752 usermode = X86EMUL_MODE_PROT64;
1753 else
1754 usermode = X86EMUL_MODE_PROT32;
1755
1756 cs.dpl = 3;
1757 ss.dpl = 3;
1758 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1759 switch (usermode) {
1760 case X86EMUL_MODE_PROT32:
1761 cs_sel = (u16)(msr_data + 16);
1762 if ((msr_data & 0xfffc) == 0x0) {
1763 emulate_gp(ctxt, 0);
1764 return X86EMUL_PROPAGATE_FAULT;
1765 }
1766 ss_sel = (u16)(msr_data + 24);
1767 break;
1768 case X86EMUL_MODE_PROT64:
1769 cs_sel = (u16)(msr_data + 32);
1770 if (msr_data == 0x0) {
1771 emulate_gp(ctxt, 0);
1772 return X86EMUL_PROPAGATE_FAULT;
1773 }
1774 ss_sel = cs_sel + 8;
1775 cs.d = 0;
1776 cs.l = 1;
1777 break;
1778 }
1779 cs_sel |= SELECTOR_RPL_MASK;
1780 ss_sel |= SELECTOR_RPL_MASK;
1781
1782 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1783 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1784 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1785 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1786
1787 c->eip = c->regs[VCPU_REGS_RDX];
1788 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
1789
1790 return X86EMUL_CONTINUE;
1791 }
1792
1793 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
1794 struct x86_emulate_ops *ops)
1795 {
1796 int iopl;
1797 if (ctxt->mode == X86EMUL_MODE_REAL)
1798 return false;
1799 if (ctxt->mode == X86EMUL_MODE_VM86)
1800 return true;
1801 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1802 return ops->cpl(ctxt->vcpu) > iopl;
1803 }
1804
1805 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1806 struct x86_emulate_ops *ops,
1807 u16 port, u16 len)
1808 {
1809 struct desc_struct tr_seg;
1810 int r;
1811 u16 io_bitmap_ptr;
1812 u8 perm, bit_idx = port & 0x7;
1813 unsigned mask = (1 << len) - 1;
1814
1815 ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
1816 if (!tr_seg.p)
1817 return false;
1818 if (desc_limit_scaled(&tr_seg) < 103)
1819 return false;
1820 r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
1821 ctxt->vcpu, NULL);
1822 if (r != X86EMUL_CONTINUE)
1823 return false;
1824 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
1825 return false;
1826 r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
1827 &perm, 1, ctxt->vcpu, NULL);
1828 if (r != X86EMUL_CONTINUE)
1829 return false;
1830 if ((perm >> bit_idx) & mask)
1831 return false;
1832 return true;
1833 }
1834
1835 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1836 struct x86_emulate_ops *ops,
1837 u16 port, u16 len)
1838 {
1839 if (ctxt->perm_ok)
1840 return true;
1841
1842 if (emulator_bad_iopl(ctxt, ops))
1843 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1844 return false;
1845
1846 ctxt->perm_ok = true;
1847
1848 return true;
1849 }
1850
1851 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
1852 struct x86_emulate_ops *ops,
1853 struct tss_segment_16 *tss)
1854 {
1855 struct decode_cache *c = &ctxt->decode;
1856
1857 tss->ip = c->eip;
1858 tss->flag = ctxt->eflags;
1859 tss->ax = c->regs[VCPU_REGS_RAX];
1860 tss->cx = c->regs[VCPU_REGS_RCX];
1861 tss->dx = c->regs[VCPU_REGS_RDX];
1862 tss->bx = c->regs[VCPU_REGS_RBX];
1863 tss->sp = c->regs[VCPU_REGS_RSP];
1864 tss->bp = c->regs[VCPU_REGS_RBP];
1865 tss->si = c->regs[VCPU_REGS_RSI];
1866 tss->di = c->regs[VCPU_REGS_RDI];
1867
1868 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1869 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1870 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1871 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1872 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
1873 }
1874
1875 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
1876 struct x86_emulate_ops *ops,
1877 struct tss_segment_16 *tss)
1878 {
1879 struct decode_cache *c = &ctxt->decode;
1880 int ret;
1881
1882 c->eip = tss->ip;
1883 ctxt->eflags = tss->flag | 2;
1884 c->regs[VCPU_REGS_RAX] = tss->ax;
1885 c->regs[VCPU_REGS_RCX] = tss->cx;
1886 c->regs[VCPU_REGS_RDX] = tss->dx;
1887 c->regs[VCPU_REGS_RBX] = tss->bx;
1888 c->regs[VCPU_REGS_RSP] = tss->sp;
1889 c->regs[VCPU_REGS_RBP] = tss->bp;
1890 c->regs[VCPU_REGS_RSI] = tss->si;
1891 c->regs[VCPU_REGS_RDI] = tss->di;
1892
1893 /*
1894 * SDM says that segment selectors are loaded before segment
1895 * descriptors
1896 */
1897 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
1898 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
1899 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
1900 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
1901 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
1902
1903 /*
1904 	 * Now load segment descriptors. If a fault happens at this stage
1905 	 * it is handled in the context of the new task
1906 */
1907 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
1908 if (ret != X86EMUL_CONTINUE)
1909 return ret;
1910 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
1911 if (ret != X86EMUL_CONTINUE)
1912 return ret;
1913 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
1914 if (ret != X86EMUL_CONTINUE)
1915 return ret;
1916 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
1917 if (ret != X86EMUL_CONTINUE)
1918 return ret;
1919 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
1920 if (ret != X86EMUL_CONTINUE)
1921 return ret;
1922
1923 return X86EMUL_CONTINUE;
1924 }
1925
1926 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
1927 struct x86_emulate_ops *ops,
1928 u16 tss_selector, u16 old_tss_sel,
1929 ulong old_tss_base, struct desc_struct *new_desc)
1930 {
1931 struct tss_segment_16 tss_seg;
1932 int ret;
1933 u32 err, new_tss_base = get_desc_base(new_desc);
1934
1935 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1936 &err);
1937 if (ret == X86EMUL_PROPAGATE_FAULT) {
1938 /* FIXME: need to provide precise fault address */
1939 emulate_pf(ctxt);
1940 return ret;
1941 }
1942
1943 save_state_to_tss16(ctxt, ops, &tss_seg);
1944
1945 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1946 &err);
1947 if (ret == X86EMUL_PROPAGATE_FAULT) {
1948 /* FIXME: need to provide precise fault address */
1949 emulate_pf(ctxt);
1950 return ret;
1951 }
1952
1953 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1954 &err);
1955 if (ret == X86EMUL_PROPAGATE_FAULT) {
1956 /* FIXME: need to provide precise fault address */
1957 emulate_pf(ctxt);
1958 return ret;
1959 }
1960
1961 if (old_tss_sel != 0xffff) {
1962 tss_seg.prev_task_link = old_tss_sel;
1963
1964 ret = ops->write_std(new_tss_base,
1965 &tss_seg.prev_task_link,
1966 sizeof tss_seg.prev_task_link,
1967 ctxt->vcpu, &err);
1968 if (ret == X86EMUL_PROPAGATE_FAULT) {
1969 /* FIXME: need to provide precise fault address */
1970 emulate_pf(ctxt);
1971 return ret;
1972 }
1973 }
1974
1975 return load_state_from_tss16(ctxt, ops, &tss_seg);
1976 }
1977
1978 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
1979 struct x86_emulate_ops *ops,
1980 struct tss_segment_32 *tss)
1981 {
1982 struct decode_cache *c = &ctxt->decode;
1983
1984 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
1985 tss->eip = c->eip;
1986 tss->eflags = ctxt->eflags;
1987 tss->eax = c->regs[VCPU_REGS_RAX];
1988 tss->ecx = c->regs[VCPU_REGS_RCX];
1989 tss->edx = c->regs[VCPU_REGS_RDX];
1990 tss->ebx = c->regs[VCPU_REGS_RBX];
1991 tss->esp = c->regs[VCPU_REGS_RSP];
1992 tss->ebp = c->regs[VCPU_REGS_RBP];
1993 tss->esi = c->regs[VCPU_REGS_RSI];
1994 tss->edi = c->regs[VCPU_REGS_RDI];
1995
1996 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1997 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1998 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1999 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2000 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
2001 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
2002 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2003 }
2004
2005 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2006 struct x86_emulate_ops *ops,
2007 struct tss_segment_32 *tss)
2008 {
2009 struct decode_cache *c = &ctxt->decode;
2010 int ret;
2011
2012 if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
2013 emulate_gp(ctxt, 0);
2014 return X86EMUL_PROPAGATE_FAULT;
2015 }
2016 c->eip = tss->eip;
2017 ctxt->eflags = tss->eflags | 2;
2018 c->regs[VCPU_REGS_RAX] = tss->eax;
2019 c->regs[VCPU_REGS_RCX] = tss->ecx;
2020 c->regs[VCPU_REGS_RDX] = tss->edx;
2021 c->regs[VCPU_REGS_RBX] = tss->ebx;
2022 c->regs[VCPU_REGS_RSP] = tss->esp;
2023 c->regs[VCPU_REGS_RBP] = tss->ebp;
2024 c->regs[VCPU_REGS_RSI] = tss->esi;
2025 c->regs[VCPU_REGS_RDI] = tss->edi;
2026
2027 /*
2028 * SDM says that segment selectors are loaded before segment
2029 * descriptors
2030 */
2031 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2032 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2033 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2034 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2035 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2036 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2037 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2038
2039 /*
2040 	 * Now load segment descriptors. If a fault happens at this stage,
2041 	 * it is handled in the context of the new task.
2042 */
2043 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2044 if (ret != X86EMUL_CONTINUE)
2045 return ret;
2046 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2047 if (ret != X86EMUL_CONTINUE)
2048 return ret;
2049 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2050 if (ret != X86EMUL_CONTINUE)
2051 return ret;
2052 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2053 if (ret != X86EMUL_CONTINUE)
2054 return ret;
2055 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2056 if (ret != X86EMUL_CONTINUE)
2057 return ret;
2058 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2059 if (ret != X86EMUL_CONTINUE)
2060 return ret;
2061 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2062 if (ret != X86EMUL_CONTINUE)
2063 return ret;
2064
2065 return X86EMUL_CONTINUE;
2066 }
2067
2068 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2069 struct x86_emulate_ops *ops,
2070 u16 tss_selector, u16 old_tss_sel,
2071 ulong old_tss_base, struct desc_struct *new_desc)
2072 {
2073 struct tss_segment_32 tss_seg;
2074 int ret;
2075 u32 err, new_tss_base = get_desc_base(new_desc);
2076
2077 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2078 &err);
2079 if (ret == X86EMUL_PROPAGATE_FAULT) {
2080 /* FIXME: need to provide precise fault address */
2081 emulate_pf(ctxt);
2082 return ret;
2083 }
2084
2085 save_state_to_tss32(ctxt, ops, &tss_seg);
2086
2087 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2088 &err);
2089 if (ret == X86EMUL_PROPAGATE_FAULT) {
2090 /* FIXME: need to provide precise fault address */
2091 emulate_pf(ctxt);
2092 return ret;
2093 }
2094
2095 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2096 &err);
2097 if (ret == X86EMUL_PROPAGATE_FAULT) {
2098 /* FIXME: need to provide precise fault address */
2099 emulate_pf(ctxt);
2100 return ret;
2101 }
2102
2103 if (old_tss_sel != 0xffff) {
2104 tss_seg.prev_task_link = old_tss_sel;
2105
2106 ret = ops->write_std(new_tss_base,
2107 &tss_seg.prev_task_link,
2108 sizeof tss_seg.prev_task_link,
2109 ctxt->vcpu, &err);
2110 if (ret == X86EMUL_PROPAGATE_FAULT) {
2111 /* FIXME: need to provide precise fault address */
2112 emulate_pf(ctxt);
2113 return ret;
2114 }
2115 }
2116
2117 return load_state_from_tss32(ctxt, ops, &tss_seg);
2118 }
2119
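/*
 * Common task-switch emulation, following the sequence the hardware
 * performs:
 *  - read the old and new TSS descriptors;
 *  - unless the switch is caused by IRET, check that neither the new
 *    selector's RPL nor the current CPL exceeds the new TSS DPL;
 *  - check that the new TSS is present and large enough (limit at
 *    least 0x67 for a 32-bit TSS, 0x2b for a 16-bit one);
 *  - on IRET/JMP, clear the busy bit in the outgoing TSS descriptor;
 *  - save state to the old TSS and load state from the new one;
 *  - on CALL and gate switches, set the back link and EFLAGS.NT and
 *    leave the new descriptor marked busy;
 *  - set CR0.TS, load TR, and push the error code if there is one.
 */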
2120 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2121 struct x86_emulate_ops *ops,
2122 u16 tss_selector, int reason,
2123 bool has_error_code, u32 error_code)
2124 {
2125 struct desc_struct curr_tss_desc, next_tss_desc;
2126 int ret;
2127 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2128 ulong old_tss_base =
2129 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2130 u32 desc_limit;
2131
2132 /* FIXME: old_tss_base == ~0 ? */
2133
2134 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2135 if (ret != X86EMUL_CONTINUE)
2136 return ret;
2137 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2138 if (ret != X86EMUL_CONTINUE)
2139 return ret;
2140
2141 /* FIXME: check that next_tss_desc is tss */
2142
2143 if (reason != TASK_SWITCH_IRET) {
2144 if ((tss_selector & 3) > next_tss_desc.dpl ||
2145 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2146 emulate_gp(ctxt, 0);
2147 return X86EMUL_PROPAGATE_FAULT;
2148 }
2149 }
2150
2151 desc_limit = desc_limit_scaled(&next_tss_desc);
2152 if (!next_tss_desc.p ||
2153 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2154 desc_limit < 0x2b)) {
2155 emulate_ts(ctxt, tss_selector & 0xfffc);
2156 return X86EMUL_PROPAGATE_FAULT;
2157 }
2158
2159 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2160 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2161 write_segment_descriptor(ctxt, ops, old_tss_sel,
2162 &curr_tss_desc);
2163 }
2164
2165 if (reason == TASK_SWITCH_IRET)
2166 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2167
2168 	/* set back link to prev task only if NT bit is set in eflags;
2169 	   note that old_tss_sel is not used after this point */
2170 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2171 old_tss_sel = 0xffff;
2172
2173 if (next_tss_desc.type & 8)
2174 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2175 old_tss_base, &next_tss_desc);
2176 else
2177 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2178 old_tss_base, &next_tss_desc);
2179 if (ret != X86EMUL_CONTINUE)
2180 return ret;
2181
2182 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2183 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2184
2185 if (reason != TASK_SWITCH_IRET) {
2186 next_tss_desc.type |= (1 << 1); /* set busy flag */
2187 write_segment_descriptor(ctxt, ops, tss_selector,
2188 &next_tss_desc);
2189 }
2190
2191 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2192 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2193 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2194
2195 if (has_error_code) {
2196 struct decode_cache *c = &ctxt->decode;
2197
2198 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2199 c->lock_prefix = 0;
2200 c->src.val = (unsigned long) error_code;
2201 emulate_push(ctxt, ops);
2202 }
2203
2204 return ret;
2205 }
2206
2207 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2208 u16 tss_selector, int reason,
2209 bool has_error_code, u32 error_code)
2210 {
2211 struct x86_emulate_ops *ops = ctxt->ops;
2212 struct decode_cache *c = &ctxt->decode;
2213 int rc;
2214
2215 c->eip = ctxt->eip;
2216 c->dst.type = OP_NONE;
2217
2218 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2219 has_error_code, error_code);
2220
2221 if (rc == X86EMUL_CONTINUE) {
2222 rc = writeback(ctxt, ops);
2223 if (rc == X86EMUL_CONTINUE)
2224 ctxt->eip = c->eip;
2225 }
2226
2227 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2228 }
2229
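/*
 * Advance a string operand (SI or DI) by one element: step the index
 * register forward or backward by the operand size according to
 * EFLAGS.DF, then recompute the operand's effective address.
 */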
2230 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2231 int reg, struct operand *op)
2232 {
2233 struct decode_cache *c = &ctxt->decode;
2234 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2235
2236 register_address_increment(c, &c->regs[reg], df * op->bytes);
2237 op->addr.mem.ea = register_address(c, c->regs[reg]);
2238 op->addr.mem.seg = seg;
2239 }
2240
2241 static int em_push(struct x86_emulate_ctxt *ctxt)
2242 {
2243 emulate_push(ctxt, ctxt->ops);
2244 return X86EMUL_CONTINUE;
2245 }
2246
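/*
 * DAS: decimal-adjust AL after subtraction of packed BCD operands.  If
 * the low nibble of AL is above 9 (or AF was set), subtract 6 and set
 * AF; if the original AL was above 0x99 (or CF was set), subtract 0x60
 * and set CF.  PF, ZF and SF are then derived from the result via a
 * dummy OR against zero.
 */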
2247 static int em_das(struct x86_emulate_ctxt *ctxt)
2248 {
2249 struct decode_cache *c = &ctxt->decode;
2250 u8 al, old_al;
2251 bool af, cf, old_cf;
2252
2253 cf = ctxt->eflags & X86_EFLAGS_CF;
2254 al = c->dst.val;
2255
2256 old_al = al;
2257 old_cf = cf;
2258 cf = false;
2259 af = ctxt->eflags & X86_EFLAGS_AF;
2260 if ((al & 0x0f) > 9 || af) {
2261 al -= 6;
2262 cf = old_cf | (al >= 250);
2263 af = true;
2264 } else {
2265 af = false;
2266 }
2267 if (old_al > 0x99 || old_cf) {
2268 al -= 0x60;
2269 cf = true;
2270 }
2271
2272 c->dst.val = al;
2273 /* Set PF, ZF, SF */
2274 c->src.type = OP_IMM;
2275 c->src.val = 0;
2276 c->src.bytes = 1;
2277 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2278 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2279 if (cf)
2280 ctxt->eflags |= X86_EFLAGS_CF;
2281 if (af)
2282 ctxt->eflags |= X86_EFLAGS_AF;
2283 return X86EMUL_CONTINUE;
2284 }
2285
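/*
 * Far call: load the new CS from the far pointer in the source operand,
 * then push the old CS and the return EIP.  emulate_push() only queues
 * the store in c->dst, so each push is committed with an explicit
 * writeback before the slot is reused for the next one.
 */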
2286 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2287 {
2288 struct decode_cache *c = &ctxt->decode;
2289 u16 sel, old_cs;
2290 ulong old_eip;
2291 int rc;
2292
2293 old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2294 old_eip = c->eip;
2295
2296 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2297 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2298 return X86EMUL_CONTINUE;
2299
2300 c->eip = 0;
2301 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2302
2303 c->src.val = old_cs;
2304 emulate_push(ctxt, ctxt->ops);
2305 rc = writeback(ctxt, ctxt->ops);
2306 if (rc != X86EMUL_CONTINUE)
2307 return rc;
2308
2309 c->src.val = old_eip;
2310 emulate_push(ctxt, ctxt->ops);
2311 rc = writeback(ctxt, ctxt->ops);
2312 if (rc != X86EMUL_CONTINUE)
2313 return rc;
2314
2315 c->dst.type = OP_NONE;
2316
2317 return X86EMUL_CONTINUE;
2318 }
2319
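/*
 * RET imm16: pop the return address into EIP, then release a further
 * imm16 bytes of stack (arguments cleaned up by the callee).
 */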
2320 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2321 {
2322 struct decode_cache *c = &ctxt->decode;
2323 int rc;
2324
2325 c->dst.type = OP_REG;
2326 c->dst.addr.reg = &c->eip;
2327 c->dst.bytes = c->op_bytes;
2328 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2329 if (rc != X86EMUL_CONTINUE)
2330 return rc;
2331 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2332 return X86EMUL_CONTINUE;
2333 }
2334
2335 static int em_imul(struct x86_emulate_ctxt *ctxt)
2336 {
2337 struct decode_cache *c = &ctxt->decode;
2338
2339 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2340 return X86EMUL_CONTINUE;
2341 }
2342
2343 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2344 {
2345 struct decode_cache *c = &ctxt->decode;
2346
2347 c->dst.val = c->src2.val;
2348 return em_imul(ctxt);
2349 }
2350
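/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout
 * DX/EDX/RDX.  The expression below evaluates to all ones when the
 * source's sign bit is set and to zero otherwise.
 */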
2351 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2352 {
2353 struct decode_cache *c = &ctxt->decode;
2354
2355 c->dst.type = OP_REG;
2356 c->dst.bytes = c->src.bytes;
2357 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2358 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2359
2360 return X86EMUL_CONTINUE;
2361 }
2362
2363 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2364 {
2365 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2366 struct decode_cache *c = &ctxt->decode;
2367 u64 tsc = 0;
2368
2369 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
2370 emulate_gp(ctxt, 0);
2371 return X86EMUL_PROPAGATE_FAULT;
2372 }
2373 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2374 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2375 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2376 return X86EMUL_CONTINUE;
2377 }
2378
2379 static int em_mov(struct x86_emulate_ctxt *ctxt)
2380 {
2381 struct decode_cache *c = &ctxt->decode;
2382 c->dst.val = c->src.val;
2383 return X86EMUL_CONTINUE;
2384 }
2385
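/*
 * Shorthand for the opcode tables below: D() declares decode flags
 * only, N marks an undefined entry, G()/GD() redirect to a group (or
 * a mod==3 dual group) and I() attaches an execution callback.
 * D2bv()/I2bv() emit the byte/full-size pair of an opcode, and
 * D6ALU() emits the six standard ALU encodings (r/m,r; r,r/m and
 * acc,imm, each in byte and full-size form).
 */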
2386 #define D(_y) { .flags = (_y) }
2387 #define N D(0)
2388 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2389 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2390 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2391
2392 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2393 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2394
2395 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2396 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2397 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
2398
2399
2400 static struct opcode group1[] = {
2401 X7(D(Lock)), N
2402 };
2403
2404 static struct opcode group1A[] = {
2405 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2406 };
2407
2408 static struct opcode group3[] = {
2409 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2410 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2411 X4(D(SrcMem | ModRM)),
2412 };
2413
2414 static struct opcode group4[] = {
2415 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2416 N, N, N, N, N, N,
2417 };
2418
2419 static struct opcode group5[] = {
2420 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2421 D(SrcMem | ModRM | Stack),
2422 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2423 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2424 D(SrcMem | ModRM | Stack), N,
2425 };
2426
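/*
 * 0f 01 group: lgdt (/2), lidt (/3), smsw (/4), lmsw (/6) and
 * invlpg (/7) are emulated in the 0x01 case of the two-byte switch;
 * the mod == 3 alternates (e.g. vmcall) come from the second half of
 * the dual table.
 */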
2427 static struct group_dual group7 = { {
2428 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2429 D(SrcNone | ModRM | DstMem | Mov), N,
2430 D(SrcMem16 | ModRM | Mov | Priv),
2431 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2432 }, {
2433 D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
2434 D(SrcNone | ModRM | DstMem | Mov), N,
2435 D(SrcMem16 | ModRM | Mov | Priv), N,
2436 } };
2437
2438 static struct opcode group8[] = {
2439 N, N, N, N,
2440 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2441 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2442 };
2443
2444 static struct group_dual group9 = { {
2445 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2446 }, {
2447 N, N, N, N, N, N, N, N,
2448 } };
2449
2450 static struct opcode group11[] = {
2451 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2452 };
2453
2454 static struct opcode opcode_table[256] = {
2455 /* 0x00 - 0x07 */
2456 D6ALU(Lock),
2457 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2458 /* 0x08 - 0x0F */
2459 D6ALU(Lock),
2460 D(ImplicitOps | Stack | No64), N,
2461 /* 0x10 - 0x17 */
2462 D6ALU(Lock),
2463 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2464 /* 0x18 - 0x1F */
2465 D6ALU(Lock),
2466 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2467 /* 0x20 - 0x27 */
2468 D6ALU(Lock), N, N,
2469 /* 0x28 - 0x2F */
2470 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2471 /* 0x30 - 0x37 */
2472 D6ALU(Lock), N, N,
2473 /* 0x38 - 0x3F */
2474 D6ALU(0), N, N,
2475 /* 0x40 - 0x4F */
2476 X16(D(DstReg)),
2477 /* 0x50 - 0x57 */
2478 X8(I(SrcReg | Stack, em_push)),
2479 /* 0x58 - 0x5F */
2480 X8(D(DstReg | Stack)),
2481 /* 0x60 - 0x67 */
2482 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2483 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2484 N, N, N, N,
2485 /* 0x68 - 0x6F */
2486 I(SrcImm | Mov | Stack, em_push),
2487 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2488 I(SrcImmByte | Mov | Stack, em_push),
2489 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2490 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2491 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2492 /* 0x70 - 0x7F */
2493 X16(D(SrcImmByte)),
2494 /* 0x80 - 0x87 */
2495 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2496 G(DstMem | SrcImm | ModRM | Group, group1),
2497 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2498 G(DstMem | SrcImmByte | ModRM | Group, group1),
2499 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2500 /* 0x88 - 0x8F */
2501 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2502 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2503 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2504 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2505 /* 0x90 - 0x97 */
2506 X8(D(SrcAcc | DstReg)),
2507 /* 0x98 - 0x9F */
2508 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2509 I(SrcImmFAddr | No64, em_call_far), N,
2510 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2511 /* 0xA0 - 0xA7 */
2512 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2513 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2514 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2515 D2bv(SrcSI | DstDI | String),
2516 /* 0xA8 - 0xAF */
2517 D2bv(DstAcc | SrcImm),
2518 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2519 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2520 D2bv(SrcAcc | DstDI | String),
2521 /* 0xB0 - 0xB7 */
2522 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2523 /* 0xB8 - 0xBF */
2524 X8(I(DstReg | SrcImm | Mov, em_mov)),
2525 /* 0xC0 - 0xC7 */
2526 D2bv(DstMem | SrcImmByte | ModRM),
2527 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2528 D(ImplicitOps | Stack),
2529 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2530 G(ByteOp, group11), G(0, group11),
2531 /* 0xC8 - 0xCF */
2532 N, N, N, D(ImplicitOps | Stack),
2533 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2534 /* 0xD0 - 0xD7 */
2535 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2536 N, N, N, N,
2537 /* 0xD8 - 0xDF */
2538 N, N, N, N, N, N, N, N,
2539 /* 0xE0 - 0xE7 */
2540 X4(D(SrcImmByte)),
2541 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2542 /* 0xE8 - 0xEF */
2543 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2544 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2545 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2546 /* 0xF0 - 0xF7 */
2547 N, N, N, N,
2548 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2549 /* 0xF8 - 0xFF */
2550 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2551 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2552 };
2553
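/* Two-byte (0x0f-prefixed) opcode map. */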
2554 static struct opcode twobyte_table[256] = {
2555 /* 0x00 - 0x0F */
2556 N, GD(0, &group7), N, N,
2557 N, D(ImplicitOps), D(ImplicitOps | Priv), N,
2558 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2559 N, D(ImplicitOps | ModRM), N, N,
2560 /* 0x10 - 0x1F */
2561 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2562 /* 0x20 - 0x2F */
2563 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2564 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2565 N, N, N, N,
2566 N, N, N, N, N, N, N, N,
2567 /* 0x30 - 0x3F */
2568 D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
2569 D(ImplicitOps | Priv), N,
2570 D(ImplicitOps), D(ImplicitOps | Priv), N, N,
2571 N, N, N, N, N, N, N, N,
2572 /* 0x40 - 0x4F */
2573 X16(D(DstReg | SrcMem | ModRM | Mov)),
2574 /* 0x50 - 0x5F */
2575 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2576 /* 0x60 - 0x6F */
2577 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2578 /* 0x70 - 0x7F */
2579 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2580 /* 0x80 - 0x8F */
2581 X16(D(SrcImm)),
2582 /* 0x90 - 0x9F */
2583 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
2584 /* 0xA0 - 0xA7 */
2585 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2586 N, D(DstMem | SrcReg | ModRM | BitOp),
2587 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2588 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2589 /* 0xA8 - 0xAF */
2590 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2591 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2592 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2593 D(DstMem | SrcReg | Src2CL | ModRM),
2594 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2595 /* 0xB0 - 0xB7 */
2596 D2bv(DstMem | SrcReg | ModRM | Lock),
2597 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2598 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2599 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2600 /* 0xB8 - 0xBF */
2601 N, N,
2602 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2603 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2604 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2605 /* 0xC0 - 0xCF */
2606 D2bv(DstMem | SrcReg | ModRM | Lock),
2607 N, D(DstMem | SrcReg | ModRM | Mov),
2608 N, N, N, GD(0, &group9),
2609 N, N, N, N, N, N, N, N,
2610 /* 0xD0 - 0xDF */
2611 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2612 /* 0xE0 - 0xEF */
2613 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2614 /* 0xF0 - 0xFF */
2615 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2616 };
2617
2618 #undef D
2619 #undef N
2620 #undef G
2621 #undef GD
2622 #undef I
2623
2624 #undef D2bv
2625 #undef I2bv
2626 #undef D6ALU
2627
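/*
 * Immediate size for the current instruction: x86 immediates are at
 * most 4 bytes even with a 64-bit operand size (the CPU sign-extends
 * them), so an 8-byte operand size is capped at a 4-byte immediate.
 */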
2628 static unsigned imm_size(struct decode_cache *c)
2629 {
2630 unsigned size;
2631
2632 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2633 if (size == 8)
2634 size = 4;
2635 return size;
2636 }
2637
2638 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2639 unsigned size, bool sign_extension)
2640 {
2641 struct decode_cache *c = &ctxt->decode;
2642 struct x86_emulate_ops *ops = ctxt->ops;
2643 int rc = X86EMUL_CONTINUE;
2644
2645 op->type = OP_IMM;
2646 op->bytes = size;
2647 op->addr.mem.ea = c->eip;
2648 /* NB. Immediates are sign-extended as necessary. */
2649 switch (op->bytes) {
2650 case 1:
2651 op->val = insn_fetch(s8, 1, c->eip);
2652 break;
2653 case 2:
2654 op->val = insn_fetch(s16, 2, c->eip);
2655 break;
2656 case 4:
2657 op->val = insn_fetch(s32, 4, c->eip);
2658 break;
2659 }
2660 if (!sign_extension) {
2661 switch (op->bytes) {
2662 case 1:
2663 op->val &= 0xff;
2664 break;
2665 case 2:
2666 op->val &= 0xffff;
2667 break;
2668 case 4:
2669 op->val &= 0xffffffff;
2670 break;
2671 }
2672 }
2673 done:
2674 return rc;
2675 }
2676
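/*
 * Instruction decoder: consume the legacy prefixes, an optional REX
 * prefix and the opcode byte(s), resolve group-extended opcodes via
 * the ModRM reg field, then decode the ModRM/SIB bytes and fetch the
 * source, second-source and destination operands as directed by the
 * decode flags.
 */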
2677 int
2678 x86_decode_insn(struct x86_emulate_ctxt *ctxt)
2679 {
2680 struct x86_emulate_ops *ops = ctxt->ops;
2681 struct decode_cache *c = &ctxt->decode;
2682 int rc = X86EMUL_CONTINUE;
2683 int mode = ctxt->mode;
2684 	int def_op_bytes, def_ad_bytes, goffset;
2685 struct opcode opcode, *g_mod012, *g_mod3;
2686 struct operand memop = { .type = OP_NONE };
2687
2688 c->eip = ctxt->eip;
2689 c->fetch.start = c->fetch.end = c->eip;
2690 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2691
2692 switch (mode) {
2693 case X86EMUL_MODE_REAL:
2694 case X86EMUL_MODE_VM86:
2695 case X86EMUL_MODE_PROT16:
2696 def_op_bytes = def_ad_bytes = 2;
2697 break;
2698 case X86EMUL_MODE_PROT32:
2699 def_op_bytes = def_ad_bytes = 4;
2700 break;
2701 #ifdef CONFIG_X86_64
2702 case X86EMUL_MODE_PROT64:
2703 def_op_bytes = 4;
2704 def_ad_bytes = 8;
2705 break;
2706 #endif
2707 default:
2708 return -1;
2709 }
2710
2711 c->op_bytes = def_op_bytes;
2712 c->ad_bytes = def_ad_bytes;
2713
2714 /* Legacy prefixes. */
2715 for (;;) {
2716 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2717 case 0x66: /* operand-size override */
2718 /* switch between 2/4 bytes */
2719 c->op_bytes = def_op_bytes ^ 6;
2720 break;
2721 case 0x67: /* address-size override */
2722 if (mode == X86EMUL_MODE_PROT64)
2723 /* switch between 4/8 bytes */
2724 c->ad_bytes = def_ad_bytes ^ 12;
2725 else
2726 /* switch between 2/4 bytes */
2727 c->ad_bytes = def_ad_bytes ^ 6;
2728 break;
2729 case 0x26: /* ES override */
2730 case 0x2e: /* CS override */
2731 case 0x36: /* SS override */
2732 case 0x3e: /* DS override */
2733 set_seg_override(c, (c->b >> 3) & 3);
2734 break;
2735 case 0x64: /* FS override */
2736 case 0x65: /* GS override */
2737 set_seg_override(c, c->b & 7);
2738 break;
2739 case 0x40 ... 0x4f: /* REX */
2740 if (mode != X86EMUL_MODE_PROT64)
2741 goto done_prefixes;
2742 c->rex_prefix = c->b;
2743 continue;
2744 case 0xf0: /* LOCK */
2745 c->lock_prefix = 1;
2746 break;
2747 case 0xf2: /* REPNE/REPNZ */
2748 c->rep_prefix = REPNE_PREFIX;
2749 break;
2750 case 0xf3: /* REP/REPE/REPZ */
2751 c->rep_prefix = REPE_PREFIX;
2752 break;
2753 default:
2754 goto done_prefixes;
2755 }
2756
2757 /* Any legacy prefix after a REX prefix nullifies its effect. */
2758
2759 c->rex_prefix = 0;
2760 }
2761
2762 done_prefixes:
2763
2764 /* REX prefix. */
2765 if (c->rex_prefix & 8)
2766 c->op_bytes = 8; /* REX.W */
2767
2768 /* Opcode byte(s). */
2769 opcode = opcode_table[c->b];
2770 /* Two-byte opcode? */
2771 if (c->b == 0x0f) {
2772 c->twobyte = 1;
2773 c->b = insn_fetch(u8, 1, c->eip);
2774 opcode = twobyte_table[c->b];
2775 }
2776 c->d = opcode.flags;
2777
2778 if (c->d & Group) {
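		/*
		 * Peek at the ModRM byte to pick the group entry, then
		 * rewind so the regular ModRM decode below fetches it again.
		 */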
2780 c->modrm = insn_fetch(u8, 1, c->eip);
2781 --c->eip;
2782
2783 if (c->d & GroupDual) {
2784 g_mod012 = opcode.u.gdual->mod012;
2785 g_mod3 = opcode.u.gdual->mod3;
2786 } else
2787 g_mod012 = g_mod3 = opcode.u.group;
2788
2789 c->d &= ~(Group | GroupDual);
2790
2791 goffset = (c->modrm >> 3) & 7;
2792
2793 if ((c->modrm >> 6) == 3)
2794 opcode = g_mod3[goffset];
2795 else
2796 opcode = g_mod012[goffset];
2797 c->d |= opcode.flags;
2798 }
2799
2800 c->execute = opcode.u.execute;
2801
2802 /* Unrecognised? */
2803 if (c->d == 0 || (c->d & Undefined))
2804 return -1;
2805
2806 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2807 c->op_bytes = 8;
2808
2809 if (c->d & Op3264) {
2810 if (mode == X86EMUL_MODE_PROT64)
2811 c->op_bytes = 8;
2812 else
2813 c->op_bytes = 4;
2814 }
2815
2816 /* ModRM and SIB bytes. */
2817 if (c->d & ModRM) {
2818 rc = decode_modrm(ctxt, ops, &memop);
2819 if (!c->has_seg_override)
2820 set_seg_override(c, c->modrm_seg);
2821 } else if (c->d & MemAbs)
2822 rc = decode_abs(ctxt, ops, &memop);
2823 if (rc != X86EMUL_CONTINUE)
2824 goto done;
2825
2826 if (!c->has_seg_override)
2827 set_seg_override(c, VCPU_SREG_DS);
2828
2829 memop.addr.mem.seg = seg_override(ctxt, ops, c);
2830
2831 if (memop.type == OP_MEM && c->ad_bytes != 8)
2832 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
2833
2834 if (memop.type == OP_MEM && c->rip_relative)
2835 memop.addr.mem.ea += c->eip;
2836
2837 /*
2838 * Decode and fetch the source operand: register, memory
2839 * or immediate.
2840 */
2841 switch (c->d & SrcMask) {
2842 case SrcNone:
2843 break;
2844 case SrcReg:
2845 decode_register_operand(&c->src, c, 0);
2846 break;
2847 case SrcMem16:
2848 memop.bytes = 2;
2849 goto srcmem_common;
2850 case SrcMem32:
2851 memop.bytes = 4;
2852 goto srcmem_common;
2853 case SrcMem:
2854 memop.bytes = (c->d & ByteOp) ? 1 :
2855 c->op_bytes;
2856 srcmem_common:
2857 c->src = memop;
2858 break;
2859 case SrcImmU16:
2860 rc = decode_imm(ctxt, &c->src, 2, false);
2861 break;
2862 case SrcImm:
2863 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
2864 break;
2865 case SrcImmU:
2866 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
2867 break;
2868 case SrcImmByte:
2869 rc = decode_imm(ctxt, &c->src, 1, true);
2870 break;
2871 case SrcImmUByte:
2872 rc = decode_imm(ctxt, &c->src, 1, false);
2873 break;
2874 case SrcAcc:
2875 c->src.type = OP_REG;
2876 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2877 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2878 fetch_register_operand(&c->src);
2879 break;
2880 case SrcOne:
2881 c->src.bytes = 1;
2882 c->src.val = 1;
2883 break;
2884 case SrcSI:
2885 c->src.type = OP_MEM;
2886 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2887 c->src.addr.mem.ea =
2888 register_address(c, c->regs[VCPU_REGS_RSI]);
2889 c->src.addr.mem.seg = seg_override(ctxt, ops, c),
2890 c->src.val = 0;
2891 break;
2892 case SrcImmFAddr:
2893 c->src.type = OP_IMM;
2894 c->src.addr.mem.ea = c->eip;
2895 c->src.bytes = c->op_bytes + 2;
2896 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2897 break;
2898 case SrcMemFAddr:
2899 memop.bytes = c->op_bytes + 2;
2900 goto srcmem_common;
2902 }
2903
2904 if (rc != X86EMUL_CONTINUE)
2905 goto done;
2906
2907 /*
2908 * Decode and fetch the second source operand: register, memory
2909 * or immediate.
2910 */
2911 switch (c->d & Src2Mask) {
2912 case Src2None:
2913 break;
2914 case Src2CL:
2915 c->src2.bytes = 1;
2916 		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
2917 break;
2918 case Src2ImmByte:
2919 rc = decode_imm(ctxt, &c->src2, 1, true);
2920 break;
2921 case Src2One:
2922 c->src2.bytes = 1;
2923 c->src2.val = 1;
2924 break;
2925 case Src2Imm:
2926 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
2927 break;
2928 }
2929
2930 if (rc != X86EMUL_CONTINUE)
2931 goto done;
2932
2933 /* Decode and fetch the destination operand: register or memory. */
2934 switch (c->d & DstMask) {
2935 case DstReg:
2936 decode_register_operand(&c->dst, c,
2937 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2938 break;
2939 case DstImmUByte:
2940 c->dst.type = OP_IMM;
2941 c->dst.addr.mem.ea = c->eip;
2942 c->dst.bytes = 1;
2943 c->dst.val = insn_fetch(u8, 1, c->eip);
2944 break;
2945 case DstMem:
2946 case DstMem64:
2947 c->dst = memop;
2948 if ((c->d & DstMask) == DstMem64)
2949 c->dst.bytes = 8;
2950 else
2951 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2952 if (c->d & BitOp)
2953 fetch_bit_operand(c);
2954 c->dst.orig_val = c->dst.val;
2955 break;
2956 case DstAcc:
2957 c->dst.type = OP_REG;
2958 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2959 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
2960 fetch_register_operand(&c->dst);
2961 c->dst.orig_val = c->dst.val;
2962 break;
2963 case DstDI:
2964 c->dst.type = OP_MEM;
2965 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2966 c->dst.addr.mem.ea =
2967 register_address(c, c->regs[VCPU_REGS_RDI]);
2968 c->dst.addr.mem.seg = VCPU_SREG_ES;
2969 c->dst.val = 0;
2970 break;
2971 case ImplicitOps:
2972 /* Special instructions do their own operand decoding. */
2973 default:
2974 c->dst.type = OP_NONE; /* Disable writeback. */
2975 return 0;
2976 }
2977
2978 done:
2979 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2980 }
2981
2982 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
2983 {
2984 struct decode_cache *c = &ctxt->decode;
2985
2986 	/* The second termination condition only applies to REPE
2987 	 * and REPNE. Test whether the repeat string operation prefix is
2988 	 * REPE/REPZ or REPNE/REPNZ; if so, check the
2989 	 * corresponding termination condition:
2990 * - if REPE/REPZ and ZF = 0 then done
2991 * - if REPNE/REPNZ and ZF = 1 then done
2992 */
2993 if (((c->b == 0xa6) || (c->b == 0xa7) ||
2994 (c->b == 0xae) || (c->b == 0xaf))
2995 && (((c->rep_prefix == REPE_PREFIX) &&
2996 ((ctxt->eflags & EFLG_ZF) == 0))
2997 || ((c->rep_prefix == REPNE_PREFIX) &&
2998 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
2999 return true;
3000
3001 return false;
3002 }
3003
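/*
 * Main execution entry point: reject encodings that are invalid in the
 * current mode (No64, stray LOCK prefixes, privileged instructions
 * outside CPL 0), stop REP iterations early when RCX is zero, read the
 * memory operands, dispatch either to the opcode's ->execute callback
 * or to the big switch below, write the result back, and finally
 * update the string registers and RIP.
 */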
3004 int
3005 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3006 {
3007 struct x86_emulate_ops *ops = ctxt->ops;
3008 u64 msr_data;
3009 struct decode_cache *c = &ctxt->decode;
3010 int rc = X86EMUL_CONTINUE;
3011 int saved_dst_type = c->dst.type;
3012 int irq; /* Used for int 3, int, and into */
3013
3014 ctxt->decode.mem_read.pos = 0;
3015
3016 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3017 emulate_ud(ctxt);
3018 rc = X86EMUL_PROPAGATE_FAULT;
3019 goto done;
3020 }
3021
3022 /* LOCK prefix is allowed only with some instructions */
3023 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3024 emulate_ud(ctxt);
3025 rc = X86EMUL_PROPAGATE_FAULT;
3026 goto done;
3027 }
3028
3029 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3030 emulate_ud(ctxt);
3031 rc = X86EMUL_PROPAGATE_FAULT;
3032 goto done;
3033 }
3034
3035 	/* Privileged instructions can be executed only at CPL 0 */
3036 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
3037 emulate_gp(ctxt, 0);
3038 rc = X86EMUL_PROPAGATE_FAULT;
3039 goto done;
3040 }
3041
3042 if (c->rep_prefix && (c->d & String)) {
3043 /* All REP prefixes have the same first termination condition */
3044 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3045 ctxt->eip = c->eip;
3046 goto done;
3047 }
3048 }
3049
3050 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3051 rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
3052 c->src.valptr, c->src.bytes);
3053 if (rc != X86EMUL_CONTINUE)
3054 goto done;
3055 c->src.orig_val64 = c->src.val64;
3056 }
3057
3058 if (c->src2.type == OP_MEM) {
3059 rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
3060 &c->src2.val, c->src2.bytes);
3061 if (rc != X86EMUL_CONTINUE)
3062 goto done;
3063 }
3064
3065 if ((c->d & DstMask) == ImplicitOps)
3066 goto special_insn;
3067
3068
3069 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3070 /* optimisation - avoid slow emulated read if Mov */
3071 rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
3072 &c->dst.val, c->dst.bytes);
3073 if (rc != X86EMUL_CONTINUE)
3074 goto done;
3075 }
3076 c->dst.orig_val = c->dst.val;
3077
3078 special_insn:
3079
3080 if (c->execute) {
3081 rc = c->execute(ctxt);
3082 if (rc != X86EMUL_CONTINUE)
3083 goto done;
3084 goto writeback;
3085 }
3086
3087 if (c->twobyte)
3088 goto twobyte_insn;
3089
3090 switch (c->b) {
3091 case 0x00 ... 0x05:
3092 add: /* add */
3093 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3094 break;
3095 case 0x06: /* push es */
3096 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3097 break;
3098 case 0x07: /* pop es */
3099 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3100 break;
3101 case 0x08 ... 0x0d:
3102 or: /* or */
3103 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3104 break;
3105 case 0x0e: /* push cs */
3106 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3107 break;
3108 case 0x10 ... 0x15:
3109 adc: /* adc */
3110 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3111 break;
3112 case 0x16: /* push ss */
3113 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3114 break;
3115 case 0x17: /* pop ss */
3116 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3117 break;
3118 case 0x18 ... 0x1d:
3119 sbb: /* sbb */
3120 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3121 break;
3122 case 0x1e: /* push ds */
3123 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3124 break;
3125 case 0x1f: /* pop ds */
3126 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3127 break;
3128 case 0x20 ... 0x25:
3129 and: /* and */
3130 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3131 break;
3132 case 0x28 ... 0x2d:
3133 sub: /* sub */
3134 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3135 break;
3136 case 0x30 ... 0x35:
3137 xor: /* xor */
3138 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3139 break;
3140 case 0x38 ... 0x3d:
3141 cmp: /* cmp */
3142 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3143 break;
3144 case 0x40 ... 0x47: /* inc r16/r32 */
3145 emulate_1op("inc", c->dst, ctxt->eflags);
3146 break;
3147 case 0x48 ... 0x4f: /* dec r16/r32 */
3148 emulate_1op("dec", c->dst, ctxt->eflags);
3149 break;
3150 case 0x58 ... 0x5f: /* pop reg */
3151 pop_instruction:
3152 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3153 break;
3154 case 0x60: /* pusha */
3155 rc = emulate_pusha(ctxt, ops);
3156 break;
3157 case 0x61: /* popa */
3158 rc = emulate_popa(ctxt, ops);
3159 break;
3160 case 0x63: /* movsxd */
3161 if (ctxt->mode != X86EMUL_MODE_PROT64)
3162 goto cannot_emulate;
3163 c->dst.val = (s32) c->src.val;
3164 break;
3165 case 0x6c: /* insb */
3166 case 0x6d: /* insw/insd */
3167 c->src.val = c->regs[VCPU_REGS_RDX];
3168 goto do_io_in;
3169 case 0x6e: /* outsb */
3170 case 0x6f: /* outsw/outsd */
3171 c->dst.val = c->regs[VCPU_REGS_RDX];
3172 goto do_io_out;
3173 break;
3174 case 0x70 ... 0x7f: /* jcc (short) */
3175 if (test_cc(c->b, ctxt->eflags))
3176 jmp_rel(c, c->src.val);
3177 break;
3178 case 0x80 ... 0x83: /* Grp1 */
3179 switch (c->modrm_reg) {
3180 case 0:
3181 goto add;
3182 case 1:
3183 goto or;
3184 case 2:
3185 goto adc;
3186 case 3:
3187 goto sbb;
3188 case 4:
3189 goto and;
3190 case 5:
3191 goto sub;
3192 case 6:
3193 goto xor;
3194 case 7:
3195 goto cmp;
3196 }
3197 break;
3198 case 0x84 ... 0x85:
3199 test:
3200 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3201 break;
3202 case 0x86 ... 0x87: /* xchg */
3203 xchg:
3204 /* Write back the register source. */
3205 c->src.val = c->dst.val;
3206 write_register_operand(&c->src);
3207 /*
3208 * Write back the memory destination with implicit LOCK
3209 * prefix.
3210 */
3211 c->dst.val = c->src.orig_val;
3212 c->lock_prefix = 1;
3213 break;
3214 case 0x8c: /* mov r/m, sreg */
3215 if (c->modrm_reg > VCPU_SREG_GS) {
3216 emulate_ud(ctxt);
3217 rc = X86EMUL_PROPAGATE_FAULT;
3218 goto done;
3219 }
3220 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3221 break;
3222 case 0x8d: /* lea r16/r32, m */
3223 c->dst.val = c->src.addr.mem.ea;
3224 break;
3225 case 0x8e: { /* mov seg, r/m16 */
3226 uint16_t sel;
3227
3228 sel = c->src.val;
3229
3230 if (c->modrm_reg == VCPU_SREG_CS ||
3231 c->modrm_reg > VCPU_SREG_GS) {
3232 emulate_ud(ctxt);
3233 rc = X86EMUL_PROPAGATE_FAULT;
3234 goto done;
3235 }
3236
3237 if (c->modrm_reg == VCPU_SREG_SS)
3238 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3239
3240 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3241
3242 c->dst.type = OP_NONE; /* Disable writeback. */
3243 break;
3244 }
3245 case 0x8f: /* pop (sole member of Grp1a) */
3246 rc = emulate_grp1a(ctxt, ops);
3247 break;
3248 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3249 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3250 break;
3251 goto xchg;
3252 case 0x98: /* cbw/cwde/cdqe */
3253 switch (c->op_bytes) {
3254 case 2: c->dst.val = (s8)c->dst.val; break;
3255 case 4: c->dst.val = (s16)c->dst.val; break;
3256 case 8: c->dst.val = (s32)c->dst.val; break;
3257 }
3258 break;
3259 case 0x9c: /* pushf */
3260 c->src.val = (unsigned long) ctxt->eflags;
3261 emulate_push(ctxt, ops);
3262 break;
3263 case 0x9d: /* popf */
3264 c->dst.type = OP_REG;
3265 c->dst.addr.reg = &ctxt->eflags;
3266 c->dst.bytes = c->op_bytes;
3267 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3268 break;
3269 case 0xa6 ... 0xa7: /* cmps */
3270 c->dst.type = OP_NONE; /* Disable writeback. */
3271 goto cmp;
3272 case 0xa8 ... 0xa9: /* test ax, imm */
3273 goto test;
3274 case 0xae ... 0xaf: /* scas */
3275 goto cmp;
3276 case 0xc0 ... 0xc1:
3277 emulate_grp2(ctxt);
3278 break;
3279 case 0xc3: /* ret */
3280 c->dst.type = OP_REG;
3281 c->dst.addr.reg = &c->eip;
3282 c->dst.bytes = c->op_bytes;
3283 goto pop_instruction;
3284 case 0xc4: /* les */
3285 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3286 break;
3287 case 0xc5: /* lds */
3288 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3289 break;
3290 case 0xcb: /* ret far */
3291 rc = emulate_ret_far(ctxt, ops);
3292 break;
3293 case 0xcc: /* int3 */
3294 irq = 3;
3295 goto do_interrupt;
3296 case 0xcd: /* int n */
3297 irq = c->src.val;
3298 do_interrupt:
3299 rc = emulate_int(ctxt, ops, irq);
3300 break;
3301 case 0xce: /* into */
3302 if (ctxt->eflags & EFLG_OF) {
3303 irq = 4;
3304 goto do_interrupt;
3305 }
3306 break;
3307 case 0xcf: /* iret */
3308 rc = emulate_iret(ctxt, ops);
3309 break;
3310 case 0xd0 ... 0xd1: /* Grp2 */
3311 emulate_grp2(ctxt);
3312 break;
3313 case 0xd2 ... 0xd3: /* Grp2 */
3314 c->src.val = c->regs[VCPU_REGS_RCX];
3315 emulate_grp2(ctxt);
3316 break;
3317 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3318 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3319 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3320 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3321 jmp_rel(c, c->src.val);
3322 break;
3323 case 0xe3: /* jcxz/jecxz/jrcxz */
3324 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3325 jmp_rel(c, c->src.val);
3326 break;
3327 case 0xe4: /* inb */
3328 case 0xe5: /* in */
3329 goto do_io_in;
3330 case 0xe6: /* outb */
3331 case 0xe7: /* out */
3332 goto do_io_out;
3333 case 0xe8: /* call (near) */ {
3334 long int rel = c->src.val;
3335 c->src.val = (unsigned long) c->eip;
3336 jmp_rel(c, rel);
3337 emulate_push(ctxt, ops);
3338 break;
3339 }
3340 case 0xe9: /* jmp rel */
3341 goto jmp;
3342 case 0xea: { /* jmp far */
3343 unsigned short sel;
3344 jump_far:
3345 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3346
3347 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3348 goto done;
3349
3350 c->eip = 0;
3351 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3352 break;
3353 }
3354 case 0xeb:
3355 jmp: /* jmp rel short */
3356 jmp_rel(c, c->src.val);
3357 c->dst.type = OP_NONE; /* Disable writeback. */
3358 break;
3359 case 0xec: /* in al,dx */
3360 case 0xed: /* in (e/r)ax,dx */
3361 c->src.val = c->regs[VCPU_REGS_RDX];
3362 do_io_in:
3363 c->dst.bytes = min(c->dst.bytes, 4u);
3364 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3365 emulate_gp(ctxt, 0);
3366 rc = X86EMUL_PROPAGATE_FAULT;
3367 goto done;
3368 }
3369 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3370 &c->dst.val))
3371 goto done; /* IO is needed */
3372 break;
3373 case 0xee: /* out dx,al */
3374 case 0xef: /* out dx,(e/r)ax */
3375 c->dst.val = c->regs[VCPU_REGS_RDX];
3376 do_io_out:
3377 c->src.bytes = min(c->src.bytes, 4u);
3378 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3379 c->src.bytes)) {
3380 emulate_gp(ctxt, 0);
3381 rc = X86EMUL_PROPAGATE_FAULT;
3382 goto done;
3383 }
3384 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3385 &c->src.val, 1, ctxt->vcpu);
3386 c->dst.type = OP_NONE; /* Disable writeback. */
3387 break;
3388 case 0xf4: /* hlt */
3389 ctxt->vcpu->arch.halt_request = 1;
3390 break;
3391 case 0xf5: /* cmc */
3392 /* complement carry flag from eflags reg */
3393 ctxt->eflags ^= EFLG_CF;
3394 break;
3395 case 0xf6 ... 0xf7: /* Grp3 */
3396 rc = emulate_grp3(ctxt, ops);
3397 break;
3398 case 0xf8: /* clc */
3399 ctxt->eflags &= ~EFLG_CF;
3400 break;
3401 case 0xf9: /* stc */
3402 ctxt->eflags |= EFLG_CF;
3403 break;
3404 case 0xfa: /* cli */
3405 if (emulator_bad_iopl(ctxt, ops)) {
3406 emulate_gp(ctxt, 0);
3407 rc = X86EMUL_PROPAGATE_FAULT;
3408 goto done;
3409 } else
3410 ctxt->eflags &= ~X86_EFLAGS_IF;
3411 break;
3412 case 0xfb: /* sti */
3413 if (emulator_bad_iopl(ctxt, ops)) {
3414 emulate_gp(ctxt, 0);
3415 rc = X86EMUL_PROPAGATE_FAULT;
3416 goto done;
3417 } else {
3418 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3419 ctxt->eflags |= X86_EFLAGS_IF;
3420 }
3421 break;
3422 case 0xfc: /* cld */
3423 ctxt->eflags &= ~EFLG_DF;
3424 break;
3425 case 0xfd: /* std */
3426 ctxt->eflags |= EFLG_DF;
3427 break;
3428 case 0xfe: /* Grp4 */
3429 grp45:
3430 rc = emulate_grp45(ctxt, ops);
3431 break;
3432 case 0xff: /* Grp5 */
3433 if (c->modrm_reg == 5)
3434 goto jump_far;
3435 goto grp45;
3436 default:
3437 goto cannot_emulate;
3438 }
3439
3440 if (rc != X86EMUL_CONTINUE)
3441 goto done;
3442
3443 writeback:
3444 rc = writeback(ctxt, ops);
3445 if (rc != X86EMUL_CONTINUE)
3446 goto done;
3447
3448 /*
3449 	 * restore dst type in case the decoding is reused
3450 	 * (happens for string instructions)
3451 */
3452 c->dst.type = saved_dst_type;
3453
3454 if ((c->d & SrcMask) == SrcSI)
3455 string_addr_inc(ctxt, seg_override(ctxt, ops, c),
3456 VCPU_REGS_RSI, &c->src);
3457
3458 if ((c->d & DstMask) == DstDI)
3459 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3460 &c->dst);
3461
3462 if (c->rep_prefix && (c->d & String)) {
3463 struct read_cache *r = &ctxt->decode.io_read;
3464 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3465
3466 if (!string_insn_completed(ctxt)) {
3467 /*
3468 			 * Re-enter the guest when the pio read ahead buffer
3469 			 * is empty or, if it is not used, after every 1024
3470 			 * iterations.
3470 */
3471 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3472 (r->end == 0 || r->end != r->pos)) {
3473 /*
3474 				 * Reset the read cache. This usually happens
3475 				 * before decode, but since the instruction is
3476 				 * restarted we have to do it here.
3477 */
3478 ctxt->decode.mem_read.end = 0;
3479 return EMULATION_RESTART;
3480 }
3481 goto done; /* skip rip writeback */
3482 }
3483 }
3484
3485 ctxt->eip = c->eip;
3486
3487 done:
3488 if (rc == X86EMUL_PROPAGATE_FAULT)
3489 ctxt->have_exception = true;
3490 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3491
3492 twobyte_insn:
3493 switch (c->b) {
3494 case 0x01: /* lgdt, lidt, lmsw */
3495 switch (c->modrm_reg) {
3496 u16 size;
3497 unsigned long address;
3498
3499 case 0: /* vmcall */
3500 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3501 goto cannot_emulate;
3502
3503 rc = kvm_fix_hypercall(ctxt->vcpu);
3504 if (rc != X86EMUL_CONTINUE)
3505 goto done;
3506
3507 /* Let the processor re-execute the fixed hypercall */
3508 c->eip = ctxt->eip;
3509 /* Disable writeback. */
3510 c->dst.type = OP_NONE;
3511 break;
3512 case 2: /* lgdt */
3513 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3514 &size, &address, c->op_bytes);
3515 if (rc != X86EMUL_CONTINUE)
3516 goto done;
3517 realmode_lgdt(ctxt->vcpu, size, address);
3518 /* Disable writeback. */
3519 c->dst.type = OP_NONE;
3520 break;
3521 case 3: /* lidt/vmmcall */
3522 if (c->modrm_mod == 3) {
3523 switch (c->modrm_rm) {
3524 case 1:
3525 rc = kvm_fix_hypercall(ctxt->vcpu);
3526 break;
3527 default:
3528 goto cannot_emulate;
3529 }
3530 } else {
3531 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3532 &size, &address,
3533 c->op_bytes);
3534 if (rc != X86EMUL_CONTINUE)
3535 goto done;
3536 realmode_lidt(ctxt->vcpu, size, address);
3537 }
3538 /* Disable writeback. */
3539 c->dst.type = OP_NONE;
3540 break;
3541 case 4: /* smsw */
3542 c->dst.bytes = 2;
3543 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3544 break;
3545 case 6: /* lmsw */
3546 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3547 (c->src.val & 0x0f), ctxt->vcpu);
3548 c->dst.type = OP_NONE;
3549 break;
3550 case 5: /* not defined */
3551 emulate_ud(ctxt);
3552 rc = X86EMUL_PROPAGATE_FAULT;
3553 goto done;
3554 		case 7: /* invlpg */
3555 emulate_invlpg(ctxt->vcpu,
3556 linear(ctxt, c->src.addr.mem));
3557 /* Disable writeback. */
3558 c->dst.type = OP_NONE;
3559 break;
3560 default:
3561 goto cannot_emulate;
3562 }
3563 break;
3564 case 0x05: /* syscall */
3565 rc = emulate_syscall(ctxt, ops);
3566 break;
3567 case 0x06:
3568 emulate_clts(ctxt->vcpu);
3569 break;
3570 case 0x09: /* wbinvd */
3571 kvm_emulate_wbinvd(ctxt->vcpu);
3572 break;
3573 case 0x08: /* invd */
3574 case 0x0d: /* GrpP (prefetch) */
3575 case 0x18: /* Grp16 (prefetch/nop) */
3576 break;
3577 case 0x20: /* mov cr, reg */
3578 switch (c->modrm_reg) {
3579 case 1:
3580 case 5 ... 7:
3581 case 9 ... 15:
3582 emulate_ud(ctxt);
3583 rc = X86EMUL_PROPAGATE_FAULT;
3584 goto done;
3585 }
3586 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3587 break;
3588 case 0x21: /* mov from dr to reg */
3589 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3590 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3591 emulate_ud(ctxt);
3592 rc = X86EMUL_PROPAGATE_FAULT;
3593 goto done;
3594 }
3595 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3596 break;
3597 case 0x22: /* mov reg, cr */
3598 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3599 emulate_gp(ctxt, 0);
3600 rc = X86EMUL_PROPAGATE_FAULT;
3601 goto done;
3602 }
3603 c->dst.type = OP_NONE;
3604 break;
3605 case 0x23: /* mov from reg to dr */
3606 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3607 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3608 emulate_ud(ctxt);
3609 rc = X86EMUL_PROPAGATE_FAULT;
3610 goto done;
3611 }
3612
3613 if (ops->set_dr(c->modrm_reg, c->src.val &
3614 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3615 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3616 /* #UD condition is already handled by the code above */
3617 emulate_gp(ctxt, 0);
3618 rc = X86EMUL_PROPAGATE_FAULT;
3619 goto done;
3620 }
3621
3622 c->dst.type = OP_NONE; /* no writeback */
3623 break;
3624 case 0x30:
3625 /* wrmsr */
3626 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3627 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3628 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3629 emulate_gp(ctxt, 0);
3630 rc = X86EMUL_PROPAGATE_FAULT;
3631 goto done;
3632 }
3633 rc = X86EMUL_CONTINUE;
3634 break;
3635 case 0x32:
3636 /* rdmsr */
3637 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3638 emulate_gp(ctxt, 0);
3639 rc = X86EMUL_PROPAGATE_FAULT;
3640 goto done;
3641 } else {
3642 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3643 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3644 }
3645 rc = X86EMUL_CONTINUE;
3646 break;
3647 case 0x34: /* sysenter */
3648 rc = emulate_sysenter(ctxt, ops);
3649 break;
3650 case 0x35: /* sysexit */
3651 rc = emulate_sysexit(ctxt, ops);
3652 break;
3653 case 0x40 ... 0x4f: /* cmov */
3654 c->dst.val = c->dst.orig_val = c->src.val;
3655 if (!test_cc(c->b, ctxt->eflags))
3656 c->dst.type = OP_NONE; /* no writeback */
3657 break;
3658 case 0x80 ... 0x8f: /* jnz rel, etc*/
3659 if (test_cc(c->b, ctxt->eflags))
3660 jmp_rel(c, c->src.val);
3661 break;
3662 case 0x90 ... 0x9f: /* setcc r/m8 */
3663 c->dst.val = test_cc(c->b, ctxt->eflags);
3664 break;
3665 case 0xa0: /* push fs */
3666 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3667 break;
3668 case 0xa1: /* pop fs */
3669 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3670 break;
3671 case 0xa3:
3672 bt: /* bt */
3673 c->dst.type = OP_NONE;
3674 /* only subword offset */
3675 c->src.val &= (c->dst.bytes << 3) - 1;
3676 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3677 break;
3678 case 0xa4: /* shld imm8, r, r/m */
3679 case 0xa5: /* shld cl, r, r/m */
3680 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3681 break;
3682 case 0xa8: /* push gs */
3683 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3684 break;
3685 case 0xa9: /* pop gs */
3686 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3687 break;
3688 case 0xab:
3689 bts: /* bts */
3690 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3691 break;
3692 case 0xac: /* shrd imm8, r, r/m */
3693 case 0xad: /* shrd cl, r, r/m */
3694 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3695 break;
3696 case 0xae: /* clflush */
3697 break;
3698 case 0xb0 ... 0xb1: /* cmpxchg */
3699 /*
3700 * Save real source value, then compare EAX against
3701 * destination.
3702 */
3703 c->src.orig_val = c->src.val;
3704 c->src.val = c->regs[VCPU_REGS_RAX];
3705 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3706 if (ctxt->eflags & EFLG_ZF) {
3707 /* Success: write back to memory. */
3708 c->dst.val = c->src.orig_val;
3709 } else {
3710 /* Failure: write the value we saw to EAX. */
3711 c->dst.type = OP_REG;
3712 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3713 }
3714 break;
3715 case 0xb2: /* lss */
3716 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
3717 break;
3718 case 0xb3:
3719 btr: /* btr */
3720 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3721 break;
3722 case 0xb4: /* lfs */
3723 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
3724 break;
3725 case 0xb5: /* lgs */
3726 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
3727 break;
3728 case 0xb6 ... 0xb7: /* movzx */
3729 c->dst.bytes = c->op_bytes;
3730 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3731 : (u16) c->src.val;
3732 break;
3733 case 0xba: /* Grp8 */
3734 switch (c->modrm_reg & 3) {
3735 case 0:
3736 goto bt;
3737 case 1:
3738 goto bts;
3739 case 2:
3740 goto btr;
3741 case 3:
3742 goto btc;
3743 }
3744 break;
3745 case 0xbb:
3746 btc: /* btc */
3747 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3748 break;
3749 case 0xbc: { /* bsf */
3750 u8 zf;
3751 __asm__ ("bsf %2, %0; setz %1"
3752 : "=r"(c->dst.val), "=q"(zf)
3753 : "r"(c->src.val));
3754 ctxt->eflags &= ~X86_EFLAGS_ZF;
3755 if (zf) {
3756 ctxt->eflags |= X86_EFLAGS_ZF;
3757 c->dst.type = OP_NONE; /* Disable writeback. */
3758 }
3759 break;
3760 }
3761 case 0xbd: { /* bsr */
3762 u8 zf;
3763 __asm__ ("bsr %2, %0; setz %1"
3764 : "=r"(c->dst.val), "=q"(zf)
3765 : "r"(c->src.val));
3766 ctxt->eflags &= ~X86_EFLAGS_ZF;
3767 if (zf) {
3768 ctxt->eflags |= X86_EFLAGS_ZF;
3769 c->dst.type = OP_NONE; /* Disable writeback. */
3770 }
3771 break;
3772 }
3773 case 0xbe ... 0xbf: /* movsx */
3774 c->dst.bytes = c->op_bytes;
3775 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3776 (s16) c->src.val;
3777 break;
3778 case 0xc0 ... 0xc1: /* xadd */
3779 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3780 /* Write back the register source. */
3781 c->src.val = c->dst.orig_val;
3782 write_register_operand(&c->src);
3783 break;
3784 case 0xc3: /* movnti */
3785 c->dst.bytes = c->op_bytes;
3786 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3787 (u64) c->src.val;
3788 break;
3789 case 0xc7: /* Grp9 (cmpxchg8b) */
3790 rc = emulate_grp9(ctxt, ops);
3791 break;
3792 default:
3793 goto cannot_emulate;
3794 }
3795
3796 if (rc != X86EMUL_CONTINUE)
3797 goto done;
3798
3799 goto writeback;
3800
3801 cannot_emulate:
3802 return -1;
3803 }