KVM: x86 emulator: rename decode_cache::eip to _eip
arch/x86/kvm/emulate.c
1/******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <linux/module.h>
26#include <asm/kvm_emulate.h>
27
28#include "x86.h"
29#include "tss.h"
30
31/*
32 * Opcode effective-address decode tables.
33 * Note that we only emulate instructions that have at least one memory
34 * operand (excluding implicit stack references). We assume that stack
35 * references and instruction fetches will never occur in special memory
36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
37 * not be handled.
38 */
39
40/* Operand sizes: 8-bit operands or specified/overridden size. */
41#define ByteOp (1<<0) /* 8-bit operands. */
42/* Destination operand type. */
43#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
44#define DstReg (2<<1) /* Register operand. */
45#define DstMem (3<<1) /* Memory operand. */
46#define DstAcc (4<<1) /* Destination Accumulator */
47#define DstDI (5<<1) /* Destination is in ES:(E)DI */
48#define DstMem64 (6<<1) /* 64bit memory operand */
49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50#define DstDX (8<<1) /* Destination is in DX register */
51#define DstMask (0xf<<1)
52/* Source operand type. */
53#define SrcNone (0<<5) /* No source operand. */
54#define SrcReg (1<<5) /* Register operand. */
55#define SrcMem (2<<5) /* Memory operand. */
56#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
57#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
58#define SrcImm (5<<5) /* Immediate operand. */
59#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
60#define SrcOne (7<<5) /* Implied '1' */
61#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
62#define SrcImmU (9<<5) /* Immediate operand, unsigned */
63#define SrcSI (0xa<<5) /* Source is in DS:(E)SI */
64#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
65#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
66#define SrcAcc (0xd<<5) /* Source Accumulator */
67#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
68#define SrcDX (0xf<<5) /* Source is in DX register */
69#define SrcMask (0xf<<5)
70/* Generic ModRM decode. */
71#define ModRM (1<<9)
72/* Destination is only written; never read. */
73#define Mov (1<<10)
74#define BitOp (1<<11)
75#define MemAbs (1<<12) /* Memory operand is absolute displacement */
76#define String (1<<13) /* String instruction (rep capable) */
77#define Stack (1<<14) /* Stack instruction (push/pop) */
78#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
79#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
80#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
81#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
82#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
83#define Sse (1<<18) /* SSE Vector instruction */
84/* Misc flags */
85#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
86#define VendorSpecific (1<<22) /* Vendor specific instruction */
87#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
88#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
89#define Undefined (1<<25) /* No Such Instruction */
90#define Lock (1<<26) /* lock prefix is allowed for the instruction */
91#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
92#define No64 (1<<28)
93/* Source 2 operand type */
94#define Src2None (0<<29)
95#define Src2CL (1<<29)
96#define Src2ImmByte (2<<29)
97#define Src2One (3<<29)
98#define Src2Imm (4<<29)
99#define Src2Mask (7<<29)
100
101#define X2(x...) x, x
102#define X3(x...) X2(x), x
103#define X4(x...) X2(x), X2(x)
104#define X5(x...) X4(x), x
105#define X6(x...) X4(x), X2(x)
106#define X7(x...) X4(x), X3(x)
107#define X8(x...) X4(x), X4(x)
108#define X16(x...) X8(x), X8(x)
109
110struct opcode {
111 u32 flags;
112 u8 intercept;
113 union {
114 int (*execute)(struct x86_emulate_ctxt *ctxt);
115 struct opcode *group;
116 struct group_dual *gdual;
117 struct gprefix *gprefix;
118 } u;
119 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
120};
121
122struct group_dual {
123 struct opcode mod012[8];
124 struct opcode mod3[8];
125};
126
127struct gprefix {
128 struct opcode pfx_no;
129 struct opcode pfx_66;
130 struct opcode pfx_f2;
131 struct opcode pfx_f3;
132};
133
134/* EFLAGS bit definitions. */
135#define EFLG_ID (1<<21)
136#define EFLG_VIP (1<<20)
137#define EFLG_VIF (1<<19)
138#define EFLG_AC (1<<18)
139#define EFLG_VM (1<<17)
140#define EFLG_RF (1<<16)
141#define EFLG_IOPL (3<<12)
142#define EFLG_NT (1<<14)
143#define EFLG_OF (1<<11)
144#define EFLG_DF (1<<10)
145#define EFLG_IF (1<<9)
146#define EFLG_TF (1<<8)
147#define EFLG_SF (1<<7)
148#define EFLG_ZF (1<<6)
149#define EFLG_AF (1<<4)
150#define EFLG_PF (1<<2)
151#define EFLG_CF (1<<0)
152
153#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
154#define EFLG_RESERVED_ONE_MASK 2
155
156/*
157 * Instruction emulation:
158 * Most instructions are emulated directly via a fragment of inline assembly
159 * code. This allows us to save/restore EFLAGS and thus very easily pick up
160 * any modified flags.
161 */
162
163#if defined(CONFIG_X86_64)
164#define _LO32 "k" /* force 32-bit operand */
165#define _STK "%%rsp" /* stack pointer */
166#elif defined(__i386__)
167#define _LO32 "" /* force 32-bit operand */
168#define _STK "%%esp" /* stack pointer */
169#endif
170
171/*
172 * These EFLAGS bits are restored from saved value during emulation, and
173 * any changes are written back to the saved value after emulation.
174 */
175#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
176
177/* Before executing instruction: restore necessary bits in EFLAGS. */
178#define _PRE_EFLAGS(_sav, _msk, _tmp) \
179 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
180 "movl %"_sav",%"_LO32 _tmp"; " \
181 "push %"_tmp"; " \
182 "push %"_tmp"; " \
183 "movl %"_msk",%"_LO32 _tmp"; " \
184 "andl %"_LO32 _tmp",("_STK"); " \
185 "pushf; " \
186 "notl %"_LO32 _tmp"; " \
187 "andl %"_LO32 _tmp",("_STK"); " \
188 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
189 "pop %"_tmp"; " \
190 "orl %"_LO32 _tmp",("_STK"); " \
191 "popf; " \
192 "pop %"_sav"; "
193
194/* After executing instruction: write-back necessary bits in EFLAGS. */
195#define _POST_EFLAGS(_sav, _msk, _tmp) \
196 /* _sav |= EFLAGS & _msk; */ \
197 "pushf; " \
198 "pop %"_tmp"; " \
199 "andl %"_msk",%"_LO32 _tmp"; " \
200 "orl %"_LO32 _tmp",%"_sav"; "
201
202#ifdef CONFIG_X86_64
203#define ON64(x) x
204#else
205#define ON64(x)
206#endif
207
208#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
209 do { \
210 __asm__ __volatile__ ( \
211 _PRE_EFLAGS("0", "4", "2") \
212 _op _suffix " %"_x"3,%1; " \
213 _POST_EFLAGS("0", "4", "2") \
214 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
215 "=&r" (_tmp) \
216 : _y ((_src).val), "i" (EFLAGS_MASK)); \
217 } while (0)
218
219
220/* Raw emulation: instruction has two explicit operands. */
221#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
222 do { \
223 unsigned long _tmp; \
224 \
225 switch ((_dst).bytes) { \
226 case 2: \
227 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
228 break; \
229 case 4: \
230 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
231 break; \
232 case 8: \
233 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
234 break; \
235 } \
236 } while (0)
237
238#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
239 do { \
240 unsigned long _tmp; \
241 switch ((_dst).bytes) { \
242 case 1: \
243 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
244 break; \
245 default: \
246 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
247 _wx, _wy, _lx, _ly, _qx, _qy); \
248 break; \
249 } \
250 } while (0)
251
252/* Source operand is byte-sized and may be restricted to just %cl. */
253#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
254 __emulate_2op(_op, _src, _dst, _eflags, \
255 "b", "c", "b", "c", "b", "c", "b", "c")
256
257/* Source operand is byte, word, long or quad sized. */
258#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
259 __emulate_2op(_op, _src, _dst, _eflags, \
260 "b", "q", "w", "r", _LO32, "r", "", "r")
261
262/* Source operand is word, long or quad sized. */
263#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
264 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
265 "w", "r", _LO32, "r", "", "r")
266
267/* Instruction has three operands and one operand is stored in ECX register */
268#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
269 do { \
270 unsigned long _tmp; \
271 _type _clv = (_cl).val; \
272 _type _srcv = (_src).val; \
273 _type _dstv = (_dst).val; \
274 \
275 __asm__ __volatile__ ( \
276 _PRE_EFLAGS("0", "5", "2") \
277 _op _suffix " %4,%1 \n" \
278 _POST_EFLAGS("0", "5", "2") \
279 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
280 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
281 ); \
282 \
283 (_cl).val = (unsigned long) _clv; \
284 (_src).val = (unsigned long) _srcv; \
285 (_dst).val = (unsigned long) _dstv; \
286 } while (0)
287
288#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
289 do { \
290 switch ((_dst).bytes) { \
291 case 2: \
292 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
293 "w", unsigned short); \
294 break; \
295 case 4: \
296 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
297 "l", unsigned int); \
298 break; \
299 case 8: \
300 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
301 "q", unsigned long)); \
302 break; \
303 } \
304 } while (0)
305
306#define __emulate_1op(_op, _dst, _eflags, _suffix) \
307 do { \
308 unsigned long _tmp; \
309 \
310 __asm__ __volatile__ ( \
311 _PRE_EFLAGS("0", "3", "2") \
312 _op _suffix " %1; " \
313 _POST_EFLAGS("0", "3", "2") \
314 : "=m" (_eflags), "+m" ((_dst).val), \
315 "=&r" (_tmp) \
316 : "i" (EFLAGS_MASK)); \
317 } while (0)
318
319/* Instruction has only one explicit operand (no source operand). */
320#define emulate_1op(_op, _dst, _eflags) \
321 do { \
322 switch ((_dst).bytes) { \
323 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
324 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
325 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
326 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
327 } \
328 } while (0)
329
330#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
331 do { \
332 unsigned long _tmp; \
333 \
334 __asm__ __volatile__ ( \
335 _PRE_EFLAGS("0", "4", "1") \
336 _op _suffix " %5; " \
337 _POST_EFLAGS("0", "4", "1") \
338 : "=m" (_eflags), "=&r" (_tmp), \
339 "+a" (_rax), "+d" (_rdx) \
340 : "i" (EFLAGS_MASK), "m" ((_src).val), \
341 "a" (_rax), "d" (_rdx)); \
342 } while (0)
343
344#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
345 do { \
346 unsigned long _tmp; \
347 \
348 __asm__ __volatile__ ( \
349 _PRE_EFLAGS("0", "5", "1") \
350 "1: \n\t" \
351 _op _suffix " %6; " \
352 "2: \n\t" \
353 _POST_EFLAGS("0", "5", "1") \
354 ".pushsection .fixup,\"ax\" \n\t" \
355 "3: movb $1, %4 \n\t" \
356 "jmp 2b \n\t" \
357 ".popsection \n\t" \
358 _ASM_EXTABLE(1b, 3b) \
359 : "=m" (_eflags), "=&r" (_tmp), \
360 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
361 : "i" (EFLAGS_MASK), "m" ((_src).val), \
362 "a" (_rax), "d" (_rdx)); \
363 } while (0)
364
365/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
366#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
367 do { \
368 switch((_src).bytes) { \
369 case 1: \
370 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
371 _eflags, "b"); \
372 break; \
373 case 2: \
374 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
375 _eflags, "w"); \
376 break; \
377 case 4: \
378 __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
379 _eflags, "l"); \
380 break; \
381 case 8: \
382 ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
383 _eflags, "q")); \
384 break; \
385 } \
386 } while (0)
387
388#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
389 do { \
390 switch((_src).bytes) { \
391 case 1: \
392 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
393 _eflags, "b", _ex); \
394 break; \
395 case 2: \
396 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
397 _eflags, "w", _ex); \
398 break; \
399 case 4: \
400 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
401 _eflags, "l", _ex); \
402 break; \
403 case 8: ON64( \
404 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
405 _eflags, "q", _ex)); \
406 break; \
407 } \
408 } while (0)
409
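/*
 * Gather the decoded instruction state into an x86_instruction_info and
 * let the vendor module (VMX/SVM) decide whether this instruction should
 * be intercepted at the given emulation stage.
 */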
410static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
411 enum x86_intercept intercept,
412 enum x86_intercept_stage stage)
413{
414 struct x86_instruction_info info = {
415 .intercept = intercept,
416 .rep_prefix = ctxt->decode.rep_prefix,
417 .modrm_mod = ctxt->decode.modrm_mod,
418 .modrm_reg = ctxt->decode.modrm_reg,
419 .modrm_rm = ctxt->decode.modrm_rm,
420 .src_val = ctxt->decode.src.val64,
421 .src_bytes = ctxt->decode.src.bytes,
422 .dst_bytes = ctxt->decode.dst.bytes,
423 .ad_bytes = ctxt->decode.ad_bytes,
424 .next_rip = ctxt->eip,
425 };
426
427 return ctxt->ops->intercept(ctxt, &info, stage);
428}
429
430static inline unsigned long ad_mask(struct decode_cache *c)
431{
432 return (1UL << (c->ad_bytes << 3)) - 1;
433}
434
435/* Access/update address held in a register, based on addressing mode. */
436static inline unsigned long
437address_mask(struct decode_cache *c, unsigned long reg)
438{
439 if (c->ad_bytes == sizeof(unsigned long))
440 return reg;
441 else
442 return reg & ad_mask(c);
443}
444
445static inline unsigned long
446register_address(struct decode_cache *c, unsigned long reg)
447{
448 return address_mask(c, reg);
449}
450
451static inline void
452register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
453{
454 if (c->ad_bytes == sizeof(unsigned long))
455 *reg += inc;
456 else
457 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
458}
459
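/* Relative jumps advance _eip, wrapping according to the address size. */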
460static inline void jmp_rel(struct decode_cache *c, int rel)
461{
462 register_address_increment(c, &c->_eip, rel);
463}
464
465static u32 desc_limit_scaled(struct desc_struct *desc)
466{
467 u32 limit = get_desc_limit(desc);
468
469 return desc->g ? (limit << 12) | 0xfff : limit;
470}
471
472static void set_seg_override(struct decode_cache *c, int seg)
473{
474 c->has_seg_override = true;
475 c->seg_override = seg;
476}
477
478static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
479{
480 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
481 return 0;
482
483 return ctxt->ops->get_cached_segment_base(ctxt, seg);
484}
485
486static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
487 struct decode_cache *c)
488{
489 if (!c->has_seg_override)
490 return 0;
491
492 return c->seg_override;
493}
494
495static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
496 u32 error, bool valid)
497{
498 ctxt->exception.vector = vec;
499 ctxt->exception.error_code = error;
500 ctxt->exception.error_code_valid = valid;
501 return X86EMUL_PROPAGATE_FAULT;
502}
503
504static int emulate_db(struct x86_emulate_ctxt *ctxt)
505{
506 return emulate_exception(ctxt, DB_VECTOR, 0, false);
507}
508
509static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
510{
511 return emulate_exception(ctxt, GP_VECTOR, err, true);
512}
513
514static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
515{
516 return emulate_exception(ctxt, SS_VECTOR, err, true);
517}
518
519static int emulate_ud(struct x86_emulate_ctxt *ctxt)
520{
521 return emulate_exception(ctxt, UD_VECTOR, 0, false);
522}
523
524static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
525{
526 return emulate_exception(ctxt, TS_VECTOR, err, true);
527}
528
529static int emulate_de(struct x86_emulate_ctxt *ctxt)
530{
531 return emulate_exception(ctxt, DE_VECTOR, 0, false);
532}
533
534static int emulate_nm(struct x86_emulate_ctxt *ctxt)
535{
536 return emulate_exception(ctxt, NM_VECTOR, 0, false);
537}
538
539static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
540{
541 u16 selector;
542 struct desc_struct desc;
543
544 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
545 return selector;
546}
547
548static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
549 unsigned seg)
550{
551 u16 dummy;
552 u32 base3;
553 struct desc_struct desc;
554
555 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
556 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
557}
558
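/*
 * Translate a segmented address into a linear address, applying the
 * mode-dependent checks: canonical form in 64-bit mode; segment
 * usability, type, limit (expand-up vs. expand-down) and privilege
 * checks otherwise. Faults are reported as #SS for stack references
 * and #GP for everything else.
 */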
559static int __linearize(struct x86_emulate_ctxt *ctxt,
560 struct segmented_address addr,
561 unsigned size, bool write, bool fetch,
562 ulong *linear)
563{
564 struct decode_cache *c = &ctxt->decode;
565 struct desc_struct desc;
566 bool usable;
567 ulong la;
568 u32 lim;
569 u16 sel;
570 unsigned cpl, rpl;
571
572 la = seg_base(ctxt, addr.seg) + addr.ea;
573 switch (ctxt->mode) {
574 case X86EMUL_MODE_REAL:
575 break;
576 case X86EMUL_MODE_PROT64:
577 if (((signed long)la << 16) >> 16 != la)
578 return emulate_gp(ctxt, 0);
579 break;
580 default:
581 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
582 addr.seg);
583 if (!usable)
584 goto bad;
585 /* code segment or read-only data segment */
586 if (((desc.type & 8) || !(desc.type & 2)) && write)
587 goto bad;
588 /* unreadable code segment */
589 if (!fetch && (desc.type & 8) && !(desc.type & 2))
590 goto bad;
591 lim = desc_limit_scaled(&desc);
592 if ((desc.type & 8) || !(desc.type & 4)) {
593 /* expand-up segment */
594 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
595 goto bad;
596 } else {
597 /* expand-down segment */
598 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
599 goto bad;
600 lim = desc.d ? 0xffffffff : 0xffff;
601 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
602 goto bad;
603 }
604 cpl = ctxt->ops->cpl(ctxt);
605 rpl = sel & 3;
606 cpl = max(cpl, rpl);
607 if (!(desc.type & 8)) {
608 /* data segment */
609 if (cpl > desc.dpl)
610 goto bad;
611 } else if ((desc.type & 8) && !(desc.type & 4)) {
612 /* nonconforming code segment */
613 if (cpl != desc.dpl)
614 goto bad;
615 } else if ((desc.type & 8) && (desc.type & 4)) {
616 /* conforming code segment */
617 if (cpl < desc.dpl)
618 goto bad;
619 }
620 break;
621 }
622 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
623 la &= (u32)-1;
624 *linear = la;
625 return X86EMUL_CONTINUE;
626bad:
627 if (addr.seg == VCPU_SREG_SS)
628 return emulate_ss(ctxt, addr.seg);
629 else
630 return emulate_gp(ctxt, addr.seg);
631}
632
633static int linearize(struct x86_emulate_ctxt *ctxt,
634 struct segmented_address addr,
635 unsigned size, bool write,
636 ulong *linear)
637{
638 return __linearize(ctxt, addr, size, write, false, linear);
639}
640
641
642static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
643 struct segmented_address addr,
644 void *data,
645 unsigned size)
646{
647 int rc;
648 ulong linear;
649
650 rc = linearize(ctxt, addr, size, false, &linear);
651 if (rc != X86EMUL_CONTINUE)
652 return rc;
653 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
654}
655
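/*
 * Fetch one instruction byte through the fetch cache; the cache is
 * refilled in chunks bounded by the 15-byte instruction limit and the
 * current page.
 */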
656static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
657 unsigned long eip, u8 *dest)
658{
659 struct fetch_cache *fc = &ctxt->decode.fetch;
660 int rc;
661 int size, cur_size;
662
663 if (eip == fc->end) {
664 unsigned long linear;
665 struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
666 cur_size = fc->end - fc->start;
667 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
668 rc = __linearize(ctxt, addr, size, false, true, &linear);
669 if (rc != X86EMUL_CONTINUE)
670 return rc;
671 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
672 size, &ctxt->exception);
673 if (rc != X86EMUL_CONTINUE)
674 return rc;
675 fc->end += size;
676 }
677 *dest = fc->data[eip - fc->start];
678 return X86EMUL_CONTINUE;
679}
680
681static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
682 unsigned long eip, void *dest, unsigned size)
683{
684 int rc;
685
686 /* x86 instructions are limited to 15 bytes. */
687 if (eip + size - ctxt->eip > 15)
688 return X86EMUL_UNHANDLEABLE;
689 while (size--) {
690 rc = do_insn_fetch_byte(ctxt, eip++, dest++);
691 if (rc != X86EMUL_CONTINUE)
692 return rc;
693 }
694 return X86EMUL_CONTINUE;
695}
696
697/* Fetch next part of the instruction being emulated. */
698#define insn_fetch(_type, _size, _eip) \
699({ unsigned long _x; \
700 rc = do_insn_fetch(ctxt, (_eip), &_x, (_size)); \
701 if (rc != X86EMUL_CONTINUE) \
702 goto done; \
703 (_eip) += (_size); \
704 (_type)_x; \
705})
706
707#define insn_fetch_arr(_arr, _size, _eip) \
708({ rc = do_insn_fetch(ctxt, (_eip), _arr, (_size)); \
709 if (rc != X86EMUL_CONTINUE) \
710 goto done; \
711 (_eip) += (_size); \
712})
713
714/*
715 * Given the 'reg' portion of a ModRM byte, and a register block, return a
716 * pointer into the block that addresses the relevant register.
717 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
718 */
719static void *decode_register(u8 modrm_reg, unsigned long *regs,
720 int highbyte_regs)
721{
722 void *p;
723
724 p = &regs[modrm_reg];
725 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
726 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
727 return p;
728}
729
730static int read_descriptor(struct x86_emulate_ctxt *ctxt,
731 struct segmented_address addr,
732 u16 *size, unsigned long *address, int op_bytes)
733{
734 int rc;
735
736 if (op_bytes == 2)
737 op_bytes = 3;
738 *address = 0;
739 rc = segmented_read_std(ctxt, addr, size, 2);
740 if (rc != X86EMUL_CONTINUE)
741 return rc;
742 addr.ea += 2;
743 rc = segmented_read_std(ctxt, addr, address, op_bytes);
744 return rc;
745}
746
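/*
 * Evaluate a condition code (the low nibble of a Jcc/SETcc/CMOVcc
 * opcode) against EFLAGS; odd condition codes are the negation of the
 * preceding even ones.
 */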
747static int test_cc(unsigned int condition, unsigned int flags)
748{
749 int rc = 0;
750
751 switch ((condition & 15) >> 1) {
752 case 0: /* o */
753 rc |= (flags & EFLG_OF);
754 break;
755 case 1: /* b/c/nae */
756 rc |= (flags & EFLG_CF);
757 break;
758 case 2: /* z/e */
759 rc |= (flags & EFLG_ZF);
760 break;
761 case 3: /* be/na */
762 rc |= (flags & (EFLG_CF|EFLG_ZF));
763 break;
764 case 4: /* s */
765 rc |= (flags & EFLG_SF);
766 break;
767 case 5: /* p/pe */
768 rc |= (flags & EFLG_PF);
769 break;
770 case 7: /* le/ng */
771 rc |= (flags & EFLG_ZF);
772 /* fall through */
773 case 6: /* l/nge */
774 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
775 break;
776 }
777
778 /* Odd condition identifiers (lsb == 1) have inverted sense. */
779 return (!!rc ^ (condition & 1));
780}
781
782static void fetch_register_operand(struct operand *op)
783{
784 switch (op->bytes) {
785 case 1:
786 op->val = *(u8 *)op->addr.reg;
787 break;
788 case 2:
789 op->val = *(u16 *)op->addr.reg;
790 break;
791 case 4:
792 op->val = *(u32 *)op->addr.reg;
793 break;
794 case 8:
795 op->val = *(u64 *)op->addr.reg;
796 break;
797 }
798}
799
800static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
801{
802 ctxt->ops->get_fpu(ctxt);
803 switch (reg) {
804 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
805 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
806 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
807 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
808 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
809 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
810 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
811 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
812#ifdef CONFIG_X86_64
813 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
814 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
815 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
816 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
817 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
818 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
819 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
820 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
821#endif
822 default: BUG();
823 }
824 ctxt->ops->put_fpu(ctxt);
825}
826
827static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
828 int reg)
829{
830 ctxt->ops->get_fpu(ctxt);
831 switch (reg) {
832 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
833 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
834 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
835 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
836 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
837 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
838 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
839 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
840#ifdef CONFIG_X86_64
841 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
842 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
843 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
844 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
845 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
846 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
847 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
848 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
849#endif
850 default: BUG();
851 }
852 ctxt->ops->put_fpu(ctxt);
853}
854
855static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
856 struct operand *op,
857 struct decode_cache *c,
858 int inhibit_bytereg)
859{
860 unsigned reg = c->modrm_reg;
861 int highbyte_regs = c->rex_prefix == 0;
862
863 if (!(c->d & ModRM))
864 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
865
866 if (c->d & Sse) {
867 op->type = OP_XMM;
868 op->bytes = 16;
869 op->addr.xmm = reg;
870 read_sse_reg(ctxt, &op->vec_val, reg);
871 return;
872 }
873
874 op->type = OP_REG;
875 if ((c->d & ByteOp) && !inhibit_bytereg) {
876 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
877 op->bytes = 1;
878 } else {
879 op->addr.reg = decode_register(reg, c->regs, 0);
880 op->bytes = c->op_bytes;
881 }
882 fetch_register_operand(op);
883 op->orig_val = op->val;
884}
885
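/*
 * Decode the ModRM byte (and any SIB byte and displacement) into either
 * a register operand (mod == 3) or a memory effective address, honoring
 * REX extensions and 16-bit vs. 32/64-bit addressing.
 */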
886static int decode_modrm(struct x86_emulate_ctxt *ctxt,
887 struct operand *op)
888{
889 struct decode_cache *c = &ctxt->decode;
890 u8 sib;
891 int index_reg = 0, base_reg = 0, scale;
892 int rc = X86EMUL_CONTINUE;
893 ulong modrm_ea = 0;
894
895 if (c->rex_prefix) {
896 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
897 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
898 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
899 }
900
901 c->modrm = insn_fetch(u8, 1, c->_eip);
902 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
903 c->modrm_reg |= (c->modrm & 0x38) >> 3;
904 c->modrm_rm |= (c->modrm & 0x07);
905 c->modrm_seg = VCPU_SREG_DS;
906
907 if (c->modrm_mod == 3) {
908 op->type = OP_REG;
909 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
910 op->addr.reg = decode_register(c->modrm_rm,
911 c->regs, c->d & ByteOp);
912 if (c->d & Sse) {
913 op->type = OP_XMM;
914 op->bytes = 16;
915 op->addr.xmm = c->modrm_rm;
916 read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
917 return rc;
918 }
919 fetch_register_operand(op);
920 return rc;
921 }
922
923 op->type = OP_MEM;
924
925 if (c->ad_bytes == 2) {
926 unsigned bx = c->regs[VCPU_REGS_RBX];
927 unsigned bp = c->regs[VCPU_REGS_RBP];
928 unsigned si = c->regs[VCPU_REGS_RSI];
929 unsigned di = c->regs[VCPU_REGS_RDI];
930
931 /* 16-bit ModR/M decode. */
932 switch (c->modrm_mod) {
933 case 0:
934 if (c->modrm_rm == 6)
935 modrm_ea += insn_fetch(u16, 2, c->_eip);
936 break;
937 case 1:
938 modrm_ea += insn_fetch(s8, 1, c->_eip);
939 break;
940 case 2:
941 modrm_ea += insn_fetch(u16, 2, c->_eip);
942 break;
943 }
944 switch (c->modrm_rm) {
945 case 0:
946 modrm_ea += bx + si;
947 break;
948 case 1:
949 modrm_ea += bx + di;
950 break;
951 case 2:
952 modrm_ea += bp + si;
953 break;
954 case 3:
955 modrm_ea += bp + di;
956 break;
957 case 4:
958 modrm_ea += si;
959 break;
960 case 5:
961 modrm_ea += di;
962 break;
963 case 6:
964 if (c->modrm_mod != 0)
965 modrm_ea += bp;
966 break;
967 case 7:
968 modrm_ea += bx;
969 break;
970 }
971 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
972 (c->modrm_rm == 6 && c->modrm_mod != 0))
973 c->modrm_seg = VCPU_SREG_SS;
974 modrm_ea = (u16)modrm_ea;
975 } else {
976 /* 32/64-bit ModR/M decode. */
977 if ((c->modrm_rm & 7) == 4) {
978 sib = insn_fetch(u8, 1, c->_eip);
979 index_reg |= (sib >> 3) & 7;
980 base_reg |= sib & 7;
981 scale = sib >> 6;
982
983 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
984 modrm_ea += insn_fetch(s32, 4, c->_eip);
985 else
986 modrm_ea += c->regs[base_reg];
987 if (index_reg != 4)
988 modrm_ea += c->regs[index_reg] << scale;
989 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
990 if (ctxt->mode == X86EMUL_MODE_PROT64)
991 c->rip_relative = 1;
992 } else
993 modrm_ea += c->regs[c->modrm_rm];
994 switch (c->modrm_mod) {
995 case 0:
996 if (c->modrm_rm == 5)
997 modrm_ea += insn_fetch(s32, 4, c->_eip);
998 break;
999 case 1:
1000 modrm_ea += insn_fetch(s8, 1, c->_eip);
1001 break;
1002 case 2:
1003 modrm_ea += insn_fetch(s32, 4, c->_eip);
1004 break;
1005 }
1006 }
1007 op->addr.mem.ea = modrm_ea;
1008done:
1009 return rc;
1010}
1011
1012static int decode_abs(struct x86_emulate_ctxt *ctxt,
1013 struct operand *op)
1014{
1015 struct decode_cache *c = &ctxt->decode;
1016 int rc = X86EMUL_CONTINUE;
1017
1018 op->type = OP_MEM;
1019 switch (c->ad_bytes) {
1020 case 2:
1021 op->addr.mem.ea = insn_fetch(u16, 2, c->_eip);
1022 break;
1023 case 4:
1024 op->addr.mem.ea = insn_fetch(u32, 4, c->_eip);
1025 break;
1026 case 8:
1027 op->addr.mem.ea = insn_fetch(u64, 8, c->_eip);
1028 break;
1029 }
1030done:
1031 return rc;
1032}
1033
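/*
 * For bit operations (bt/bts/btr/btc) with a register bit offset and a
 * memory destination, fold the byte part of the offset into the
 * effective address and keep only the sub-word bit index in src.val.
 */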
1034static void fetch_bit_operand(struct decode_cache *c)
1035{
1036 long sv = 0, mask;
1037
1038 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
1039 mask = ~(c->dst.bytes * 8 - 1);
1040
1041 if (c->src.bytes == 2)
1042 sv = (s16)c->src.val & (s16)mask;
1043 else if (c->src.bytes == 4)
1044 sv = (s32)c->src.val & (s32)mask;
1045
1046 c->dst.addr.mem.ea += (sv >> 3);
1047 }
1048
1049 /* only subword offset */
1050 c->src.val &= (c->dst.bytes << 3) - 1;
1051}
1052
1053static int read_emulated(struct x86_emulate_ctxt *ctxt,
1054 unsigned long addr, void *dest, unsigned size)
1055{
1056 int rc;
1057 struct read_cache *mc = &ctxt->decode.mem_read;
1058
1059 while (size) {
1060 int n = min(size, 8u);
1061 size -= n;
1062 if (mc->pos < mc->end)
1063 goto read_cached;
1064
1065 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1066 &ctxt->exception);
1067 if (rc != X86EMUL_CONTINUE)
1068 return rc;
1069 mc->end += n;
1070
1071 read_cached:
1072 memcpy(dest, mc->data + mc->pos, n);
1073 mc->pos += n;
1074 dest += n;
1075 addr += n;
1076 }
1077 return X86EMUL_CONTINUE;
1078}
1079
1080static int segmented_read(struct x86_emulate_ctxt *ctxt,
1081 struct segmented_address addr,
1082 void *data,
1083 unsigned size)
1084{
1085 int rc;
1086 ulong linear;
1087
1088 rc = linearize(ctxt, addr, size, false, &linear);
1089 if (rc != X86EMUL_CONTINUE)
1090 return rc;
1091 return read_emulated(ctxt, linear, data, size);
1092}
1093
1094static int segmented_write(struct x86_emulate_ctxt *ctxt,
1095 struct segmented_address addr,
1096 const void *data,
1097 unsigned size)
1098{
1099 int rc;
1100 ulong linear;
1101
1102 rc = linearize(ctxt, addr, size, true, &linear);
1103 if (rc != X86EMUL_CONTINUE)
1104 return rc;
1105 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1106 &ctxt->exception);
1107}
1108
1109static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1110 struct segmented_address addr,
1111 const void *orig_data, const void *data,
1112 unsigned size)
1113{
1114 int rc;
1115 ulong linear;
1116
1117 rc = linearize(ctxt, addr, size, true, &linear);
1118 if (rc != X86EMUL_CONTINUE)
1119 return rc;
1120 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1121 size, &ctxt->exception);
1122}
1123
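/*
 * Read-ahead for IN/INS: on a cache miss, fetch as many elements as the
 * rep count and page boundary allow in a single host round trip, then
 * satisfy subsequent iterations from io_read.
 */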
1124static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1125 unsigned int size, unsigned short port,
1126 void *dest)
1127{
1128 struct read_cache *rc = &ctxt->decode.io_read;
1129
1130 if (rc->pos == rc->end) { /* refill pio read ahead */
1131 struct decode_cache *c = &ctxt->decode;
1132 unsigned int in_page, n;
1133 unsigned int count = c->rep_prefix ?
1134 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
1135 in_page = (ctxt->eflags & EFLG_DF) ?
1136 offset_in_page(c->regs[VCPU_REGS_RDI]) :
1137 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
1138 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1139 count);
1140 if (n == 0)
1141 n = 1;
1142 rc->pos = rc->end = 0;
1143 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1144 return 0;
1145 rc->end = n * size;
1146 }
1147
1148 memcpy(dest, rc->data + rc->pos, size);
1149 rc->pos += size;
1150 return 1;
1151}
1152
1153static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1154 u16 selector, struct desc_ptr *dt)
1155{
1156 struct x86_emulate_ops *ops = ctxt->ops;
1157
1158 if (selector & 1 << 2) {
1159 struct desc_struct desc;
1160 u16 sel;
1161
1162 memset(dt, 0, sizeof *dt);
1163 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1164 return;
1165
1166 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1167 dt->address = get_desc_base(&desc);
1168 } else
1169 ops->get_gdt(ctxt, dt);
1170}
1171
1172/* allowed just for 8-byte segment descriptors */
1173static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1174 u16 selector, struct desc_struct *desc)
1175{
1176 struct desc_ptr dt;
1177 u16 index = selector >> 3;
1178 ulong addr;
1179
1180 get_descriptor_table_ptr(ctxt, selector, &dt);
1181
1182 if (dt.size < index * 8 + 7)
1183 return emulate_gp(ctxt, selector & 0xfffc);
1184
1185 addr = dt.address + index * 8;
1186 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1187 &ctxt->exception);
1188}
1189
1190/* allowed just for 8-byte segment descriptors */
1191static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1192 u16 selector, struct desc_struct *desc)
1193{
1194 struct desc_ptr dt;
1195 u16 index = selector >> 3;
1196 ulong addr;
1197
1198 get_descriptor_table_ptr(ctxt, selector, &dt);
1199
1200 if (dt.size < index * 8 + 7)
1201 return emulate_gp(ctxt, selector & 0xfffc);
1202
1203 addr = dt.address + index * 8;
1204 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1205 &ctxt->exception);
1206}
1207
1208/* Does not support long mode */
1209static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1210 u16 selector, int seg)
1211{
1212 struct desc_struct seg_desc;
1213 u8 dpl, rpl, cpl;
1214 unsigned err_vec = GP_VECTOR;
1215 u32 err_code = 0;
1216 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1217 int ret;
1218
1219 memset(&seg_desc, 0, sizeof seg_desc);
1220
1221 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1222 || ctxt->mode == X86EMUL_MODE_REAL) {
1223 /* set real mode segment descriptor */
1224 set_desc_base(&seg_desc, selector << 4);
1225 set_desc_limit(&seg_desc, 0xffff);
1226 seg_desc.type = 3;
1227 seg_desc.p = 1;
1228 seg_desc.s = 1;
1229 goto load;
1230 }
1231
1232 /* NULL selector is not valid for TR, CS and SS */
1233 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1234 && null_selector)
1235 goto exception;
1236
1237 /* TR should be in GDT only */
1238 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1239 goto exception;
1240
1241 if (null_selector) /* for NULL selector skip all following checks */
1242 goto load;
1243
1244 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1245 if (ret != X86EMUL_CONTINUE)
1246 return ret;
1247
1248 err_code = selector & 0xfffc;
1249 err_vec = GP_VECTOR;
1250
1251 /* can't load system descriptor into segment selector */
1252 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1253 goto exception;
1254
1255 if (!seg_desc.p) {
1256 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1257 goto exception;
1258 }
1259
1260 rpl = selector & 3;
1261 dpl = seg_desc.dpl;
1262 cpl = ctxt->ops->cpl(ctxt);
1263
1264 switch (seg) {
1265 case VCPU_SREG_SS:
1266 /*
1267 * segment is not a writable data segment or segment
1268 * selector's RPL != CPL or segment descriptor's DPL != CPL
1269 */
1270 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1271 goto exception;
1272 break;
1273 case VCPU_SREG_CS:
1274 if (!(seg_desc.type & 8))
1275 goto exception;
1276
1277 if (seg_desc.type & 4) {
1278 /* conforming */
1279 if (dpl > cpl)
1280 goto exception;
1281 } else {
1282 /* nonconforming */
1283 if (rpl > cpl || dpl != cpl)
1284 goto exception;
1285 }
1286 /* CS(RPL) <- CPL */
1287 selector = (selector & 0xfffc) | cpl;
1288 break;
1289 case VCPU_SREG_TR:
1290 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1291 goto exception;
1292 break;
1293 case VCPU_SREG_LDTR:
1294 if (seg_desc.s || seg_desc.type != 2)
1295 goto exception;
1296 break;
1297 default: /* DS, ES, FS, or GS */
1298 /*
1299 * segment is not a data or readable code segment or
1300 * ((segment is a data or nonconforming code segment)
1301 * and (both RPL and CPL > DPL))
1302 */
1303 if ((seg_desc.type & 0xa) == 0x8 ||
1304 (((seg_desc.type & 0xc) != 0xc) &&
1305 (rpl > dpl && cpl > dpl)))
1306 goto exception;
1307 break;
1308 }
1309
1310 if (seg_desc.s) {
1311 /* mark segment as accessed */
1312 seg_desc.type |= 1;
1313 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1314 if (ret != X86EMUL_CONTINUE)
1315 return ret;
1316 }
1317load:
1318 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1319 return X86EMUL_CONTINUE;
1320exception:
1321 emulate_exception(ctxt, err_vec, err_code, true);
1322 return X86EMUL_PROPAGATE_FAULT;
1323}
1324
1325static void write_register_operand(struct operand *op)
1326{
1327 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1328 switch (op->bytes) {
1329 case 1:
1330 *(u8 *)op->addr.reg = (u8)op->val;
1331 break;
1332 case 2:
1333 *(u16 *)op->addr.reg = (u16)op->val;
1334 break;
1335 case 4:
1336 *op->addr.reg = (u32)op->val;
1337 break; /* 64b: zero-extend */
1338 case 8:
1339 *op->addr.reg = op->val;
1340 break;
1341 }
1342}
1343
1344static int writeback(struct x86_emulate_ctxt *ctxt)
1345{
1346 int rc;
1347 struct decode_cache *c = &ctxt->decode;
1348
1349 switch (c->dst.type) {
1350 case OP_REG:
1351 write_register_operand(&c->dst);
1352 break;
1353 case OP_MEM:
1354 if (c->lock_prefix)
1355 rc = segmented_cmpxchg(ctxt,
1356 c->dst.addr.mem,
1357 &c->dst.orig_val,
1358 &c->dst.val,
1359 c->dst.bytes);
1360 else
1361 rc = segmented_write(ctxt,
1362 c->dst.addr.mem,
1363 &c->dst.val,
1364 c->dst.bytes);
1365 if (rc != X86EMUL_CONTINUE)
1366 return rc;
1367 break;
1368 case OP_XMM:
1369 write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
1370 break;
1371 case OP_NONE:
1372 /* no writeback */
1373 break;
1374 default:
1375 break;
1376 }
1377 return X86EMUL_CONTINUE;
1378}
1379
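/* Push c->src.val: decrement RSP by the operand size, then write to SS. */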
1380static int em_push(struct x86_emulate_ctxt *ctxt)
1381{
1382 struct decode_cache *c = &ctxt->decode;
1383 struct segmented_address addr;
1384
1385 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1386 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1387 addr.seg = VCPU_SREG_SS;
1388
1389 /* Disable writeback. */
1390 c->dst.type = OP_NONE;
1391 return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
1392}
1393
1394static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1395 void *dest, int len)
1396{
1397 struct decode_cache *c = &ctxt->decode;
1398 int rc;
1399 struct segmented_address addr;
1400
1401 addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
1402 addr.seg = VCPU_SREG_SS;
1403 rc = segmented_read(ctxt, addr, dest, len);
1404 if (rc != X86EMUL_CONTINUE)
1405 return rc;
1406
1407 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1408 return rc;
1409}
1410
1411static int em_pop(struct x86_emulate_ctxt *ctxt)
1412{
1413 struct decode_cache *c = &ctxt->decode;
1414
1415 return emulate_pop(ctxt, &c->dst.val, c->op_bytes);
1416}
1417
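/*
 * POPF: which flag bits may actually change depends on mode, CPL and
 * IOPL (IF and IOPL are protected in protected mode; VM86 with IOPL < 3
 * faults).
 */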
1418static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1419 void *dest, int len)
1420{
1421 int rc;
1422 unsigned long val, change_mask;
1423 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1424 int cpl = ctxt->ops->cpl(ctxt);
1425
1426 rc = emulate_pop(ctxt, &val, len);
1427 if (rc != X86EMUL_CONTINUE)
1428 return rc;
1429
1430 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1431 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1432
1433 switch(ctxt->mode) {
1434 case X86EMUL_MODE_PROT64:
1435 case X86EMUL_MODE_PROT32:
1436 case X86EMUL_MODE_PROT16:
1437 if (cpl == 0)
1438 change_mask |= EFLG_IOPL;
1439 if (cpl <= iopl)
1440 change_mask |= EFLG_IF;
1441 break;
1442 case X86EMUL_MODE_VM86:
1443 if (iopl < 3)
1444 return emulate_gp(ctxt, 0);
1445 change_mask |= EFLG_IF;
1446 break;
1447 default: /* real mode */
1448 change_mask |= (EFLG_IOPL | EFLG_IF);
1449 break;
1450 }
1451
1452 *(unsigned long *)dest =
1453 (ctxt->eflags & ~change_mask) | (val & change_mask);
1454
1455 return rc;
1456}
1457
1458static int em_popf(struct x86_emulate_ctxt *ctxt)
1459{
1460 struct decode_cache *c = &ctxt->decode;
1461
1462 c->dst.type = OP_REG;
1463 c->dst.addr.reg = &ctxt->eflags;
1464 c->dst.bytes = c->op_bytes;
1465 return emulate_popf(ctxt, &c->dst.val, c->op_bytes);
1466}
1467
1468static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1469{
1470 struct decode_cache *c = &ctxt->decode;
1471
1472 c->src.val = get_segment_selector(ctxt, seg);
1473
1474 return em_push(ctxt);
1475}
1476
1477static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1478{
1479 struct decode_cache *c = &ctxt->decode;
1480 unsigned long selector;
1481 int rc;
1482
1483 rc = emulate_pop(ctxt, &selector, c->op_bytes);
1484 if (rc != X86EMUL_CONTINUE)
1485 return rc;
1486
1487 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1488 return rc;
1489}
1490
1491static int em_pusha(struct x86_emulate_ctxt *ctxt)
1492{
1493 struct decode_cache *c = &ctxt->decode;
1494 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1495 int rc = X86EMUL_CONTINUE;
1496 int reg = VCPU_REGS_RAX;
1497
1498 while (reg <= VCPU_REGS_RDI) {
1499 c->src.val = (reg == VCPU_REGS_RSP) ?
1500 old_esp : c->regs[reg];
1501
1502 rc = em_push(ctxt);
1503 if (rc != X86EMUL_CONTINUE)
1504 return rc;
1505
1506 ++reg;
1507 }
1508
1509 return rc;
1510}
1511
1512static int em_pushf(struct x86_emulate_ctxt *ctxt)
1513{
1514 struct decode_cache *c = &ctxt->decode;
1515
1516 c->src.val = (unsigned long)ctxt->eflags;
1517 return em_push(ctxt);
1518}
1519
1520static int em_popa(struct x86_emulate_ctxt *ctxt)
1521{
1522 struct decode_cache *c = &ctxt->decode;
1523 int rc = X86EMUL_CONTINUE;
1524 int reg = VCPU_REGS_RDI;
1525
1526 while (reg >= VCPU_REGS_RAX) {
1527 if (reg == VCPU_REGS_RSP) {
1528 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1529 c->op_bytes);
1530 --reg;
1531 }
1532
1533 rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes);
1534 if (rc != X86EMUL_CONTINUE)
1535 break;
1536 --reg;
1537 }
1538 return rc;
1539}
1540
1541int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1542{
1543 struct decode_cache *c = &ctxt->decode;
1544 struct x86_emulate_ops *ops = ctxt->ops;
1545 int rc;
1546 struct desc_ptr dt;
1547 gva_t cs_addr;
1548 gva_t eip_addr;
1549 u16 cs, eip;
1550
1551 /* TODO: Add limit checks */
1552 c->src.val = ctxt->eflags;
1553 rc = em_push(ctxt);
1554 if (rc != X86EMUL_CONTINUE)
1555 return rc;
1556
1557 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1558
1559 c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1560 rc = em_push(ctxt);
1561 if (rc != X86EMUL_CONTINUE)
1562 return rc;
1563
1564 c->src.val = c->_eip;
1565 rc = em_push(ctxt);
1566 if (rc != X86EMUL_CONTINUE)
1567 return rc;
1568
1569 ops->get_idt(ctxt, &dt);
1570
1571 eip_addr = dt.address + (irq << 2);
1572 cs_addr = dt.address + (irq << 2) + 2;
1573
1574 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1575 if (rc != X86EMUL_CONTINUE)
1576 return rc;
1577
1578 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1579 if (rc != X86EMUL_CONTINUE)
1580 return rc;
1581
1582 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1583 if (rc != X86EMUL_CONTINUE)
1584 return rc;
1585
1586 c->_eip = eip;
1587
1588 return rc;
1589}
1590
1591static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1592{
1593 switch(ctxt->mode) {
1594 case X86EMUL_MODE_REAL:
1595 return emulate_int_real(ctxt, irq);
1596 case X86EMUL_MODE_VM86:
1597 case X86EMUL_MODE_PROT16:
1598 case X86EMUL_MODE_PROT32:
1599 case X86EMUL_MODE_PROT64:
1600 default:
1601 /* Protected mode interrupts are not implemented yet */
1602 return X86EMUL_UNHANDLEABLE;
1603 }
1604}
1605
1606static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1607{
1608 struct decode_cache *c = &ctxt->decode;
1609 int rc = X86EMUL_CONTINUE;
1610 unsigned long temp_eip = 0;
1611 unsigned long temp_eflags = 0;
1612 unsigned long cs = 0;
1613 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1614 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1615 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1616 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1617
1618 /* TODO: Add stack limit check */
1619
1620 rc = emulate_pop(ctxt, &temp_eip, c->op_bytes);
1621
1622 if (rc != X86EMUL_CONTINUE)
1623 return rc;
1624
1625 if (temp_eip & ~0xffff)
1626 return emulate_gp(ctxt, 0);
1627
1628 rc = emulate_pop(ctxt, &cs, c->op_bytes);
1629
1630 if (rc != X86EMUL_CONTINUE)
1631 return rc;
1632
1633 rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes);
1634
1635 if (rc != X86EMUL_CONTINUE)
1636 return rc;
1637
1638 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1639
1640 if (rc != X86EMUL_CONTINUE)
1641 return rc;
1642
1643 c->_eip = temp_eip;
1644
1645
1646 if (c->op_bytes == 4)
1647 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1648 else if (c->op_bytes == 2) {
1649 ctxt->eflags &= ~0xffff;
1650 ctxt->eflags |= temp_eflags;
1651 }
1652
1653 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1654 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1655
1656 return rc;
1657}
1658
1659static int em_iret(struct x86_emulate_ctxt *ctxt)
1660{
1661 switch(ctxt->mode) {
1662 case X86EMUL_MODE_REAL:
1663 return emulate_iret_real(ctxt);
1664 case X86EMUL_MODE_VM86:
1665 case X86EMUL_MODE_PROT16:
1666 case X86EMUL_MODE_PROT32:
1667 case X86EMUL_MODE_PROT64:
1668 default:
1669 /* iret from protected mode is not implemented yet */
1670 return X86EMUL_UNHANDLEABLE;
1671 }
1672}
1673
1674static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1675{
1676 struct decode_cache *c = &ctxt->decode;
1677 int rc;
1678 unsigned short sel;
1679
1680 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1681
1682 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1683 if (rc != X86EMUL_CONTINUE)
1684 return rc;
1685
1686 c->_eip = 0;
1687 memcpy(&c->_eip, c->src.valptr, c->op_bytes);
1688 return X86EMUL_CONTINUE;
1689}
1690
1691static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1692{
1693 struct decode_cache *c = &ctxt->decode;
1694
1695 return emulate_pop(ctxt, &c->dst.val, c->dst.bytes);
1696}
1697
1698static int em_grp2(struct x86_emulate_ctxt *ctxt)
1699{
1700 struct decode_cache *c = &ctxt->decode;
1701 switch (c->modrm_reg) {
1702 case 0: /* rol */
1703 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1704 break;
1705 case 1: /* ror */
1706 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1707 break;
1708 case 2: /* rcl */
1709 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1710 break;
1711 case 3: /* rcr */
1712 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1713 break;
1714 case 4: /* sal/shl */
1715 case 6: /* sal/shl */
1716 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1717 break;
1718 case 5: /* shr */
1719 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1720 break;
1721 case 7: /* sar */
1722 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1723 break;
1724 }
1725 return X86EMUL_CONTINUE;
1726}
1727
1728static int em_grp3(struct x86_emulate_ctxt *ctxt)
1729{
1730 struct decode_cache *c = &ctxt->decode;
1731 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1732 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1733 u8 de = 0;
1734
1735 switch (c->modrm_reg) {
1736 case 0 ... 1: /* test */
1737 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1738 break;
1739 case 2: /* not */
1740 c->dst.val = ~c->dst.val;
1741 break;
1742 case 3: /* neg */
1743 emulate_1op("neg", c->dst, ctxt->eflags);
1744 break;
1745 case 4: /* mul */
1746 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1747 break;
1748 case 5: /* imul */
1749 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1750 break;
1751 case 6: /* div */
1752 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1753 ctxt->eflags, de);
1754 break;
1755 case 7: /* idiv */
1756 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1757 ctxt->eflags, de);
1758 break;
1759 default:
1760 return X86EMUL_UNHANDLEABLE;
1761 }
1762 if (de)
1763 return emulate_de(ctxt);
1764 return X86EMUL_CONTINUE;
1765}
1766
1767static int em_grp45(struct x86_emulate_ctxt *ctxt)
1768{
1769 struct decode_cache *c = &ctxt->decode;
1770 int rc = X86EMUL_CONTINUE;
1771
1772 switch (c->modrm_reg) {
1773 case 0: /* inc */
1774 emulate_1op("inc", c->dst, ctxt->eflags);
1775 break;
1776 case 1: /* dec */
1777 emulate_1op("dec", c->dst, ctxt->eflags);
1778 break;
1779 case 2: /* call near abs */ {
1780 long int old_eip;
1781 old_eip = c->_eip;
1782 c->_eip = c->src.val;
1783 c->src.val = old_eip;
1784 rc = em_push(ctxt);
1785 break;
1786 }
1787 case 4: /* jmp abs */
1788 c->_eip = c->src.val;
1789 break;
1790 case 5: /* jmp far */
1791 rc = em_jmp_far(ctxt);
1792 break;
1793 case 6: /* push */
1794 rc = em_push(ctxt);
1795 break;
1796 }
1797 return rc;
1798}
1799
1800static int em_grp9(struct x86_emulate_ctxt *ctxt)
1801{
1802 struct decode_cache *c = &ctxt->decode;
1803 u64 old = c->dst.orig_val64;
1804
1805 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1806 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1807 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1808 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1809 ctxt->eflags &= ~EFLG_ZF;
1810 } else {
1811 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1812 (u32) c->regs[VCPU_REGS_RBX];
1813
1814 ctxt->eflags |= EFLG_ZF;
1815 }
1816 return X86EMUL_CONTINUE;
1817}
1818
1819static int em_ret(struct x86_emulate_ctxt *ctxt)
1820{
1821 struct decode_cache *c = &ctxt->decode;
1822
1823 c->dst.type = OP_REG;
1824 c->dst.addr.reg = &c->_eip;
1825 c->dst.bytes = c->op_bytes;
1826 return em_pop(ctxt);
1827}
1828
1829static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1830{
1831 struct decode_cache *c = &ctxt->decode;
1832 int rc;
1833 unsigned long cs;
1834
1835 rc = emulate_pop(ctxt, &c->_eip, c->op_bytes);
1836 if (rc != X86EMUL_CONTINUE)
1837 return rc;
1838 if (c->op_bytes == 4)
1839 c->_eip = (u32)c->_eip;
1840 rc = emulate_pop(ctxt, &cs, c->op_bytes);
1841 if (rc != X86EMUL_CONTINUE)
1842 return rc;
1843 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1844 return rc;
1845}
1846
1847static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
1848{
1849 struct decode_cache *c = &ctxt->decode;
1850 unsigned short sel;
1851 int rc;
1852
1853 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1854
1855 rc = load_segment_descriptor(ctxt, sel, seg);
1856 if (rc != X86EMUL_CONTINUE)
1857 return rc;
1858
1859 c->dst.val = c->src.val;
1860 return rc;
1861}
1862
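/*
 * Build the flat 4GB code and stack descriptors that the syscall-family
 * instructions (syscall/sysret, sysenter/sysexit) load architecturally.
 */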
1863static void
1864setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1865 struct desc_struct *cs, struct desc_struct *ss)
1866{
1867 u16 selector;
1868
1869 memset(cs, 0, sizeof(struct desc_struct));
1870 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1871 memset(ss, 0, sizeof(struct desc_struct));
1872
1873 cs->l = 0; /* will be adjusted later */
1874 set_desc_base(cs, 0); /* flat segment */
1875 cs->g = 1; /* 4kb granularity */
1876 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1877 cs->type = 0x0b; /* Read, Execute, Accessed */
1878 cs->s = 1;
1879 cs->dpl = 0; /* will be adjusted later */
1880 cs->p = 1;
1881 cs->d = 1;
1882
1883 set_desc_base(ss, 0); /* flat segment */
1884 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1885 ss->g = 1; /* 4kb granularity */
1886 ss->s = 1;
1887 ss->type = 0x03; /* Read/Write, Accessed */
1888 ss->d = 1; /* 32bit stack segment */
1889 ss->dpl = 0;
1890 ss->p = 1;
1891}
1892
1893static int em_syscall(struct x86_emulate_ctxt *ctxt)
1894{
1895 struct decode_cache *c = &ctxt->decode;
1896 struct x86_emulate_ops *ops = ctxt->ops;
1897 struct desc_struct cs, ss;
1898 u64 msr_data;
1899 u16 cs_sel, ss_sel;
1900 u64 efer = 0;
1901
1902 /* syscall is not available in real mode */
1903 if (ctxt->mode == X86EMUL_MODE_REAL ||
1904 ctxt->mode == X86EMUL_MODE_VM86)
1905 return emulate_ud(ctxt);
1906
1907 ops->get_msr(ctxt, MSR_EFER, &efer);
1908 setup_syscalls_segments(ctxt, &cs, &ss);
1909
1910 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1911 msr_data >>= 32;
1912 cs_sel = (u16)(msr_data & 0xfffc);
1913 ss_sel = (u16)(msr_data + 8);
1914
1915 if (efer & EFER_LMA) {
1916 cs.d = 0;
1917 cs.l = 1;
1918 }
1919 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1920 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1921
1922 c->regs[VCPU_REGS_RCX] = c->_eip;
1923 if (efer & EFER_LMA) {
1924#ifdef CONFIG_X86_64
1925 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1926
1927 ops->get_msr(ctxt,
1928 ctxt->mode == X86EMUL_MODE_PROT64 ?
1929 MSR_LSTAR : MSR_CSTAR, &msr_data);
1930 c->_eip = msr_data;
1931
1932 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1933 ctxt->eflags &= ~(msr_data | EFLG_RF);
1934#endif
1935 } else {
1936 /* legacy mode */
1937 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1938 c->_eip = (u32)msr_data;
1939
1940 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1941 }
1942
1943 return X86EMUL_CONTINUE;
1944}
1945
1946static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1947{
1948 struct decode_cache *c = &ctxt->decode;
1949 struct x86_emulate_ops *ops = ctxt->ops;
1950 struct desc_struct cs, ss;
1951 u64 msr_data;
1952 u16 cs_sel, ss_sel;
1953 u64 efer = 0;
1954
1955 ops->get_msr(ctxt, MSR_EFER, &efer);
1956 /* inject #GP if in real mode */
1957 if (ctxt->mode == X86EMUL_MODE_REAL)
1958 return emulate_gp(ctxt, 0);
1959
1960 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1961 * Therefore, we inject an #UD.
1962 */
1963 if (ctxt->mode == X86EMUL_MODE_PROT64)
1964 return emulate_ud(ctxt);
1965
1966 setup_syscalls_segments(ctxt, &cs, &ss);
1967
1968 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1969 switch (ctxt->mode) {
1970 case X86EMUL_MODE_PROT32:
1971 if ((msr_data & 0xfffc) == 0x0)
1972 return emulate_gp(ctxt, 0);
1973 break;
1974 case X86EMUL_MODE_PROT64:
1975 if (msr_data == 0x0)
1976 return emulate_gp(ctxt, 0);
1977 break;
1978 }
1979
1980 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1981 cs_sel = (u16)msr_data;
1982 cs_sel &= ~SELECTOR_RPL_MASK;
1983 ss_sel = cs_sel + 8;
1984 ss_sel &= ~SELECTOR_RPL_MASK;
1985 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1986 cs.d = 0;
1987 cs.l = 1;
1988 }
1989
1990 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1991 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1992
1993 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1994 c->_eip = msr_data;
1995
1996 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1997 c->regs[VCPU_REGS_RSP] = msr_data;
1998
1999 return X86EMUL_CONTINUE;
2000}
2001
2002static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2003{
2004 struct decode_cache *c = &ctxt->decode;
2005 struct x86_emulate_ops *ops = ctxt->ops;
2006 struct desc_struct cs, ss;
2007 u64 msr_data;
2008 int usermode;
2009 u16 cs_sel = 0, ss_sel = 0;
2010
2011 /* inject #GP if in real mode or Virtual 8086 mode */
2012 if (ctxt->mode == X86EMUL_MODE_REAL ||
2013 ctxt->mode == X86EMUL_MODE_VM86)
2014 return emulate_gp(ctxt, 0);
2015
2016 setup_syscalls_segments(ctxt, &cs, &ss);
2017
2018 if ((c->rex_prefix & 0x8) != 0x0)
2019 usermode = X86EMUL_MODE_PROT64;
2020 else
2021 usermode = X86EMUL_MODE_PROT32;
2022
2023 cs.dpl = 3;
2024 ss.dpl = 3;
2025 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2026 switch (usermode) {
2027 case X86EMUL_MODE_PROT32:
2028 cs_sel = (u16)(msr_data + 16);
2029 if ((msr_data & 0xfffc) == 0x0)
2030 return emulate_gp(ctxt, 0);
2031 ss_sel = (u16)(msr_data + 24);
2032 break;
2033 case X86EMUL_MODE_PROT64:
2034 cs_sel = (u16)(msr_data + 32);
2035 if (msr_data == 0x0)
2036 return emulate_gp(ctxt, 0);
2037 ss_sel = cs_sel + 8;
2038 cs.d = 0;
2039 cs.l = 1;
2040 break;
2041 }
2042 cs_sel |= SELECTOR_RPL_MASK;
2043 ss_sel |= SELECTOR_RPL_MASK;
2044
2045 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2046 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2047
2048 c->_eip = c->regs[VCPU_REGS_RDX];
2049 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
2050
2051 return X86EMUL_CONTINUE;
2052}
2053
2054static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2055{
2056 int iopl;
2057 if (ctxt->mode == X86EMUL_MODE_REAL)
2058 return false;
2059 if (ctxt->mode == X86EMUL_MODE_VM86)
2060 return true;
2061 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2062 return ctxt->ops->cpl(ctxt) > iopl;
2063}
2064
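/*
 * Worked example (not in the original source) for the TSS I/O bitmap
 * walk below: for port 0x3f8 and len 1, the permission byte lives at
 * io_bitmap_ptr + 0x3f8/8 = io_bitmap_ptr + 0x7f inside the TSS, and
 * bit_idx = 0x3f8 & 7 = 0; access is allowed only if that bit is clear
 * in the word read from the bitmap.
 */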
2065static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2066 u16 port, u16 len)
2067{
2068 struct x86_emulate_ops *ops = ctxt->ops;
2069 struct desc_struct tr_seg;
2070 u32 base3;
2071 int r;
2072 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2073 unsigned mask = (1 << len) - 1;
2074 unsigned long base;
2075
2076 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2077 if (!tr_seg.p)
2078 return false;
2079 if (desc_limit_scaled(&tr_seg) < 103)
2080 return false;
2081 base = get_desc_base(&tr_seg);
2082#ifdef CONFIG_X86_64
2083 base |= ((u64)base3) << 32;
2084#endif
2085 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2086 if (r != X86EMUL_CONTINUE)
2087 return false;
2088 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2089 return false;
2090 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2091 if (r != X86EMUL_CONTINUE)
2092 return false;
2093 if ((perm >> bit_idx) & mask)
2094 return false;
2095 return true;
2096}
2097
2098static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2099 u16 port, u16 len)
2100{
2101 if (ctxt->perm_ok)
2102 return true;
2103
2104 if (emulator_bad_iopl(ctxt))
2105 if (!emulator_io_port_access_allowed(ctxt, port, len))
2106 return false;
2107
2108 ctxt->perm_ok = true;
2109
2110 return true;
2111}
2112
2113static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2114 struct tss_segment_16 *tss)
2115{
2116 struct decode_cache *c = &ctxt->decode;
2117
2118 tss->ip = c->_eip;
2119 tss->flag = ctxt->eflags;
2120 tss->ax = c->regs[VCPU_REGS_RAX];
2121 tss->cx = c->regs[VCPU_REGS_RCX];
2122 tss->dx = c->regs[VCPU_REGS_RDX];
2123 tss->bx = c->regs[VCPU_REGS_RBX];
2124 tss->sp = c->regs[VCPU_REGS_RSP];
2125 tss->bp = c->regs[VCPU_REGS_RBP];
2126 tss->si = c->regs[VCPU_REGS_RSI];
2127 tss->di = c->regs[VCPU_REGS_RDI];
2128
2129 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2130 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2131 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2132 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2133 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2134}
2135
2136static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2137 struct tss_segment_16 *tss)
2138{
2139 struct decode_cache *c = &ctxt->decode;
2140 int ret;
2141
2142 c->_eip = tss->ip;
2143 ctxt->eflags = tss->flag | 2;
2144 c->regs[VCPU_REGS_RAX] = tss->ax;
2145 c->regs[VCPU_REGS_RCX] = tss->cx;
2146 c->regs[VCPU_REGS_RDX] = tss->dx;
2147 c->regs[VCPU_REGS_RBX] = tss->bx;
2148 c->regs[VCPU_REGS_RSP] = tss->sp;
2149 c->regs[VCPU_REGS_RBP] = tss->bp;
2150 c->regs[VCPU_REGS_RSI] = tss->si;
2151 c->regs[VCPU_REGS_RDI] = tss->di;
2152
2153 /*
2154 * SDM says that segment selectors are loaded before segment
2155 * descriptors
2156 */
2157 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2158 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2159 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2160 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2161 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2162
2163 /*
2164	 * Now load segment descriptors. If a fault happens at this stage
2165	 * it is handled in the context of the new task
2166 */
2167 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2168 if (ret != X86EMUL_CONTINUE)
2169 return ret;
2170 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2171 if (ret != X86EMUL_CONTINUE)
2172 return ret;
2173 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2174 if (ret != X86EMUL_CONTINUE)
2175 return ret;
2176 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2177 if (ret != X86EMUL_CONTINUE)
2178 return ret;
2179 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2180 if (ret != X86EMUL_CONTINUE)
2181 return ret;
2182
2183 return X86EMUL_CONTINUE;
2184}
2185
2186static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2187 u16 tss_selector, u16 old_tss_sel,
2188 ulong old_tss_base, struct desc_struct *new_desc)
2189{
2190 struct x86_emulate_ops *ops = ctxt->ops;
2191 struct tss_segment_16 tss_seg;
2192 int ret;
2193 u32 new_tss_base = get_desc_base(new_desc);
2194
2195 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2196 &ctxt->exception);
2197 if (ret != X86EMUL_CONTINUE)
2198 /* FIXME: need to provide precise fault address */
2199 return ret;
2200
2201 save_state_to_tss16(ctxt, &tss_seg);
2202
2203 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2204 &ctxt->exception);
2205 if (ret != X86EMUL_CONTINUE)
2206 /* FIXME: need to provide precise fault address */
2207 return ret;
2208
2209 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2210 &ctxt->exception);
2211 if (ret != X86EMUL_CONTINUE)
2212 /* FIXME: need to provide precise fault address */
2213 return ret;
2214
2215 if (old_tss_sel != 0xffff) {
2216 tss_seg.prev_task_link = old_tss_sel;
2217
2218 ret = ops->write_std(ctxt, new_tss_base,
2219 &tss_seg.prev_task_link,
2220 sizeof tss_seg.prev_task_link,
2221 &ctxt->exception);
2222 if (ret != X86EMUL_CONTINUE)
2223 /* FIXME: need to provide precise fault address */
2224 return ret;
2225 }
2226
2227 return load_state_from_tss16(ctxt, &tss_seg);
2228}
2229
2230static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2231 struct tss_segment_32 *tss)
2232{
2233 struct decode_cache *c = &ctxt->decode;
2234
2235 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2236 tss->eip = c->_eip;
2237 tss->eflags = ctxt->eflags;
2238 tss->eax = c->regs[VCPU_REGS_RAX];
2239 tss->ecx = c->regs[VCPU_REGS_RCX];
2240 tss->edx = c->regs[VCPU_REGS_RDX];
2241 tss->ebx = c->regs[VCPU_REGS_RBX];
2242 tss->esp = c->regs[VCPU_REGS_RSP];
2243 tss->ebp = c->regs[VCPU_REGS_RBP];
2244 tss->esi = c->regs[VCPU_REGS_RSI];
2245 tss->edi = c->regs[VCPU_REGS_RDI];
2246
2247 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2248 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2249 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2250 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2251 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2252 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2253 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2254}
2255
2256static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2257 struct tss_segment_32 *tss)
2258{
2259 struct decode_cache *c = &ctxt->decode;
2260 int ret;
2261
2262 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2263 return emulate_gp(ctxt, 0);
2264 c->_eip = tss->eip;
2265 ctxt->eflags = tss->eflags | 2;
2266 c->regs[VCPU_REGS_RAX] = tss->eax;
2267 c->regs[VCPU_REGS_RCX] = tss->ecx;
2268 c->regs[VCPU_REGS_RDX] = tss->edx;
2269 c->regs[VCPU_REGS_RBX] = tss->ebx;
2270 c->regs[VCPU_REGS_RSP] = tss->esp;
2271 c->regs[VCPU_REGS_RBP] = tss->ebp;
2272 c->regs[VCPU_REGS_RSI] = tss->esi;
2273 c->regs[VCPU_REGS_RDI] = tss->edi;
2274
2275 /*
2276 * SDM says that segment selectors are loaded before segment
2277 * descriptors
2278 */
2279 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2280 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2281 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2282 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2283 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2284 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2285 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2286
2287 /*
2288	 * Now load segment descriptors. If a fault happens at this stage
2289	 * it is handled in the context of the new task
2290 */
2291 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2292 if (ret != X86EMUL_CONTINUE)
2293 return ret;
2294 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2295 if (ret != X86EMUL_CONTINUE)
2296 return ret;
2297 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2298 if (ret != X86EMUL_CONTINUE)
2299 return ret;
2300 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2301 if (ret != X86EMUL_CONTINUE)
2302 return ret;
2303 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2304 if (ret != X86EMUL_CONTINUE)
2305 return ret;
2306 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2307 if (ret != X86EMUL_CONTINUE)
2308 return ret;
2309 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2310 if (ret != X86EMUL_CONTINUE)
2311 return ret;
2312
2313 return X86EMUL_CONTINUE;
2314}
2315
2316static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2317 u16 tss_selector, u16 old_tss_sel,
2318 ulong old_tss_base, struct desc_struct *new_desc)
2319{
2320 struct x86_emulate_ops *ops = ctxt->ops;
2321 struct tss_segment_32 tss_seg;
2322 int ret;
2323 u32 new_tss_base = get_desc_base(new_desc);
2324
2325 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2326 &ctxt->exception);
2327 if (ret != X86EMUL_CONTINUE)
2328 /* FIXME: need to provide precise fault address */
2329 return ret;
2330
2331 save_state_to_tss32(ctxt, &tss_seg);
2332
2333 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2334 &ctxt->exception);
2335 if (ret != X86EMUL_CONTINUE)
2336 /* FIXME: need to provide precise fault address */
2337 return ret;
2338
2339 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2340 &ctxt->exception);
2341 if (ret != X86EMUL_CONTINUE)
2342 /* FIXME: need to provide precise fault address */
2343 return ret;
2344
2345 if (old_tss_sel != 0xffff) {
2346 tss_seg.prev_task_link = old_tss_sel;
2347
2348 ret = ops->write_std(ctxt, new_tss_base,
2349 &tss_seg.prev_task_link,
2350 sizeof tss_seg.prev_task_link,
2351 &ctxt->exception);
2352 if (ret != X86EMUL_CONTINUE)
2353 /* FIXME: need to provide precise fault address */
2354 return ret;
2355 }
2356
2357 return load_state_from_tss32(ctxt, &tss_seg);
2358}
2359
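/*
 * Overview (not in the original source) of the hardware task-switch
 * sequence emulated below: read the old and new TSS descriptors, check
 * the new descriptor's DPL and limit, save the outgoing state into the
 * old TSS, load the incoming state from the new one, chain the back
 * link and NT flag for CALL/GATE switches, mark the new TSS busy, and
 * finally set CR0.TS and load TR.
 */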
2360static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2361 u16 tss_selector, int reason,
2362 bool has_error_code, u32 error_code)
2363{
2364 struct x86_emulate_ops *ops = ctxt->ops;
2365 struct desc_struct curr_tss_desc, next_tss_desc;
2366 int ret;
2367 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2368 ulong old_tss_base =
2369 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2370 u32 desc_limit;
2371
2372 /* FIXME: old_tss_base == ~0 ? */
2373
2374 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2375 if (ret != X86EMUL_CONTINUE)
2376 return ret;
2377 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2378 if (ret != X86EMUL_CONTINUE)
2379 return ret;
2380
2381 /* FIXME: check that next_tss_desc is tss */
2382
2383 if (reason != TASK_SWITCH_IRET) {
2384 if ((tss_selector & 3) > next_tss_desc.dpl ||
2385 ops->cpl(ctxt) > next_tss_desc.dpl)
2386 return emulate_gp(ctxt, 0);
2387 }
2388
2389 desc_limit = desc_limit_scaled(&next_tss_desc);
2390 if (!next_tss_desc.p ||
2391 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2392 desc_limit < 0x2b)) {
2393 emulate_ts(ctxt, tss_selector & 0xfffc);
2394 return X86EMUL_PROPAGATE_FAULT;
2395 }
2396
2397 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2398 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2399 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2400 }
2401
2402 if (reason == TASK_SWITCH_IRET)
2403 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2404
2405	/* set back link to prev task only if NT bit is set in eflags;
2406	   note that old_tss_sel is not used after this point */
2407 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2408 old_tss_sel = 0xffff;
2409
2410 if (next_tss_desc.type & 8)
2411 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2412 old_tss_base, &next_tss_desc);
2413 else
2414 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2415 old_tss_base, &next_tss_desc);
2416 if (ret != X86EMUL_CONTINUE)
2417 return ret;
2418
2419 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2420 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2421
2422 if (reason != TASK_SWITCH_IRET) {
2423 next_tss_desc.type |= (1 << 1); /* set busy flag */
2424 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2425 }
2426
2427 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2428 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2429
2430 if (has_error_code) {
2431 struct decode_cache *c = &ctxt->decode;
2432
2433 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2434 c->lock_prefix = 0;
2435 c->src.val = (unsigned long) error_code;
2436 ret = em_push(ctxt);
2437 }
2438
2439 return ret;
2440}
2441
2442int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2443 u16 tss_selector, int reason,
2444 bool has_error_code, u32 error_code)
2445{
2446 struct decode_cache *c = &ctxt->decode;
2447 int rc;
2448
2449 c->_eip = ctxt->eip;
2450 c->dst.type = OP_NONE;
2451
2452 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2453 has_error_code, error_code);
2454
2455 if (rc == X86EMUL_CONTINUE)
2456 ctxt->eip = c->_eip;
2457
2458 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2459}
2460
2461static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2462 int reg, struct operand *op)
2463{
2464 struct decode_cache *c = &ctxt->decode;
2465 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2466
2467 register_address_increment(c, &c->regs[reg], df * op->bytes);
2468 op->addr.mem.ea = register_address(c, c->regs[reg]);
2469 op->addr.mem.seg = seg;
2470}
2471
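/*
 * Worked example (not in the original source) for the DAS adjustment
 * below, starting from AL = 0x9c with CF = AF = 0: the low nibble 0xc
 * is greater than 9, so AL -= 6 gives 0x96 and AF = 1; the original
 * AL (0x9c) exceeds 0x99, so AL -= 0x60 gives 0x36 and CF = 1.  The
 * result is the packed-BCD value 0x36.
 */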
2472static int em_das(struct x86_emulate_ctxt *ctxt)
2473{
2474 struct decode_cache *c = &ctxt->decode;
2475 u8 al, old_al;
2476 bool af, cf, old_cf;
2477
2478 cf = ctxt->eflags & X86_EFLAGS_CF;
2479 al = c->dst.val;
2480
2481 old_al = al;
2482 old_cf = cf;
2483 cf = false;
2484 af = ctxt->eflags & X86_EFLAGS_AF;
2485 if ((al & 0x0f) > 9 || af) {
2486 al -= 6;
2487 cf = old_cf | (al >= 250);
2488 af = true;
2489 } else {
2490 af = false;
2491 }
2492 if (old_al > 0x99 || old_cf) {
2493 al -= 0x60;
2494 cf = true;
2495 }
2496
2497 c->dst.val = al;
2498 /* Set PF, ZF, SF */
2499 c->src.type = OP_IMM;
2500 c->src.val = 0;
2501 c->src.bytes = 1;
2502 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2503 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2504 if (cf)
2505 ctxt->eflags |= X86_EFLAGS_CF;
2506 if (af)
2507 ctxt->eflags |= X86_EFLAGS_AF;
2508 return X86EMUL_CONTINUE;
2509}
2510
2511static int em_call_far(struct x86_emulate_ctxt *ctxt)
2512{
2513 struct decode_cache *c = &ctxt->decode;
2514 u16 sel, old_cs;
2515 ulong old_eip;
2516 int rc;
2517
2518 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2519 old_eip = c->_eip;
2520
2521 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2522 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2523 return X86EMUL_CONTINUE;
2524
2525 c->_eip = 0;
2526 memcpy(&c->_eip, c->src.valptr, c->op_bytes);
2527
2528 c->src.val = old_cs;
2529 rc = em_push(ctxt);
2530 if (rc != X86EMUL_CONTINUE)
2531 return rc;
2532
2533 c->src.val = old_eip;
2534 return em_push(ctxt);
2535}
2536
2537static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2538{
2539 struct decode_cache *c = &ctxt->decode;
2540 int rc;
2541
2542 c->dst.type = OP_REG;
2543 c->dst.addr.reg = &c->_eip;
2544 c->dst.bytes = c->op_bytes;
2545 rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes);
2546 if (rc != X86EMUL_CONTINUE)
2547 return rc;
2548 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2549 return X86EMUL_CONTINUE;
2550}
2551
2552static int em_add(struct x86_emulate_ctxt *ctxt)
2553{
2554 struct decode_cache *c = &ctxt->decode;
2555
2556 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
2557 return X86EMUL_CONTINUE;
2558}
2559
2560static int em_or(struct x86_emulate_ctxt *ctxt)
2561{
2562 struct decode_cache *c = &ctxt->decode;
2563
2564 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2565 return X86EMUL_CONTINUE;
2566}
2567
2568static int em_adc(struct x86_emulate_ctxt *ctxt)
2569{
2570 struct decode_cache *c = &ctxt->decode;
2571
2572 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
2573 return X86EMUL_CONTINUE;
2574}
2575
2576static int em_sbb(struct x86_emulate_ctxt *ctxt)
2577{
2578 struct decode_cache *c = &ctxt->decode;
2579
2580 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
2581 return X86EMUL_CONTINUE;
2582}
2583
2584static int em_and(struct x86_emulate_ctxt *ctxt)
2585{
2586 struct decode_cache *c = &ctxt->decode;
2587
2588 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
2589 return X86EMUL_CONTINUE;
2590}
2591
2592static int em_sub(struct x86_emulate_ctxt *ctxt)
2593{
2594 struct decode_cache *c = &ctxt->decode;
2595
2596 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
2597 return X86EMUL_CONTINUE;
2598}
2599
2600static int em_xor(struct x86_emulate_ctxt *ctxt)
2601{
2602 struct decode_cache *c = &ctxt->decode;
2603
2604 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
2605 return X86EMUL_CONTINUE;
2606}
2607
2608static int em_cmp(struct x86_emulate_ctxt *ctxt)
2609{
2610 struct decode_cache *c = &ctxt->decode;
2611
2612 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2613 /* Disable writeback. */
2614 c->dst.type = OP_NONE;
2615 return X86EMUL_CONTINUE;
2616}
2617
2618static int em_test(struct x86_emulate_ctxt *ctxt)
2619{
2620 struct decode_cache *c = &ctxt->decode;
2621
2622 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2623 return X86EMUL_CONTINUE;
2624}
2625
2626static int em_xchg(struct x86_emulate_ctxt *ctxt)
2627{
2628 struct decode_cache *c = &ctxt->decode;
2629
2630 /* Write back the register source. */
2631 c->src.val = c->dst.val;
2632 write_register_operand(&c->src);
2633
2634 /* Write back the memory destination with implicit LOCK prefix. */
2635 c->dst.val = c->src.orig_val;
2636 c->lock_prefix = 1;
2637 return X86EMUL_CONTINUE;
2638}
2639
2640static int em_imul(struct x86_emulate_ctxt *ctxt)
2641{
2642 struct decode_cache *c = &ctxt->decode;
2643
2644 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2645 return X86EMUL_CONTINUE;
2646}
2647
2648static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2649{
2650 struct decode_cache *c = &ctxt->decode;
2651
2652 c->dst.val = c->src2.val;
2653 return em_imul(ctxt);
2654}
2655
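/*
 * Note (not in the original source): the expression in em_cwd() below
 * broadcasts the sign bit of the source into RDX.  src >> (bits - 1)
 * is 0 or 1, so ~((0) - 1) == 0 and ~((1) - 1) == ~0UL.  For example,
 * CWD with AX = 0x8000 yields DX = 0xffff, while AX = 0x1234 yields
 * DX = 0.
 */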
2656static int em_cwd(struct x86_emulate_ctxt *ctxt)
2657{
2658 struct decode_cache *c = &ctxt->decode;
2659
2660 c->dst.type = OP_REG;
2661 c->dst.bytes = c->src.bytes;
2662 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2663 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2664
2665 return X86EMUL_CONTINUE;
2666}
2667
2668static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2669{
2670 struct decode_cache *c = &ctxt->decode;
2671 u64 tsc = 0;
2672
2673 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2674 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2675 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2676 return X86EMUL_CONTINUE;
2677}
2678
2679static int em_mov(struct x86_emulate_ctxt *ctxt)
2680{
2681 struct decode_cache *c = &ctxt->decode;
2682 c->dst.val = c->src.val;
2683 return X86EMUL_CONTINUE;
2684}
2685
2686static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2687{
2688 struct decode_cache *c = &ctxt->decode;
2689
2690 if (c->modrm_reg > VCPU_SREG_GS)
2691 return emulate_ud(ctxt);
2692
2693 c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
2694 return X86EMUL_CONTINUE;
2695}
2696
2697static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2698{
2699 struct decode_cache *c = &ctxt->decode;
2700 u16 sel = c->src.val;
2701
2702 if (c->modrm_reg == VCPU_SREG_CS || c->modrm_reg > VCPU_SREG_GS)
2703 return emulate_ud(ctxt);
2704
2705 if (c->modrm_reg == VCPU_SREG_SS)
2706 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2707
2708 /* Disable writeback. */
2709 c->dst.type = OP_NONE;
2710 return load_segment_descriptor(ctxt, sel, c->modrm_reg);
2711}
2712
2713static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2714{
2715 struct decode_cache *c = &ctxt->decode;
2716 memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
2717 return X86EMUL_CONTINUE;
2718}
2719
2720static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2721{
2722 struct decode_cache *c = &ctxt->decode;
2723 int rc;
2724 ulong linear;
2725
2726 rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
2727 if (rc == X86EMUL_CONTINUE)
2728 ctxt->ops->invlpg(ctxt, linear);
2729 /* Disable writeback. */
2730 c->dst.type = OP_NONE;
2731 return X86EMUL_CONTINUE;
2732}
2733
2734static int em_clts(struct x86_emulate_ctxt *ctxt)
2735{
2736 ulong cr0;
2737
2738 cr0 = ctxt->ops->get_cr(ctxt, 0);
2739 cr0 &= ~X86_CR0_TS;
2740 ctxt->ops->set_cr(ctxt, 0, cr0);
2741 return X86EMUL_CONTINUE;
2742}
2743
2744static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2745{
2746 struct decode_cache *c = &ctxt->decode;
2747 int rc;
2748
2749 if (c->modrm_mod != 3 || c->modrm_rm != 1)
2750 return X86EMUL_UNHANDLEABLE;
2751
2752 rc = ctxt->ops->fix_hypercall(ctxt);
2753 if (rc != X86EMUL_CONTINUE)
2754 return rc;
2755
2756 /* Let the processor re-execute the fixed hypercall */
2757 c->_eip = ctxt->eip;
2758 /* Disable writeback. */
2759 c->dst.type = OP_NONE;
2760 return X86EMUL_CONTINUE;
2761}
2762
2763static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2764{
2765 struct decode_cache *c = &ctxt->decode;
2766 struct desc_ptr desc_ptr;
2767 int rc;
2768
2769 rc = read_descriptor(ctxt, c->src.addr.mem,
2770 &desc_ptr.size, &desc_ptr.address,
2771 c->op_bytes);
2772 if (rc != X86EMUL_CONTINUE)
2773 return rc;
2774 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2775 /* Disable writeback. */
2776 c->dst.type = OP_NONE;
2777 return X86EMUL_CONTINUE;
2778}
2779
2780static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2781{
2782 struct decode_cache *c = &ctxt->decode;
2783 int rc;
2784
2785 rc = ctxt->ops->fix_hypercall(ctxt);
2786
2787 /* Disable writeback. */
2788 c->dst.type = OP_NONE;
2789 return rc;
2790}
2791
2792static int em_lidt(struct x86_emulate_ctxt *ctxt)
2793{
2794 struct decode_cache *c = &ctxt->decode;
2795 struct desc_ptr desc_ptr;
2796 int rc;
2797
2798 rc = read_descriptor(ctxt, c->src.addr.mem,
2799 &desc_ptr.size, &desc_ptr.address,
2800 c->op_bytes);
2801 if (rc != X86EMUL_CONTINUE)
2802 return rc;
2803 ctxt->ops->set_idt(ctxt, &desc_ptr);
2804 /* Disable writeback. */
2805 c->dst.type = OP_NONE;
2806 return X86EMUL_CONTINUE;
2807}
2808
2809static int em_smsw(struct x86_emulate_ctxt *ctxt)
2810{
2811 struct decode_cache *c = &ctxt->decode;
2812
2813 c->dst.bytes = 2;
2814 c->dst.val = ctxt->ops->get_cr(ctxt, 0);
2815 return X86EMUL_CONTINUE;
2816}
2817
2818static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2819{
2820 struct decode_cache *c = &ctxt->decode;
2821 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2822 | (c->src.val & 0x0f));
2823 c->dst.type = OP_NONE;
2824 return X86EMUL_CONTINUE;
2825}
2826
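/*
 * Note (not in the original source): opcodes 0xe0-0xe2 are LOOPNE,
 * LOOPE and LOOP.  The c->b ^ 0x5 trick below maps 0xe0 to condition
 * code 5 (NZ, for LOOPNE) and 0xe1 to condition code 4 (Z, for LOOPE),
 * so test_cc() checks ZF with the right polarity; plain LOOP (0xe2)
 * tests only (E)CX.
 */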
2827static int em_loop(struct x86_emulate_ctxt *ctxt)
2828{
2829 struct decode_cache *c = &ctxt->decode;
2830
2831 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
2832 if ((address_mask(c, c->regs[VCPU_REGS_RCX]) != 0) &&
2833 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
2834 jmp_rel(c, c->src.val);
2835
2836 return X86EMUL_CONTINUE;
2837}
2838
2839static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2840{
2841 struct decode_cache *c = &ctxt->decode;
2842
2843 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
2844 jmp_rel(c, c->src.val);
2845
2846 return X86EMUL_CONTINUE;
2847}
2848
2849static int em_cli(struct x86_emulate_ctxt *ctxt)
2850{
2851 if (emulator_bad_iopl(ctxt))
2852 return emulate_gp(ctxt, 0);
2853
2854 ctxt->eflags &= ~X86_EFLAGS_IF;
2855 return X86EMUL_CONTINUE;
2856}
2857
2858static int em_sti(struct x86_emulate_ctxt *ctxt)
2859{
2860 if (emulator_bad_iopl(ctxt))
2861 return emulate_gp(ctxt, 0);
2862
2863 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2864 ctxt->eflags |= X86_EFLAGS_IF;
2865 return X86EMUL_CONTINUE;
2866}
2867
2868static bool valid_cr(int nr)
2869{
2870 switch (nr) {
2871 case 0:
2872 case 2 ... 4:
2873 case 8:
2874 return true;
2875 default:
2876 return false;
2877 }
2878}
2879
2880static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2881{
2882 struct decode_cache *c = &ctxt->decode;
2883
2884 if (!valid_cr(c->modrm_reg))
2885 return emulate_ud(ctxt);
2886
2887 return X86EMUL_CONTINUE;
2888}
2889
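/*
 * Example (not in the original source) of the checks below: a guest
 * write of CR0 with PG set but PE clear, or a CR4 write that clears
 * PAE while EFER.LMA is set, must raise #GP(0) rather than reach the
 * host, matching the architectural reserved-combination rules.
 */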
2890static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2891{
2892 struct decode_cache *c = &ctxt->decode;
2893 u64 new_val = c->src.val64;
2894 int cr = c->modrm_reg;
2895 u64 efer = 0;
2896
2897 static u64 cr_reserved_bits[] = {
2898 0xffffffff00000000ULL,
2899 0, 0, 0, /* CR3 checked later */
2900 CR4_RESERVED_BITS,
2901 0, 0, 0,
2902 CR8_RESERVED_BITS,
2903 };
2904
2905 if (!valid_cr(cr))
2906 return emulate_ud(ctxt);
2907
2908 if (new_val & cr_reserved_bits[cr])
2909 return emulate_gp(ctxt, 0);
2910
2911 switch (cr) {
2912 case 0: {
2913 u64 cr4;
2914 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2915 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2916 return emulate_gp(ctxt, 0);
2917
2918 cr4 = ctxt->ops->get_cr(ctxt, 4);
2919 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2920
2921 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2922 !(cr4 & X86_CR4_PAE))
2923 return emulate_gp(ctxt, 0);
2924
2925 break;
2926 }
2927 case 3: {
2928 u64 rsvd = 0;
2929
2930 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2931 if (efer & EFER_LMA)
2932 rsvd = CR3_L_MODE_RESERVED_BITS;
2933 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
2934 rsvd = CR3_PAE_RESERVED_BITS;
2935 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
2936 rsvd = CR3_NONPAE_RESERVED_BITS;
2937
2938 if (new_val & rsvd)
2939 return emulate_gp(ctxt, 0);
2940
2941 break;
2942 }
2943 case 4: {
2944 u64 cr4;
2945
2946 cr4 = ctxt->ops->get_cr(ctxt, 4);
2947 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2948
2949 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2950 return emulate_gp(ctxt, 0);
2951
2952 break;
2953 }
2954 }
2955
2956 return X86EMUL_CONTINUE;
2957}
2958
2959static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2960{
2961 unsigned long dr7;
2962
2963 ctxt->ops->get_dr(ctxt, 7, &dr7);
2964
2965	/* Check if DR7.GD (general detect enable, bit 13) is set */
2966 return dr7 & (1 << 13);
2967}
2968
2969static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2970{
2971 struct decode_cache *c = &ctxt->decode;
2972 int dr = c->modrm_reg;
2973 u64 cr4;
2974
2975 if (dr > 7)
2976 return emulate_ud(ctxt);
2977
2978 cr4 = ctxt->ops->get_cr(ctxt, 4);
2979 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2980 return emulate_ud(ctxt);
2981
2982 if (check_dr7_gd(ctxt))
2983 return emulate_db(ctxt);
2984
2985 return X86EMUL_CONTINUE;
2986}
2987
2988static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2989{
2990 struct decode_cache *c = &ctxt->decode;
2991 u64 new_val = c->src.val64;
2992 int dr = c->modrm_reg;
2993
2994 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2995 return emulate_gp(ctxt, 0);
2996
2997 return check_dr_read(ctxt);
2998}
2999
3000static int check_svme(struct x86_emulate_ctxt *ctxt)
3001{
3002 u64 efer;
3003
3004 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3005
3006 if (!(efer & EFER_SVME))
3007 return emulate_ud(ctxt);
3008
3009 return X86EMUL_CONTINUE;
3010}
3011
3012static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3013{
3014 u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];
3015
3016 /* Valid physical address? */
3017 if (rax & 0xffff000000000000ULL)
3018 return emulate_gp(ctxt, 0);
3019
3020 return check_svme(ctxt);
3021}
3022
3023static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3024{
3025 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3026
3027 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3028 return emulate_ud(ctxt);
3029
3030 return X86EMUL_CONTINUE;
3031}
3032
3033static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3034{
3035 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3036 u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];
3037
3038 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3039 (rcx > 3))
3040 return emulate_gp(ctxt, 0);
3041
3042 return X86EMUL_CONTINUE;
3043}
3044
3045static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3046{
3047 struct decode_cache *c = &ctxt->decode;
3048
3049 c->dst.bytes = min(c->dst.bytes, 4u);
3050 if (!emulator_io_permited(ctxt, c->src.val, c->dst.bytes))
3051 return emulate_gp(ctxt, 0);
3052
3053 return X86EMUL_CONTINUE;
3054}
3055
3056static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3057{
3058 struct decode_cache *c = &ctxt->decode;
3059
3060 c->src.bytes = min(c->src.bytes, 4u);
3061 if (!emulator_io_permited(ctxt, c->dst.val, c->src.bytes))
3062 return emulate_gp(ctxt, 0);
3063
3064 return X86EMUL_CONTINUE;
3065}
3066
3067#define D(_y) { .flags = (_y) }
3068#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3069#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3070 .check_perm = (_p) }
3071#define N D(0)
3072#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3073#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3074#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3075#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3076#define II(_f, _e, _i) \
3077 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3078#define IIP(_f, _e, _i, _p) \
3079 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3080 .check_perm = (_p) }
3081#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3082
3083#define D2bv(_f) D((_f) | ByteOp), D(_f)
3084#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3085#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3086
3087#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3088 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3089 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3090
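/*
 * Sketch (not in the original source) of how the table macros expand:
 * I6ALU(Lock, em_add) emits the six classic ALU encodings, i.e.
 * r/m,reg and reg,r/m in both byte and word/long sizes plus the
 * AL/eAX,imm forms, which is exactly the 0x00-0x05 opcode pattern used
 * for add/or/adc/sbb/and/sub/xor/cmp in opcode_table[] further down.
 */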
3091static struct opcode group7_rm1[] = {
3092 DI(SrcNone | ModRM | Priv, monitor),
3093 DI(SrcNone | ModRM | Priv, mwait),
3094 N, N, N, N, N, N,
3095};
3096
3097static struct opcode group7_rm3[] = {
3098 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3099 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3100 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3101 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3102 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3103 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3104 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3105 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3106};
3107
3108static struct opcode group7_rm7[] = {
3109 N,
3110 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3111 N, N, N, N, N, N,
3112};
3113
3114static struct opcode group1[] = {
3115 I(Lock, em_add),
3116 I(Lock, em_or),
3117 I(Lock, em_adc),
3118 I(Lock, em_sbb),
3119 I(Lock, em_and),
3120 I(Lock, em_sub),
3121 I(Lock, em_xor),
3122 I(0, em_cmp),
3123};
3124
3125static struct opcode group1A[] = {
3126 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3127};
3128
3129static struct opcode group3[] = {
3130 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
3131 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3132 X4(D(SrcMem | ModRM)),
3133};
3134
3135static struct opcode group4[] = {
3136 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3137 N, N, N, N, N, N,
3138};
3139
3140static struct opcode group5[] = {
3141 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3142 D(SrcMem | ModRM | Stack),
3143 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3144 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3145 D(SrcMem | ModRM | Stack), N,
3146};
3147
3148static struct opcode group6[] = {
3149 DI(ModRM | Prot, sldt),
3150 DI(ModRM | Prot, str),
3151 DI(ModRM | Prot | Priv, lldt),
3152 DI(ModRM | Prot | Priv, ltr),
3153 N, N, N, N,
3154};
3155
3156static struct group_dual group7 = { {
3157 DI(ModRM | Mov | DstMem | Priv, sgdt),
3158 DI(ModRM | Mov | DstMem | Priv, sidt),
3159 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3160 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3161 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3162 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3163 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3164}, {
3165 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3166 EXT(0, group7_rm1),
3167 N, EXT(0, group7_rm3),
3168 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3169 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3170} };
3171
3172static struct opcode group8[] = {
3173 N, N, N, N,
3174 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3175 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3176};
3177
3178static struct group_dual group9 = { {
3179 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3180}, {
3181 N, N, N, N, N, N, N, N,
3182} };
3183
3184static struct opcode group11[] = {
3185 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3186};
3187
3188static struct gprefix pfx_0f_6f_0f_7f = {
3189 N, N, N, I(Sse, em_movdqu),
3190};
3191
3192static struct opcode opcode_table[256] = {
3193 /* 0x00 - 0x07 */
3194 I6ALU(Lock, em_add),
3195 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3196 /* 0x08 - 0x0F */
3197 I6ALU(Lock, em_or),
3198 D(ImplicitOps | Stack | No64), N,
3199 /* 0x10 - 0x17 */
3200 I6ALU(Lock, em_adc),
3201 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3202 /* 0x18 - 0x1F */
3203 I6ALU(Lock, em_sbb),
3204 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3205 /* 0x20 - 0x27 */
3206 I6ALU(Lock, em_and), N, N,
3207 /* 0x28 - 0x2F */
3208 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3209 /* 0x30 - 0x37 */
3210 I6ALU(Lock, em_xor), N, N,
3211 /* 0x38 - 0x3F */
3212 I6ALU(0, em_cmp), N, N,
3213 /* 0x40 - 0x4F */
3214 X16(D(DstReg)),
3215 /* 0x50 - 0x57 */
3216 X8(I(SrcReg | Stack, em_push)),
3217 /* 0x58 - 0x5F */
3218 X8(I(DstReg | Stack, em_pop)),
3219 /* 0x60 - 0x67 */
3220 I(ImplicitOps | Stack | No64, em_pusha),
3221 I(ImplicitOps | Stack | No64, em_popa),
3222 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3223 N, N, N, N,
3224 /* 0x68 - 0x6F */
3225 I(SrcImm | Mov | Stack, em_push),
3226 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3227 I(SrcImmByte | Mov | Stack, em_push),
3228 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3229 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3230 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3231 /* 0x70 - 0x7F */
3232 X16(D(SrcImmByte)),
3233 /* 0x80 - 0x87 */
3234 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3235 G(DstMem | SrcImm | ModRM | Group, group1),
3236 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3237 G(DstMem | SrcImmByte | ModRM | Group, group1),
3238 I2bv(DstMem | SrcReg | ModRM, em_test),
3239 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3240 /* 0x88 - 0x8F */
3241 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3242 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3243 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3244 D(ModRM | SrcMem | NoAccess | DstReg),
3245 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3246 G(0, group1A),
3247 /* 0x90 - 0x97 */
3248 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3249 /* 0x98 - 0x9F */
3250 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3251 I(SrcImmFAddr | No64, em_call_far), N,
3252 II(ImplicitOps | Stack, em_pushf, pushf),
3253 II(ImplicitOps | Stack, em_popf, popf), N, N,
3254 /* 0xA0 - 0xA7 */
3255 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3256 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3257 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3258 I2bv(SrcSI | DstDI | String, em_cmp),
3259 /* 0xA8 - 0xAF */
3260 I2bv(DstAcc | SrcImm, em_test),
3261 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3262 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3263 I2bv(SrcAcc | DstDI | String, em_cmp),
3264 /* 0xB0 - 0xB7 */
3265 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3266 /* 0xB8 - 0xBF */
3267 X8(I(DstReg | SrcImm | Mov, em_mov)),
3268 /* 0xC0 - 0xC7 */
3269 D2bv(DstMem | SrcImmByte | ModRM),
3270 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3271 I(ImplicitOps | Stack, em_ret),
3272 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
3273 G(ByteOp, group11), G(0, group11),
3274 /* 0xC8 - 0xCF */
3275 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3276 D(ImplicitOps), DI(SrcImmByte, intn),
3277 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3278 /* 0xD0 - 0xD7 */
3279 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3280 N, N, N, N,
3281 /* 0xD8 - 0xDF */
3282 N, N, N, N, N, N, N, N,
3283 /* 0xE0 - 0xE7 */
3284 X3(I(SrcImmByte, em_loop)),
3285 I(SrcImmByte, em_jcxz),
3286 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3287 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3288 /* 0xE8 - 0xEF */
3289 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3290 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3291 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3292 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3293 /* 0xF0 - 0xF7 */
3294 N, DI(ImplicitOps, icebp), N, N,
3295 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3296 G(ByteOp, group3), G(0, group3),
3297 /* 0xF8 - 0xFF */
3298 D(ImplicitOps), D(ImplicitOps),
3299 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3300 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3301};
3302
3303static struct opcode twobyte_table[256] = {
3304 /* 0x00 - 0x0F */
3305 G(0, group6), GD(0, &group7), N, N,
3306 N, I(ImplicitOps | VendorSpecific, em_syscall),
3307 II(ImplicitOps | Priv, em_clts, clts), N,
3308 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3309 N, D(ImplicitOps | ModRM), N, N,
3310 /* 0x10 - 0x1F */
3311 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3312 /* 0x20 - 0x2F */
3313 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3314 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3315 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3316 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3317 N, N, N, N,
3318 N, N, N, N, N, N, N, N,
3319 /* 0x30 - 0x3F */
3320 DI(ImplicitOps | Priv, wrmsr),
3321 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3322 DI(ImplicitOps | Priv, rdmsr),
3323 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3324 I(ImplicitOps | VendorSpecific, em_sysenter),
3325 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3326 N, N,
3327 N, N, N, N, N, N, N, N,
3328 /* 0x40 - 0x4F */
3329 X16(D(DstReg | SrcMem | ModRM | Mov)),
3330 /* 0x50 - 0x5F */
3331 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3332 /* 0x60 - 0x6F */
3333 N, N, N, N,
3334 N, N, N, N,
3335 N, N, N, N,
3336 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3337 /* 0x70 - 0x7F */
3338 N, N, N, N,
3339 N, N, N, N,
3340 N, N, N, N,
3341 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3342 /* 0x80 - 0x8F */
3343 X16(D(SrcImm)),
3344 /* 0x90 - 0x9F */
3345 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3346 /* 0xA0 - 0xA7 */
3347 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3348 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3349 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3350 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3351 /* 0xA8 - 0xAF */
3352 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3353 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3354 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3355 D(DstMem | SrcReg | Src2CL | ModRM),
3356 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3357 /* 0xB0 - 0xB7 */
3358 D2bv(DstMem | SrcReg | ModRM | Lock),
3359 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3360 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
3361 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3362 /* 0xB8 - 0xBF */
3363 N, N,
3364 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3365 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3366 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3367 /* 0xC0 - 0xCF */
3368 D2bv(DstMem | SrcReg | ModRM | Lock),
3369 N, D(DstMem | SrcReg | ModRM | Mov),
3370 N, N, N, GD(0, &group9),
3371 N, N, N, N, N, N, N, N,
3372 /* 0xD0 - 0xDF */
3373 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3374 /* 0xE0 - 0xEF */
3375 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3376 /* 0xF0 - 0xFF */
3377 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3378};
3379
3380#undef D
3381#undef N
3382#undef G
3383#undef GD
3384#undef I
3385#undef GP
3386#undef EXT
3387
3388#undef D2bv
3389#undef D2bvIP
3390#undef I2bv
3391#undef I6ALU
3392
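/*
 * Example (not in the original source) of immediate decoding below: a
 * SrcImmByte operand fetches one byte and sign-extends it, so 0xf0
 * decodes to -16 (0xfffffff0), while with sign_extension == false, as
 * for SrcImmUByte, the same byte decodes to 0xf0.  imm_size() caps
 * 64-bit operand sizes at 4 bytes, matching the usual x86 convention
 * of sign-extending 32-bit immediates to 64 bits.
 */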
3393static unsigned imm_size(struct decode_cache *c)
3394{
3395 unsigned size;
3396
3397 size = (c->d & ByteOp) ? 1 : c->op_bytes;
3398 if (size == 8)
3399 size = 4;
3400 return size;
3401}
3402
3403static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3404 unsigned size, bool sign_extension)
3405{
3406 struct decode_cache *c = &ctxt->decode;
3407 int rc = X86EMUL_CONTINUE;
3408
3409 op->type = OP_IMM;
3410 op->bytes = size;
3411 op->addr.mem.ea = c->_eip;
3412 /* NB. Immediates are sign-extended as necessary. */
3413 switch (op->bytes) {
3414 case 1:
3415 op->val = insn_fetch(s8, 1, c->_eip);
3416 break;
3417 case 2:
3418 op->val = insn_fetch(s16, 2, c->_eip);
3419 break;
3420 case 4:
3421 op->val = insn_fetch(s32, 4, c->_eip);
3422 break;
3423 }
3424 if (!sign_extension) {
3425 switch (op->bytes) {
3426 case 1:
3427 op->val &= 0xff;
3428 break;
3429 case 2:
3430 op->val &= 0xffff;
3431 break;
3432 case 4:
3433 op->val &= 0xffffffff;
3434 break;
3435 }
3436 }
3437done:
3438 return rc;
3439}
3440
3441int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3442{
3443 struct decode_cache *c = &ctxt->decode;
3444 int rc = X86EMUL_CONTINUE;
3445 int mode = ctxt->mode;
3446 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3447 bool op_prefix = false;
3448 struct opcode opcode;
3449 struct operand memop = { .type = OP_NONE }, *memopp = NULL;
3450
3451 c->_eip = ctxt->eip;
3452 c->fetch.start = c->_eip;
3453 c->fetch.end = c->fetch.start + insn_len;
3454 if (insn_len > 0)
3455 memcpy(c->fetch.data, insn, insn_len);
3456
3457 switch (mode) {
3458 case X86EMUL_MODE_REAL:
3459 case X86EMUL_MODE_VM86:
3460 case X86EMUL_MODE_PROT16:
3461 def_op_bytes = def_ad_bytes = 2;
3462 break;
3463 case X86EMUL_MODE_PROT32:
3464 def_op_bytes = def_ad_bytes = 4;
3465 break;
3466#ifdef CONFIG_X86_64
3467 case X86EMUL_MODE_PROT64:
3468 def_op_bytes = 4;
3469 def_ad_bytes = 8;
3470 break;
3471#endif
3472 default:
3473 return -1;
3474 }
3475
3476 c->op_bytes = def_op_bytes;
3477 c->ad_bytes = def_ad_bytes;
3478
3479 /* Legacy prefixes. */
3480 for (;;) {
3481 switch (c->b = insn_fetch(u8, 1, c->_eip)) {
3482 case 0x66: /* operand-size override */
3483 op_prefix = true;
3484 /* switch between 2/4 bytes */
3485 c->op_bytes = def_op_bytes ^ 6;
3486 break;
3487 case 0x67: /* address-size override */
3488 if (mode == X86EMUL_MODE_PROT64)
3489 /* switch between 4/8 bytes */
3490 c->ad_bytes = def_ad_bytes ^ 12;
3491 else
3492 /* switch between 2/4 bytes */
3493 c->ad_bytes = def_ad_bytes ^ 6;
3494 break;
3495 case 0x26: /* ES override */
3496 case 0x2e: /* CS override */
3497 case 0x36: /* SS override */
3498 case 0x3e: /* DS override */
3499 set_seg_override(c, (c->b >> 3) & 3);
3500 break;
3501 case 0x64: /* FS override */
3502 case 0x65: /* GS override */
3503 set_seg_override(c, c->b & 7);
3504 break;
3505 case 0x40 ... 0x4f: /* REX */
3506 if (mode != X86EMUL_MODE_PROT64)
3507 goto done_prefixes;
3508 c->rex_prefix = c->b;
3509 continue;
3510 case 0xf0: /* LOCK */
3511 c->lock_prefix = 1;
3512 break;
3513 case 0xf2: /* REPNE/REPNZ */
3514 case 0xf3: /* REP/REPE/REPZ */
3515 c->rep_prefix = c->b;
3516 break;
3517 default:
3518 goto done_prefixes;
3519 }
3520
3521 /* Any legacy prefix after a REX prefix nullifies its effect. */
3523 c->rex_prefix = 0;
3524 }
3525
3526done_prefixes:
3527
3528 /* REX prefix. */
3529 if (c->rex_prefix & 8)
3530 c->op_bytes = 8; /* REX.W */
3531
3532 /* Opcode byte(s). */
3533 opcode = opcode_table[c->b];
3534 /* Two-byte opcode? */
3535 if (c->b == 0x0f) {
3536 c->twobyte = 1;
3537 c->b = insn_fetch(u8, 1, c->_eip);
3538 opcode = twobyte_table[c->b];
3539 }
3540 c->d = opcode.flags;
3541
3542 while (c->d & GroupMask) {
3543 switch (c->d & GroupMask) {
3544 case Group:
3545 c->modrm = insn_fetch(u8, 1, c->_eip);
3546 --c->_eip;
3547 goffset = (c->modrm >> 3) & 7;
3548 opcode = opcode.u.group[goffset];
3549 break;
3550 case GroupDual:
3551 c->modrm = insn_fetch(u8, 1, c->_eip);
3552 --c->_eip;
3553 goffset = (c->modrm >> 3) & 7;
3554 if ((c->modrm >> 6) == 3)
3555 opcode = opcode.u.gdual->mod3[goffset];
3556 else
3557 opcode = opcode.u.gdual->mod012[goffset];
3558 break;
3559 case RMExt:
3560 goffset = c->modrm & 7;
3561 opcode = opcode.u.group[goffset];
3562 break;
3563 case Prefix:
3564 if (c->rep_prefix && op_prefix)
3565 return X86EMUL_UNHANDLEABLE;
3566 simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
3567 switch (simd_prefix) {
3568 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3569 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3570 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3571 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3572 }
3573 break;
3574 default:
3575 return X86EMUL_UNHANDLEABLE;
3576 }
3577
3578 c->d &= ~GroupMask;
3579 c->d |= opcode.flags;
3580 }
3581
3582 c->execute = opcode.u.execute;
3583 c->check_perm = opcode.check_perm;
3584 c->intercept = opcode.intercept;
3585
3586 /* Unrecognised? */
3587 if (c->d == 0 || (c->d & Undefined))
3588 return -1;
3589
3590 if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3591 return -1;
3592
3593 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
3594 c->op_bytes = 8;
3595
3596 if (c->d & Op3264) {
3597 if (mode == X86EMUL_MODE_PROT64)
3598 c->op_bytes = 8;
3599 else
3600 c->op_bytes = 4;
3601 }
3602
3603 if (c->d & Sse)
3604 c->op_bytes = 16;
3605
3606 /* ModRM and SIB bytes. */
3607 if (c->d & ModRM) {
3608 rc = decode_modrm(ctxt, &memop);
3609 if (!c->has_seg_override)
3610 set_seg_override(c, c->modrm_seg);
3611 } else if (c->d & MemAbs)
3612 rc = decode_abs(ctxt, &memop);
3613 if (rc != X86EMUL_CONTINUE)
3614 goto done;
3615
3616 if (!c->has_seg_override)
3617 set_seg_override(c, VCPU_SREG_DS);
3618
3619 memop.addr.mem.seg = seg_override(ctxt, c);
3620
3621 if (memop.type == OP_MEM && c->ad_bytes != 8)
3622 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3623
3624 /*
3625 * Decode and fetch the source operand: register, memory
3626 * or immediate.
3627 */
3628 switch (c->d & SrcMask) {
3629 case SrcNone:
3630 break;
3631 case SrcReg:
3632 decode_register_operand(ctxt, &c->src, c, 0);
3633 break;
3634 case SrcMem16:
3635 memop.bytes = 2;
3636 goto srcmem_common;
3637 case SrcMem32:
3638 memop.bytes = 4;
3639 goto srcmem_common;
3640 case SrcMem:
3641 memop.bytes = (c->d & ByteOp) ? 1 :
3642 c->op_bytes;
3643 srcmem_common:
3644 c->src = memop;
3645 memopp = &c->src;
3646 break;
3647 case SrcImmU16:
3648 rc = decode_imm(ctxt, &c->src, 2, false);
3649 break;
3650 case SrcImm:
3651 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
3652 break;
3653 case SrcImmU:
3654 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
3655 break;
3656 case SrcImmByte:
3657 rc = decode_imm(ctxt, &c->src, 1, true);
3658 break;
3659 case SrcImmUByte:
3660 rc = decode_imm(ctxt, &c->src, 1, false);
3661 break;
3662 case SrcAcc:
3663 c->src.type = OP_REG;
3664 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3665 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
3666 fetch_register_operand(&c->src);
3667 break;
3668 case SrcOne:
3669 c->src.bytes = 1;
3670 c->src.val = 1;
3671 break;
3672 case SrcSI:
3673 c->src.type = OP_MEM;
3674 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3675 c->src.addr.mem.ea =
3676 register_address(c, c->regs[VCPU_REGS_RSI]);
3677 c->src.addr.mem.seg = seg_override(ctxt, c);
3678 c->src.val = 0;
3679 break;
3680 case SrcImmFAddr:
3681 c->src.type = OP_IMM;
3682 c->src.addr.mem.ea = c->_eip;
3683 c->src.bytes = c->op_bytes + 2;
3684 insn_fetch_arr(c->src.valptr, c->src.bytes, c->_eip);
3685 break;
3686 case SrcMemFAddr:
3687 memop.bytes = c->op_bytes + 2;
3688 goto srcmem_common;
3690 case SrcDX:
3691 c->src.type = OP_REG;
3692 c->src.bytes = 2;
3693 c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
3694 fetch_register_operand(&c->src);
3695 break;
3696 }
3697
3698 if (rc != X86EMUL_CONTINUE)
3699 goto done;
3700
3701 /*
3702 * Decode and fetch the second source operand: register, memory
3703 * or immediate.
3704 */
3705 switch (c->d & Src2Mask) {
3706 case Src2None:
3707 break;
3708 case Src2CL:
3709 c->src2.bytes = 1;
3710		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
3711 break;
3712 case Src2ImmByte:
3713 rc = decode_imm(ctxt, &c->src2, 1, true);
3714 break;
3715 case Src2One:
3716 c->src2.bytes = 1;
3717 c->src2.val = 1;
3718 break;
3719 case Src2Imm:
3720 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
3721 break;
3722 }
3723
3724 if (rc != X86EMUL_CONTINUE)
3725 goto done;
3726
3727 /* Decode and fetch the destination operand: register or memory. */
3728 switch (c->d & DstMask) {
3729 case DstReg:
3730 decode_register_operand(ctxt, &c->dst, c,
3731 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
3732 break;
3733 case DstImmUByte:
3734 c->dst.type = OP_IMM;
3735 c->dst.addr.mem.ea = c->_eip;
3736 c->dst.bytes = 1;
3737 c->dst.val = insn_fetch(u8, 1, c->_eip);
3738 break;
3739 case DstMem:
3740 case DstMem64:
3741 c->dst = memop;
3742 memopp = &c->dst;
3743 if ((c->d & DstMask) == DstMem64)
3744 c->dst.bytes = 8;
3745 else
3746 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3747 if (c->d & BitOp)
3748 fetch_bit_operand(c);
3749 c->dst.orig_val = c->dst.val;
3750 break;
3751 case DstAcc:
3752 c->dst.type = OP_REG;
3753 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3754 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
3755 fetch_register_operand(&c->dst);
3756 c->dst.orig_val = c->dst.val;
3757 break;
3758 case DstDI:
3759 c->dst.type = OP_MEM;
3760 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
3761 c->dst.addr.mem.ea =
3762 register_address(c, c->regs[VCPU_REGS_RDI]);
3763 c->dst.addr.mem.seg = VCPU_SREG_ES;
3764 c->dst.val = 0;
3765 break;
3766 case DstDX:
3767 c->dst.type = OP_REG;
3768 c->dst.bytes = 2;
3769 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
3770 fetch_register_operand(&c->dst);
3771 break;
3772 case ImplicitOps:
3773 /* Special instructions do their own operand decoding. */
3774 default:
3775 c->dst.type = OP_NONE; /* Disable writeback. */
3776 break;
3777 }
3778
3779done:
3780 if (memopp && memopp->type == OP_MEM && c->rip_relative)
3781 memopp->addr.mem.ea += c->_eip;
3782
3783 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3784}
3785
3786static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3787{
3788 struct decode_cache *c = &ctxt->decode;
3789
3790	/* The second termination condition applies only to REPE
3791	 * and REPNE. If the repeat string operation prefix is
3792	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
3793	 * termination condition:
3794 * - if REPE/REPZ and ZF = 0 then done
3795 * - if REPNE/REPNZ and ZF = 1 then done
3796 */
3797 if (((c->b == 0xa6) || (c->b == 0xa7) ||
3798 (c->b == 0xae) || (c->b == 0xaf))
3799 && (((c->rep_prefix == REPE_PREFIX) &&
3800 ((ctxt->eflags & EFLG_ZF) == 0))
3801 || ((c->rep_prefix == REPNE_PREFIX) &&
3802 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3803 return true;
3804
3805 return false;
3806}
3807
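/*
 * Overview (not in the original source) of the execution path below:
 * decode has already run, so this routine performs the fault checks
 * (LOCK, SSE, privilege, mode), gives nested-guest intercepts three
 * chances to trigger (pre-exception, post-exception and
 * post-memory-access), reads the memory operands, dispatches either to
 * an ->execute hook or to the big opcode switch, and then falls
 * through to writeback.
 */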
3808int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3809{
3810 struct x86_emulate_ops *ops = ctxt->ops;
3811 u64 msr_data;
3812 struct decode_cache *c = &ctxt->decode;
3813 int rc = X86EMUL_CONTINUE;
3814 int saved_dst_type = c->dst.type;
3815
3816 c->mem_read.pos = 0;
3817
3818 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3819 rc = emulate_ud(ctxt);
3820 goto done;
3821 }
3822
3823 /* LOCK prefix is allowed only with some instructions */
3824 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3825 rc = emulate_ud(ctxt);
3826 goto done;
3827 }
3828
3829 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3830 rc = emulate_ud(ctxt);
3831 goto done;
3832 }
3833
3834 if ((c->d & Sse)
3835 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3836 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3837 rc = emulate_ud(ctxt);
3838 goto done;
3839 }
3840
3841 if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3842 rc = emulate_nm(ctxt);
3843 goto done;
3844 }
3845
3846 if (unlikely(ctxt->guest_mode) && c->intercept) {
3847 rc = emulator_check_intercept(ctxt, c->intercept,
3848 X86_ICPT_PRE_EXCEPT);
3849 if (rc != X86EMUL_CONTINUE)
3850 goto done;
3851 }
3852
3853 /* Privileged instruction can be executed only in CPL=0 */
3854 if ((c->d & Priv) && ops->cpl(ctxt)) {
3855 rc = emulate_gp(ctxt, 0);
3856 goto done;
3857 }
3858
3859 /* Instruction can only be executed in protected mode */
3860 if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3861 rc = emulate_ud(ctxt);
3862 goto done;
3863 }
3864
3865 /* Do instruction specific permission checks */
3866 if (c->check_perm) {
3867 rc = c->check_perm(ctxt);
3868 if (rc != X86EMUL_CONTINUE)
3869 goto done;
3870 }
3871
3872 if (unlikely(ctxt->guest_mode) && c->intercept) {
3873 rc = emulator_check_intercept(ctxt, c->intercept,
3874 X86_ICPT_POST_EXCEPT);
3875 if (rc != X86EMUL_CONTINUE)
3876 goto done;
3877 }
3878
3879 if (c->rep_prefix && (c->d & String)) {
3880 /* All REP prefixes have the same first termination condition */
3881 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3882 ctxt->eip = c->_eip;
3883 goto done;
3884 }
3885 }
3886
3887 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3888 rc = segmented_read(ctxt, c->src.addr.mem,
3889 c->src.valptr, c->src.bytes);
3890 if (rc != X86EMUL_CONTINUE)
3891 goto done;
3892 c->src.orig_val64 = c->src.val64;
3893 }
3894
3895 if (c->src2.type == OP_MEM) {
3896 rc = segmented_read(ctxt, c->src2.addr.mem,
3897 &c->src2.val, c->src2.bytes);
3898 if (rc != X86EMUL_CONTINUE)
3899 goto done;
3900 }
3901
3902 if ((c->d & DstMask) == ImplicitOps)
3903 goto special_insn;
3904
3906 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3907 /* optimisation - avoid slow emulated read if Mov */
3908 rc = segmented_read(ctxt, c->dst.addr.mem,
3909 &c->dst.val, c->dst.bytes);
3910 if (rc != X86EMUL_CONTINUE)
3911 goto done;
3912 }
3913 c->dst.orig_val = c->dst.val;
3914
3915special_insn:
3916
3917 if (unlikely(ctxt->guest_mode) && c->intercept) {
3918 rc = emulator_check_intercept(ctxt, c->intercept,
3919 X86_ICPT_POST_MEMACCESS);
3920 if (rc != X86EMUL_CONTINUE)
3921 goto done;
3922 }
3923
3924 if (c->execute) {
3925 rc = c->execute(ctxt);
3926 if (rc != X86EMUL_CONTINUE)
3927 goto done;
3928 goto writeback;
3929 }
3930
3931 if (c->twobyte)
3932 goto twobyte_insn;
3933
3934 switch (c->b) {
3935 case 0x06: /* push es */
3936 rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
3937 break;
3938 case 0x07: /* pop es */
3939 rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
3940 break;
3941 case 0x0e: /* push cs */
3942 rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
3943 break;
3944 case 0x16: /* push ss */
3945 rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
3946 break;
3947 case 0x17: /* pop ss */
3948 rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
3949 break;
3950 case 0x1e: /* push ds */
3951 rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
3952 break;
3953 case 0x1f: /* pop ds */
3954 rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
3955 break;
3956 case 0x40 ... 0x47: /* inc r16/r32 */
3957 emulate_1op("inc", c->dst, ctxt->eflags);
3958 break;
3959 case 0x48 ... 0x4f: /* dec r16/r32 */
3960 emulate_1op("dec", c->dst, ctxt->eflags);
3961 break;
3962 case 0x63: /* movsxd */
3963 if (ctxt->mode != X86EMUL_MODE_PROT64)
3964 goto cannot_emulate;
3965 c->dst.val = (s32) c->src.val;
3966 break;
3967 case 0x6c: /* insb */
3968 case 0x6d: /* insw/insd */
3969 c->src.val = c->regs[VCPU_REGS_RDX];
3970 goto do_io_in;
3971 case 0x6e: /* outsb */
3972 case 0x6f: /* outsw/outsd */
3973 c->dst.val = c->regs[VCPU_REGS_RDX];
3974 goto do_io_out;
3976 case 0x70 ... 0x7f: /* jcc (short) */
3977 if (test_cc(c->b, ctxt->eflags))
3978 jmp_rel(c, c->src.val);
3979 break;
3980 case 0x8d: /* lea r16/r32, m */
3981 c->dst.val = c->src.addr.mem.ea;
3982 break;
3983 case 0x8f: /* pop (sole member of Grp1a) */
3984 rc = em_grp1a(ctxt);
3985 break;
3986 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3987 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3988 break;
3989 rc = em_xchg(ctxt);
3990 break;
3991 case 0x98: /* cbw/cwde/cdqe */
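		/*
		 * Sign-extend the low half of rAX in place, e.g. cbw
		 * (op_bytes == 2) turns AL = 0x80 into AX = 0xff80.
		 */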
3992 switch (c->op_bytes) {
3993 case 2: c->dst.val = (s8)c->dst.val; break;
3994 case 4: c->dst.val = (s16)c->dst.val; break;
3995 case 8: c->dst.val = (s32)c->dst.val; break;
3996 }
3997 break;
3998 case 0xc0 ... 0xc1:
3999 rc = em_grp2(ctxt);
4000 break;
4001 case 0xc4: /* les */
4002 rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
4003 break;
4004 case 0xc5: /* lds */
4005 rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
4006 break;
4007 case 0xcc: /* int3 */
4008 rc = emulate_int(ctxt, 3);
4009 break;
4010 case 0xcd: /* int n */
4011 rc = emulate_int(ctxt, c->src.val);
4012 break;
4013 case 0xce: /* into */
4014 if (ctxt->eflags & EFLG_OF)
4015 rc = emulate_int(ctxt, 4);
4016 break;
4017 case 0xd0 ... 0xd1: /* Grp2 */
4018 rc = em_grp2(ctxt);
4019 break;
4020 case 0xd2 ... 0xd3: /* Grp2 */
4021 c->src.val = c->regs[VCPU_REGS_RCX];
4022 rc = em_grp2(ctxt);
4023 break;
4024 case 0xe4: /* inb */
4025 case 0xe5: /* in */
4026 goto do_io_in;
4027 case 0xe6: /* outb */
4028 case 0xe7: /* out */
4029 goto do_io_out;
4030 case 0xe8: /* call (near) */ {
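		/*
		 * c->_eip already points past the call instruction, so it
		 * is pushed as the return address before the displacement
		 * is applied.
		 */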
4031 long int rel = c->src.val;
4032 c->src.val = (unsigned long) c->_eip;
4033 jmp_rel(c, rel);
4034 rc = em_push(ctxt);
4035 break;
4036 }
4037 case 0xe9: /* jmp rel */
4038 case 0xeb: /* jmp rel short */
4039 jmp_rel(c, c->src.val);
4040 c->dst.type = OP_NONE; /* Disable writeback. */
4041 break;
4042 case 0xec: /* in al,dx */
4043 case 0xed: /* in (e/r)ax,dx */
4044 do_io_in:
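		/*
		 * A zero return means the input is not available yet; the
		 * I/O is completed outside the emulator and emulation is
		 * restarted later (hence "IO is needed" below).
		 */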
4045 if (!pio_in_emulated(ctxt, c->dst.bytes, c->src.val,
4046 &c->dst.val))
4047 goto done; /* IO is needed */
4048 break;
4049 case 0xee: /* out dx,al */
4050 case 0xef: /* out dx,(e/r)ax */
4051 do_io_out:
4052 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
4053 &c->src.val, 1);
4054 c->dst.type = OP_NONE; /* Disable writeback. */
4055 break;
4056 case 0xf4: /* hlt */
4057 ctxt->ops->halt(ctxt);
4058 break;
4059 case 0xf5: /* cmc */
4060		/* complement the carry flag in the eflags register */
4061 ctxt->eflags ^= EFLG_CF;
4062 break;
4063 case 0xf6 ... 0xf7: /* Grp3 */
4064 rc = em_grp3(ctxt);
4065 break;
4066 case 0xf8: /* clc */
4067 ctxt->eflags &= ~EFLG_CF;
4068 break;
4069 case 0xf9: /* stc */
4070 ctxt->eflags |= EFLG_CF;
4071 break;
4072 case 0xfc: /* cld */
4073 ctxt->eflags &= ~EFLG_DF;
4074 break;
4075 case 0xfd: /* std */
4076 ctxt->eflags |= EFLG_DF;
4077 break;
4078 case 0xfe: /* Grp4 */
4079 rc = em_grp45(ctxt);
4080 break;
4081 case 0xff: /* Grp5 */
4082 rc = em_grp45(ctxt);
4083 break;
4084 default:
4085 goto cannot_emulate;
4086 }
4087
4088 if (rc != X86EMUL_CONTINUE)
4089 goto done;
4090
4091writeback:
4092 rc = writeback(ctxt);
4093 if (rc != X86EMUL_CONTINUE)
4094 goto done;
4095
4096	/*
4097	 * Restore the dst operand type in case the decode cache is reused
4098	 * (happens for string instructions).
4099	 */
4100 c->dst.type = saved_dst_type;
4101
4102 if ((c->d & SrcMask) == SrcSI)
4103 string_addr_inc(ctxt, seg_override(ctxt, c),
4104 VCPU_REGS_RSI, &c->src);
4105
4106 if ((c->d & DstMask) == DstDI)
4107 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4108 &c->dst);
4109
4110 if (c->rep_prefix && (c->d & String)) {
4111 struct read_cache *r = &c->io_read;
4112 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
4113
4114 if (!string_insn_completed(ctxt)) {
4115			/*
4116			 * Re-enter the guest when the pio read-ahead buffer
4117			 * is empty or, if it is not used, after every 1024
4118			 * iterations.
4119			 */
4119 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
4120 (r->end == 0 || r->end != r->pos)) {
4121 /*
4122 * Reset read cache. Usually happens before
4123 * decode, but since instruction is restarted
4124 * we have to do it here.
4125 */
4126 c->mem_read.end = 0;
4127 return EMULATION_RESTART;
4128 }
4129 goto done; /* skip rip writeback */
4130 }
4131 }
4132
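	/* All side effects are committed; advance the architectural RIP. */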
4133 ctxt->eip = c->_eip;
4134
4135done:
4136 if (rc == X86EMUL_PROPAGATE_FAULT)
4137 ctxt->have_exception = true;
4138 if (rc == X86EMUL_INTERCEPTED)
4139 return EMULATION_INTERCEPTED;
4140
4141 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4142
4143twobyte_insn:
4144 switch (c->b) {
4145 case 0x09: /* wbinvd */
4146		ctxt->ops->wbinvd(ctxt);
4147 break;
4148 case 0x08: /* invd */
4149 case 0x0d: /* GrpP (prefetch) */
4150 case 0x18: /* Grp16 (prefetch/nop) */
4151 break;
4152 case 0x20: /* mov cr, reg */
4153 c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
4154 break;
4155 case 0x21: /* mov from dr to reg */
4156 ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
4157 break;
4158 case 0x22: /* mov reg, cr */
4159 if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
4160 emulate_gp(ctxt, 0);
4161 rc = X86EMUL_PROPAGATE_FAULT;
4162 goto done;
4163 }
4164 c->dst.type = OP_NONE;
4165 break;
4166 case 0x23: /* mov from reg to dr */
4167 if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
4168 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4169 ~0ULL : ~0U)) < 0) {
4170 /* #UD condition is already handled by the code above */
4171 emulate_gp(ctxt, 0);
4172 rc = X86EMUL_PROPAGATE_FAULT;
4173 goto done;
4174 }
4175
4176 c->dst.type = OP_NONE; /* no writeback */
4177 break;
4178 case 0x30:
4179 /* wrmsr */
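		/* rCX selects the MSR; the 64-bit value comes from EDX:EAX */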
4180 msr_data = (u32)c->regs[VCPU_REGS_RAX]
4181 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
4182 if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
4183 emulate_gp(ctxt, 0);
4184 rc = X86EMUL_PROPAGATE_FAULT;
4185 goto done;
4186 }
4187 rc = X86EMUL_CONTINUE;
4188 break;
4189 case 0x32:
4190 /* rdmsr */
4191 if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
4192 emulate_gp(ctxt, 0);
4193 rc = X86EMUL_PROPAGATE_FAULT;
4194 goto done;
4195 } else {
4196 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
4197 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
4198 }
4199 rc = X86EMUL_CONTINUE;
4200 break;
4201 case 0x40 ... 0x4f: /* cmov */
4202 c->dst.val = c->dst.orig_val = c->src.val;
4203 if (!test_cc(c->b, ctxt->eflags))
4204 c->dst.type = OP_NONE; /* no writeback */
4205 break;
4206	case 0x80 ... 0x8f: /* jcc (near) */
4207 if (test_cc(c->b, ctxt->eflags))
4208 jmp_rel(c, c->src.val);
4209 break;
4210 case 0x90 ... 0x9f: /* setcc r/m8 */
4211 c->dst.val = test_cc(c->b, ctxt->eflags);
4212 break;
4213 case 0xa0: /* push fs */
4214 rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
4215 break;
4216 case 0xa1: /* pop fs */
4217 rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
4218 break;
4219 case 0xa3:
4220 bt: /* bt */
4221 c->dst.type = OP_NONE;
4222		/* the bit offset is taken modulo the operand width */
4223 c->src.val &= (c->dst.bytes << 3) - 1;
4224 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
4225 break;
4226 case 0xa4: /* shld imm8, r, r/m */
4227 case 0xa5: /* shld cl, r, r/m */
4228 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
4229 break;
4230 case 0xa8: /* push gs */
4231 rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
4232 break;
4233 case 0xa9: /* pop gs */
4234 rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
4235 break;
4236 case 0xab:
4237 bts: /* bts */
4238 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
4239 break;
4240 case 0xac: /* shrd imm8, r, r/m */
4241 case 0xad: /* shrd cl, r, r/m */
4242 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
4243 break;
4244 case 0xae: /* clflush */
4245 break;
4246 case 0xb0 ... 0xb1: /* cmpxchg */
4247 /*
4248 * Save real source value, then compare EAX against
4249 * destination.
4250 */
4251 c->src.orig_val = c->src.val;
4252 c->src.val = c->regs[VCPU_REGS_RAX];
4253 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
4254 if (ctxt->eflags & EFLG_ZF) {
4255 /* Success: write back to memory. */
4256 c->dst.val = c->src.orig_val;
4257 } else {
4258 /* Failure: write the value we saw to EAX. */
4259 c->dst.type = OP_REG;
4260 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
4261 }
4262 break;
4263 case 0xb2: /* lss */
4264 rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
4265 break;
4266 case 0xb3:
4267 btr: /* btr */
4268 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
4269 break;
4270 case 0xb4: /* lfs */
4271 rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
4272 break;
4273 case 0xb5: /* lgs */
4274 rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
4275 break;
4276 case 0xb6 ... 0xb7: /* movzx */
4277 c->dst.bytes = c->op_bytes;
4278 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
4279 : (u16) c->src.val;
4280 break;
4281 case 0xba: /* Grp8 */
4282 switch (c->modrm_reg & 3) {
4283 case 0:
4284 goto bt;
4285 case 1:
4286 goto bts;
4287 case 2:
4288 goto btr;
4289 case 3:
4290 goto btc;
4291 }
4292 break;
4293 case 0xbb:
4294 btc: /* btc */
4295 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
4296 break;
4297 case 0xbc: { /* bsf */
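		/*
		 * Hardware leaves the destination undefined and sets ZF when
		 * the source is zero; mirror that by suppressing writeback.
		 */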
4298 u8 zf;
4299 __asm__ ("bsf %2, %0; setz %1"
4300 : "=r"(c->dst.val), "=q"(zf)
4301 : "r"(c->src.val));
4302 ctxt->eflags &= ~X86_EFLAGS_ZF;
4303 if (zf) {
4304 ctxt->eflags |= X86_EFLAGS_ZF;
4305 c->dst.type = OP_NONE; /* Disable writeback. */
4306 }
4307 break;
4308 }
4309 case 0xbd: { /* bsr */
4310 u8 zf;
4311 __asm__ ("bsr %2, %0; setz %1"
4312 : "=r"(c->dst.val), "=q"(zf)
4313 : "r"(c->src.val));
4314 ctxt->eflags &= ~X86_EFLAGS_ZF;
4315 if (zf) {
4316 ctxt->eflags |= X86_EFLAGS_ZF;
4317 c->dst.type = OP_NONE; /* Disable writeback. */
4318 }
4319 break;
4320 }
4321 case 0xbe ... 0xbf: /* movsx */
4322 c->dst.bytes = c->op_bytes;
4323 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
4324 (s16) c->src.val;
4325 break;
4326 case 0xc0 ... 0xc1: /* xadd */
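		/*
		 * xadd exchanges dst and src and stores the sum in dst, so
		 * the pre-add dst value is written back to the source
		 * register below.
		 */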
4327 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
4328 /* Write back the register source. */
4329 c->src.val = c->dst.orig_val;
4330 write_register_operand(&c->src);
4331 break;
4332 case 0xc3: /* movnti */
4333 c->dst.bytes = c->op_bytes;
4334 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
4335 (u64) c->src.val;
4336 break;
4337 case 0xc7: /* Grp9 (cmpxchg8b) */
4338 rc = em_grp9(ctxt);
4339 break;
4340 default:
4341 goto cannot_emulate;
4342 }
4343
4344 if (rc != X86EMUL_CONTINUE)
4345 goto done;
4346
4347 goto writeback;
4348
4349cannot_emulate:
4350 return EMULATION_FAILED;
4351}