/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone		0ull
#define OpImplicit	1ull	/* No generic decode */
#define OpReg		2ull	/* Register */
#define OpMem		3ull	/* Memory */
#define OpAcc		4ull	/* Accumulator: AL/AX/EAX/RAX */
#define OpDI		5ull	/* ES:DI/EDI/RDI */
#define OpMem64		6ull	/* Memory, 64-bit */
#define OpImmUByte	7ull	/* Zero-extended 8-bit immediate */
#define OpDX		8ull	/* DX register */
#define OpCL		9ull	/* CL register (for shifts) */
#define OpImmByte	10ull	/* 8-bit sign extended immediate */
#define OpOne		11ull	/* Implied 1 */
#define OpImm		12ull	/* Sign extended up to 32-bit immediate */
#define OpMem16		13ull	/* Memory operand (16-bit). */
#define OpMem32		14ull	/* Memory operand (32-bit). */
#define OpImmU		15ull	/* Immediate operand, zero extended */
#define OpSI		16ull	/* SI/ESI/RSI */
#define OpImmFAddr	17ull	/* Immediate far address */
#define OpMemFAddr	18ull	/* Far address in memory */
#define OpImmU16	19ull	/* Immediate operand, 16 bits, zero extended */
#define OpES		20ull	/* ES */
#define OpCS		21ull	/* CS */
#define OpSS		22ull	/* SS */
#define OpDS		23ull	/* DS */
#define OpFS		24ull	/* FS */
#define OpGS		25ull	/* GS */
#define OpMem8		26ull	/* 8-bit zero extended memory operand */
#define OpImm64		27ull	/* Sign extended 16/32/64-bit immediate */
#define OpXLat		28ull	/* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo		29ull	/* Low part of extended acc (AL/AX/EAX/RAX) */
#define OpAccHi		30ull	/* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits		5	/* Width of operand field */
#define OpMask		((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp		(1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift	1
#define ImplicitOps	(OpImplicit << DstShift)
#define DstReg		(OpReg << DstShift)
#define DstMem		(OpMem << DstShift)
#define DstAcc		(OpAcc << DstShift)
#define DstDI		(OpDI << DstShift)
#define DstMem64	(OpMem64 << DstShift)
#define DstMem16	(OpMem16 << DstShift)
#define DstImmUByte	(OpImmUByte << DstShift)
#define DstDX		(OpDX << DstShift)
#define DstAccLo	(OpAccLo << DstShift)
#define DstMask		(OpMask << DstShift)
/* Source operand type. */
#define SrcShift	6
#define SrcNone		(OpNone << SrcShift)
#define SrcReg		(OpReg << SrcShift)
#define SrcMem		(OpMem << SrcShift)
#define SrcMem16	(OpMem16 << SrcShift)
#define SrcMem32	(OpMem32 << SrcShift)
#define SrcImm		(OpImm << SrcShift)
#define SrcImmByte	(OpImmByte << SrcShift)
#define SrcOne		(OpOne << SrcShift)
#define SrcImmUByte	(OpImmUByte << SrcShift)
#define SrcImmU		(OpImmU << SrcShift)
#define SrcSI		(OpSI << SrcShift)
#define SrcXLat		(OpXLat << SrcShift)
#define SrcImmFAddr	(OpImmFAddr << SrcShift)
#define SrcMemFAddr	(OpMemFAddr << SrcShift)
#define SrcAcc		(OpAcc << SrcShift)
#define SrcImmU16	(OpImmU16 << SrcShift)
#define SrcImm64	(OpImm64 << SrcShift)
#define SrcDX		(OpDX << SrcShift)
#define SrcMem8		(OpMem8 << SrcShift)
#define SrcAccHi	(OpAccHi << SrcShift)
#define SrcMask		(OpMask << SrcShift)
#define BitOp		(1<<11)
#define MemAbs		(1<<12)	/* Memory operand is absolute displacement */
#define String		(1<<13)	/* String instruction (rep capable) */
#define Stack		(1<<14)	/* Stack instruction (push/pop) */
#define GroupMask	(7<<15)	/* Opcode uses one of the group mechanisms */
#define Group		(1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual	(2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix		(3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt		(4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape		(5<<15)	/* Escape to coprocessor instruction */
#define InstrDual	(6<<15)	/* Alternate instruction decoding of mod == 3 */
#define ModeDual	(7<<15)	/* Different instruction for 32/64 bit */
#define Sse		(1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM		(1<<19)
/* Destination is only written; never read. */
#define Mov		(1<<20)
/* Misc flags */
#define Prot		(1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD	(1<<22)	/* Emulate if unsupported by the host */
#define NoAccess	(1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264		(1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined	(1<<25)	/* No Such Instruction */
#define Lock		(1<<26)	/* lock prefix is allowed for the instruction */
#define Priv		(1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64		(1<<28)
#define PageTable	(1 << 29)	/* instruction used to write page table */
#define NotImpl		(1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift	(31)
#define Src2None	(OpNone << Src2Shift)
#define Src2Mem		(OpMem << Src2Shift)
#define Src2CL		(OpCL << Src2Shift)
#define Src2ImmByte	(OpImmByte << Src2Shift)
#define Src2One		(OpOne << Src2Shift)
#define Src2Imm		(OpImm << Src2Shift)
#define Src2ES		(OpES << Src2Shift)
#define Src2CS		(OpCS << Src2Shift)
#define Src2SS		(OpSS << Src2Shift)
#define Src2DS		(OpDS << Src2Shift)
#define Src2FS		(OpFS << Src2Shift)
#define Src2GS		(OpGS << Src2Shift)
#define Src2Mask	(OpMask << Src2Shift)
#define Mmx		((u64)1 << 40)	/* MMX Vector instruction */
#define Aligned		((u64)1 << 41)	/* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned	((u64)1 << 42)	/* Explicitly unaligned (e.g. MOVDQU) */
#define Avx		((u64)1 << 43)	/* Advanced Vector Extensions */
#define Fastop		((u64)1 << 44)	/* Use opcode::u.fastop */
#define NoWrite		((u64)1 << 45)	/* No writeback */
#define SrcWrite	((u64)1 << 46)	/* Write back src operand */
#define NoMod		((u64)1 << 47)	/* Mod field is ignored */
#define Intercept	((u64)1 << 48)	/* Has valid intercept field */
#define CheckPerm	((u64)1 << 49)	/* Has valid check_perm field */
#define PrivUD		((u64)1 << 51)	/* #UD instead of #GP on CPL > 0 */
#define NearBranch	((u64)1 << 52)	/* Near branches */
#define No16		((u64)1 << 53)	/* No 16 bit operand */
#define IncSP		((u64)1 << 54)	/* SP is incremented before ModRM calc */

#define DstXacc		(DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
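
/*
 * For illustration: each X<n>() macro simply repeats its argument n times,
 * e.g. X4(op) expands to "op, op, op, op". This keeps the 256-entry opcode
 * tables that use these macros compact.
 */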

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
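
/*
 * For illustration only (a sketch, not code from this section): because the
 * b/w/l/q variants of each fastop sit FASTOP_SIZE bytes apart, a caller can
 * select the entry for the current operand size by arithmetic instead of a
 * jump table, roughly:
 *
 *	void (*fop)(struct fastop *) = em_add;	(start of the add table)
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */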

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
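
/*
 * Sketch of how the union above is resolved: the decoder uses
 * flags & GroupMask to pick the live member, e.g. GroupDual means u.gdual,
 * and ModRM mod == 3 then selects gdual->mod3[modrm_reg] over
 * gdual->mod012[modrm_reg].
 */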

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
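
/*
 * For illustration (a sketch; the actual dispatch helper appears later in
 * this file): after a fastop returns, its result flags are merged back into
 * the saved value along the lines of
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */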

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
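
/*
 * For illustration: each SETcc stub is exactly 4 bytes (setcc %al is a
 * 3-byte instruction plus a 1-byte ret), so ".align 4" gives the stubs a
 * fixed stride and test_cc() below can index them as em_setcc + 4 * cc.
 */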

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}
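
/*
 * Worked example: with a 16-bit address size (ad_bytes == 2), ad_mask() is
 * 0xffff, so a register value of 0x12345678 yields the effective address
 * 0x5678; with ad_bytes == sizeof(unsigned long) the value is used as-is.
 */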

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
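			/*
			 * For an expand-down segment the valid offsets are
			 * (lim, 0xffff] or (lim, 0xffffffff] depending on
			 * desc.d, hence the inverted check below.
			 */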
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible. We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself. Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

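	/* 15UL ^ cur_size equals 15 - cur_size here, since cur_size <= 15. */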
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn. So if
	 * we still do not have enough bytes, we must have hit the 15-byte
	 * instruction-length limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})
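
/*
 * Usage example (as in the decoders below): callers rely on a local
 * "int rc" and a "done:" label being in scope, e.g.
 *
 *	sib = insn_fetch(u8, ctxt);
 */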

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop: when set and no REX prefix is present, reg values 4-7 decode to
 * the legacy high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

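	/*
	 * Even with a 16-bit operand size, LGDT/LIDT load a 24-bit base,
	 * so read 3 base bytes rather than 2 (assumption: this helper is
	 * only used for descriptor-table operands).
	 */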
	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
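
/*
 * For illustration: the low nibble of a Jcc/SETcc opcode is the condition
 * code, so emulating e.g. "jnz rel8" (opcode 0x75) can evaluate the guest
 * condition with test_cc(0x75, ctxt->eflags), which runs the setnz stub.
 */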

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

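/*
 * For BT/BTS/BTR/BTC with a register bit offset and a memory operand, the
 * offset may exceed the operand width; the byte address is displaced by the
 * sign-extended, width-aligned part of the offset. Worked example: a 32-bit
 * BT with bit offset 100 adds (100 & ~31) / 8 = 12 bytes to the effective
 * address and then tests bit 100 & 31 = 4 of that dword.
 */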
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * SS must be a writable data segment, and both the selector's
		 * RPL and the descriptor's DPL must equal CPL.
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
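	/*
	 * In 32-bit code a segment-register push decrements ESP by 4 but
	 * writes only the low two bytes; mimic that by pre-adjusting RSP
	 * and then pushing a 2-byte value.
	 */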
1872 if (ctxt->op_bytes == 4) {
1873 rsp_increment(ctxt, -2);
1874 ctxt->op_bytes = 2;
1875 }
1876
1877 return em_push(ctxt);
1878 }
1879
1880 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1881 {
1882 int seg = ctxt->src2.val;
1883 unsigned long selector;
1884 int rc;
1885
1886 rc = emulate_pop(ctxt, &selector, 2);
1887 if (rc != X86EMUL_CONTINUE)
1888 return rc;
1889
1890 if (ctxt->modrm_reg == VCPU_SREG_SS)
1891 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1892 if (ctxt->op_bytes > 2)
1893 rsp_increment(ctxt, ctxt->op_bytes - 2);
1894
1895 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1896 return rc;
1897 }
1898
1899 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1900 {
1901 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1902 int rc = X86EMUL_CONTINUE;
1903 int reg = VCPU_REGS_RAX;
1904
1905 while (reg <= VCPU_REGS_RDI) {
1906 (reg == VCPU_REGS_RSP) ?
1907 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1908
1909 rc = em_push(ctxt);
1910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
1912
1913 ++reg;
1914 }
1915
1916 return rc;
1917 }
1918
1919 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1920 {
1921 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
1922 return em_push(ctxt);
1923 }
1924
1925 static int em_popa(struct x86_emulate_ctxt *ctxt)
1926 {
1927 int rc = X86EMUL_CONTINUE;
1928 int reg = VCPU_REGS_RDI;
1929
1930 while (reg >= VCPU_REGS_RAX) {
1931 if (reg == VCPU_REGS_RSP) {
1932 rsp_increment(ctxt, ctxt->op_bytes);
1933 --reg;
1934 }
1935
1936 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1937 if (rc != X86EMUL_CONTINUE)
1938 break;
1939 --reg;
1940 }
1941 return rc;
1942 }
1943
1944 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1945 {
1946 const struct x86_emulate_ops *ops = ctxt->ops;
1947 int rc;
1948 struct desc_ptr dt;
1949 gva_t cs_addr;
1950 gva_t eip_addr;
1951 u16 cs, eip;
1952
1953 /* TODO: Add limit checks */
1954 ctxt->src.val = ctxt->eflags;
1955 rc = em_push(ctxt);
1956 if (rc != X86EMUL_CONTINUE)
1957 return rc;
1958
1959 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1960
1961 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1962 rc = em_push(ctxt);
1963 if (rc != X86EMUL_CONTINUE)
1964 return rc;
1965
1966 ctxt->src.val = ctxt->_eip;
1967 rc = em_push(ctxt);
1968 if (rc != X86EMUL_CONTINUE)
1969 return rc;
1970
1971 ops->get_idt(ctxt, &dt);
1972
1973 eip_addr = dt.address + (irq << 2);
1974 cs_addr = dt.address + (irq << 2) + 2;
1975
1976 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1977 if (rc != X86EMUL_CONTINUE)
1978 return rc;
1979
1980 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1981 if (rc != X86EMUL_CONTINUE)
1982 return rc;
1983
1984 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1985 if (rc != X86EMUL_CONTINUE)
1986 return rc;
1987
1988 ctxt->_eip = eip;
1989
1990 return rc;
1991 }
1992
1993 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1994 {
1995 int rc;
1996
1997 invalidate_registers(ctxt);
1998 rc = __emulate_int_real(ctxt, irq);
1999 if (rc == X86EMUL_CONTINUE)
2000 writeback_registers(ctxt);
2001 return rc;
2002 }
2003
2004 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2005 {
2006 switch(ctxt->mode) {
2007 case X86EMUL_MODE_REAL:
2008 return __emulate_int_real(ctxt, irq);
2009 case X86EMUL_MODE_VM86:
2010 case X86EMUL_MODE_PROT16:
2011 case X86EMUL_MODE_PROT32:
2012 case X86EMUL_MODE_PROT64:
2013 default:
2014 /* Protected-mode interrupts are not implemented yet */
2015 return X86EMUL_UNHANDLEABLE;
2016 }
2017 }
2018
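/*
 * Real-mode IRET sketch (editor's summary of the code below): pop IP,
 * then CS, then FLAGS, the inverse of the INT frame. With a 32-bit
 * operand size the popped value replaces EFLAGS except for VM/VIF/VIP,
 * which are preserved; with a 16-bit operand size only the low 16 bits
 * of EFLAGS are replaced.
 */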
2019 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2020 {
2021 int rc = X86EMUL_CONTINUE;
2022 unsigned long temp_eip = 0;
2023 unsigned long temp_eflags = 0;
2024 unsigned long cs = 0;
2025 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
2026 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
2027 EFLG_AC | EFLG_ID | (1 << 1); /* (1 << 1) is EFLAGS bit 1, reserved and always set */
2028 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
2029
2030 /* TODO: Add stack limit check */
2031
2032 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2033
2034 if (rc != X86EMUL_CONTINUE)
2035 return rc;
2036
2037 if (temp_eip & ~0xffff)
2038 return emulate_gp(ctxt, 0);
2039
2040 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2041
2042 if (rc != X86EMUL_CONTINUE)
2043 return rc;
2044
2045 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2046
2047 if (rc != X86EMUL_CONTINUE)
2048 return rc;
2049
2050 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2051
2052 if (rc != X86EMUL_CONTINUE)
2053 return rc;
2054
2055 ctxt->_eip = temp_eip;
2056
2057
2058 if (ctxt->op_bytes == 4)
2059 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2060 else if (ctxt->op_bytes == 2) {
2061 ctxt->eflags &= ~0xffff;
2062 ctxt->eflags |= temp_eflags;
2063 }
2064
2065 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2066 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2067 ctxt->ops->set_nmi_mask(ctxt, false);
2068
2069 return rc;
2070 }
2071
2072 static int em_iret(struct x86_emulate_ctxt *ctxt)
2073 {
2074 switch(ctxt->mode) {
2075 case X86EMUL_MODE_REAL:
2076 return emulate_iret_real(ctxt);
2077 case X86EMUL_MODE_VM86:
2078 case X86EMUL_MODE_PROT16:
2079 case X86EMUL_MODE_PROT32:
2080 case X86EMUL_MODE_PROT64:
2081 default:
2082 /* iret from protected mode is not implemented yet */
2083 return X86EMUL_UNHANDLEABLE;
2084 }
2085 }
2086
2087 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2088 {
2089 int rc;
2090 unsigned short sel, old_sel;
2091 struct desc_struct old_desc, new_desc;
2092 const struct x86_emulate_ops *ops = ctxt->ops;
2093 u8 cpl = ctxt->ops->cpl(ctxt);
2094
2095 /* Assignment of RIP may only fail in 64-bit mode */
2096 if (ctxt->mode == X86EMUL_MODE_PROT64)
2097 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2098 VCPU_SREG_CS);
2099
2100 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2101
2102 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2103 X86_TRANSFER_CALL_JMP,
2104 &new_desc);
2105 if (rc != X86EMUL_CONTINUE)
2106 return rc;
2107
2108 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2109 if (rc != X86EMUL_CONTINUE) {
2110 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2111 /* assigning eip failed; restore the old cs */
2112 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2113 return rc;
2114 }
2115 return rc;
2116 }
2117
2118 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2119 {
2120 return assign_eip_near(ctxt, ctxt->src.val);
2121 }
2122
2123 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2124 {
2125 int rc;
2126 long int old_eip;
2127
2128 old_eip = ctxt->_eip;
2129 rc = assign_eip_near(ctxt, ctxt->src.val);
2130 if (rc != X86EMUL_CONTINUE)
2131 return rc;
2132 ctxt->src.val = old_eip;
2133 rc = em_push(ctxt);
2134 return rc;
2135 }
2136
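/*
 * CMPXCHG8B semantics, for reference (editor's note): compare EDX:EAX
 * with the 64-bit memory operand; on a match set ZF and store ECX:EBX,
 * otherwise clear ZF and load the memory value into EDX:EAX.
 */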
2137 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2138 {
2139 u64 old = ctxt->dst.orig_val64;
2140
2141 if (ctxt->dst.bytes == 16)
2142 return X86EMUL_UNHANDLEABLE;
2143
2144 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2145 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2146 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2147 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2148 ctxt->eflags &= ~EFLG_ZF;
2149 } else {
2150 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2151 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2152
2153 ctxt->eflags |= EFLG_ZF;
2154 }
2155 return X86EMUL_CONTINUE;
2156 }
2157
2158 static int em_ret(struct x86_emulate_ctxt *ctxt)
2159 {
2160 int rc;
2161 unsigned long eip;
2162
2163 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2164 if (rc != X86EMUL_CONTINUE)
2165 return rc;
2166
2167 return assign_eip_near(ctxt, eip);
2168 }
2169
2170 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2171 {
2172 int rc;
2173 unsigned long eip, cs;
2174 u16 old_cs;
2175 int cpl = ctxt->ops->cpl(ctxt);
2176 struct desc_struct old_desc, new_desc;
2177 const struct x86_emulate_ops *ops = ctxt->ops;
2178
2179 if (ctxt->mode == X86EMUL_MODE_PROT64)
2180 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2181 VCPU_SREG_CS);
2182
2183 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2184 if (rc != X86EMUL_CONTINUE)
2185 return rc;
2186 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2187 if (rc != X86EMUL_CONTINUE)
2188 return rc;
2189 /* Outer-privilege level return is not implemented */
2190 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2191 return X86EMUL_UNHANDLEABLE;
2192 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2193 X86_TRANSFER_RET,
2194 &new_desc);
2195 if (rc != X86EMUL_CONTINUE)
2196 return rc;
2197 rc = assign_eip_far(ctxt, eip, &new_desc);
2198 if (rc != X86EMUL_CONTINUE) {
2199 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2200 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2201 }
2202 return rc;
2203 }
2204
2205 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2206 {
2207 int rc;
2208
2209 rc = em_ret_far(ctxt);
2210 if (rc != X86EMUL_CONTINUE)
2211 return rc;
2212 rsp_increment(ctxt, ctxt->src.val);
2213 return X86EMUL_CONTINUE;
2214 }
2215
2216 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2217 {
2218 /* Save real source value, then compare EAX against destination. */
2219 ctxt->dst.orig_val = ctxt->dst.val;
2220 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2221 ctxt->src.orig_val = ctxt->src.val;
2222 ctxt->src.val = ctxt->dst.orig_val;
2223 fastop(ctxt, em_cmp);
2224
2225 if (ctxt->eflags & EFLG_ZF) {
2226 /* Success: write back to memory; no update of EAX */
2227 ctxt->src.type = OP_NONE;
2228 ctxt->dst.val = ctxt->src.orig_val;
2229 } else {
2230 /* Failure: write the value we saw to EAX. */
2231 ctxt->src.type = OP_REG;
2232 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2233 ctxt->src.val = ctxt->dst.orig_val;
2234 /* Create write-cycle to dest by writing the same value */
2235 ctxt->dst.val = ctxt->dst.orig_val;
2236 }
2237 return X86EMUL_CONTINUE;
2238 }
2239
2240 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2241 {
2242 int seg = ctxt->src2.val;
2243 unsigned short sel;
2244 int rc;
2245
2246 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2247
2248 rc = load_segment_descriptor(ctxt, sel, seg);
2249 if (rc != X86EMUL_CONTINUE)
2250 return rc;
2251
2252 ctxt->dst.val = ctxt->src.val;
2253 return rc;
2254 }
2255
2256 static void
2257 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2258 struct desc_struct *cs, struct desc_struct *ss)
2259 {
2260 cs->l = 0; /* will be adjusted later */
2261 set_desc_base(cs, 0); /* flat segment */
2262 cs->g = 1; /* 4kb granularity */
2263 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2264 cs->type = 0x0b; /* Read, Execute, Accessed */
2265 cs->s = 1;
2266 cs->dpl = 0; /* will be adjusted later */
2267 cs->p = 1;
2268 cs->d = 1;
2269 cs->avl = 0;
2270
2271 set_desc_base(ss, 0); /* flat segment */
2272 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2273 ss->g = 1; /* 4kb granularity */
2274 ss->s = 1;
2275 ss->type = 0x03; /* Read/Write, Accessed */
2276 ss->d = 1; /* 32bit stack segment */
2277 ss->dpl = 0;
2278 ss->p = 1;
2279 ss->l = 0;
2280 ss->avl = 0;
2281 }
2282
2283 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2284 {
2285 u32 eax, ebx, ecx, edx;
2286
2287 eax = ecx = 0;
2288 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2289 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2290 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2291 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2292 }
2293
2294 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2295 {
2296 const struct x86_emulate_ops *ops = ctxt->ops;
2297 u32 eax, ebx, ecx, edx;
2298
2299 /*
2300 * syscall is always enabled in long mode, so the check only needs
2301 * to become vendor-specific (via cpuid) when other modes are active.
2302 */
2303 if (ctxt->mode == X86EMUL_MODE_PROT64)
2304 return true;
2305
2306 eax = 0x00000000;
2307 ecx = 0x00000000;
2308 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2309 /*
2310 * Intel ("GenuineIntel")
2311 * Intel CPUs only support "syscall" in 64-bit long mode; even a
2312 * 64-bit guest running a 32-bit compat application will #UD.
2313 * While that behaviour could be hidden by emulating the AMD
2314 * response, AMD CPUs cannot be made to behave like Intel ones,
2315 * so each vendor's native behaviour is followed instead.
2316 */
2317 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2318 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2319 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2320 return false;
2321
2322 /* AMD ("AuthenticAMD") */
2323 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2324 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2325 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2326 return true;
2327
2328 /* AMD ("AMDisbetter!") */
2329 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2330 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2331 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2332 return true;
2333
2334 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2335 return false;
2336 }
2337
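/*
 * MSR_STAR layout refresher (editor's note, matching the code below):
 * bits 47:32 hold the SYSCALL CS selector, with SS loaded as CS + 8.
 * For example, STAR[47:32] = 0x0010 yields CS = 0x10 and SS = 0x18.
 * In long mode RCX receives the return RIP, R11 the saved RFLAGS, and
 * RFLAGS is masked with MSR_SYSCALL_MASK.
 */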
2338 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2339 {
2340 const struct x86_emulate_ops *ops = ctxt->ops;
2341 struct desc_struct cs, ss;
2342 u64 msr_data;
2343 u16 cs_sel, ss_sel;
2344 u64 efer = 0;
2345
2346 /* syscall is not available in real mode or virtual-8086 mode */
2347 if (ctxt->mode == X86EMUL_MODE_REAL ||
2348 ctxt->mode == X86EMUL_MODE_VM86)
2349 return emulate_ud(ctxt);
2350
2351 if (!em_syscall_is_enabled(ctxt))
2352 return emulate_ud(ctxt);
2353
2354 ops->get_msr(ctxt, MSR_EFER, &efer);
2355 setup_syscalls_segments(ctxt, &cs, &ss);
2356
2357 if (!(efer & EFER_SCE))
2358 return emulate_ud(ctxt);
2359
2360 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2361 msr_data >>= 32;
2362 cs_sel = (u16)(msr_data & 0xfffc);
2363 ss_sel = (u16)(msr_data + 8);
2364
2365 if (efer & EFER_LMA) {
2366 cs.d = 0;
2367 cs.l = 1;
2368 }
2369 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2370 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2371
2372 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2373 if (efer & EFER_LMA) {
2374 #ifdef CONFIG_X86_64
2375 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2376
2377 ops->get_msr(ctxt,
2378 ctxt->mode == X86EMUL_MODE_PROT64 ?
2379 MSR_LSTAR : MSR_CSTAR, &msr_data);
2380 ctxt->_eip = msr_data;
2381
2382 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2383 ctxt->eflags &= ~msr_data;
2384 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2385 #endif
2386 } else {
2387 /* legacy mode */
2388 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2389 ctxt->_eip = (u32)msr_data;
2390
2391 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2392 }
2393
2394 return X86EMUL_CONTINUE;
2395 }
2396
2397 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2398 {
2399 const struct x86_emulate_ops *ops = ctxt->ops;
2400 struct desc_struct cs, ss;
2401 u64 msr_data;
2402 u16 cs_sel, ss_sel;
2403 u64 efer = 0;
2404
2405 ops->get_msr(ctxt, MSR_EFER, &efer);
2406 /* inject #GP if in real mode */
2407 if (ctxt->mode == X86EMUL_MODE_REAL)
2408 return emulate_gp(ctxt, 0);
2409
2410 /*
2411 * Not recognized on AMD in compat mode (but is recognized in legacy
2412 * mode).
2413 */
2414 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2415 && !vendor_intel(ctxt))
2416 return emulate_ud(ctxt);
2417
2418 /* sysenter/sysexit have not been tested in 64bit mode. */
2419 if (ctxt->mode == X86EMUL_MODE_PROT64)
2420 return X86EMUL_UNHANDLEABLE;
2421
2422 setup_syscalls_segments(ctxt, &cs, &ss);
2423
2424 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2425 if ((msr_data & 0xfffc) == 0x0)
2426 return emulate_gp(ctxt, 0);
2427
2428 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2429 cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
2430 ss_sel = cs_sel + 8;
2431 if (efer & EFER_LMA) {
2432 cs.d = 0;
2433 cs.l = 1;
2434 }
2435
2436 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2437 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2438
2439 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2440 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2441
2442 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2443 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2444 (u32)msr_data;
2445
2446 return X86EMUL_CONTINUE;
2447 }
2448
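/*
 * SYSEXIT selector math (editor's illustration of the code below): the
 * user selectors are derived from MSR_IA32_SYSENTER_CS. Returning to
 * 32-bit mode uses CS = base + 16 and SS = base + 24; returning to
 * 64-bit mode uses CS = base + 32 and SS = base + 40, with RPL forced
 * to 3 in both cases. RIP comes from RDX and RSP from RCX.
 */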
2449 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2450 {
2451 const struct x86_emulate_ops *ops = ctxt->ops;
2452 struct desc_struct cs, ss;
2453 u64 msr_data, rcx, rdx;
2454 int usermode;
2455 u16 cs_sel = 0, ss_sel = 0;
2456
2457 /* inject #GP if in real mode or Virtual 8086 mode */
2458 if (ctxt->mode == X86EMUL_MODE_REAL ||
2459 ctxt->mode == X86EMUL_MODE_VM86)
2460 return emulate_gp(ctxt, 0);
2461
2462 setup_syscalls_segments(ctxt, &cs, &ss);
2463
2464 if ((ctxt->rex_prefix & 0x8) != 0x0)
2465 usermode = X86EMUL_MODE_PROT64;
2466 else
2467 usermode = X86EMUL_MODE_PROT32;
2468
2469 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2470 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2471
2472 cs.dpl = 3;
2473 ss.dpl = 3;
2474 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2475 switch (usermode) {
2476 case X86EMUL_MODE_PROT32:
2477 cs_sel = (u16)(msr_data + 16);
2478 if ((msr_data & 0xfffc) == 0x0)
2479 return emulate_gp(ctxt, 0);
2480 ss_sel = (u16)(msr_data + 24);
2481 rcx = (u32)rcx;
2482 rdx = (u32)rdx;
2483 break;
2484 case X86EMUL_MODE_PROT64:
2485 cs_sel = (u16)(msr_data + 32);
2486 if (msr_data == 0x0)
2487 return emulate_gp(ctxt, 0);
2488 ss_sel = cs_sel + 8;
2489 cs.d = 0;
2490 cs.l = 1;
2491 if (is_noncanonical_address(rcx) ||
2492 is_noncanonical_address(rdx))
2493 return emulate_gp(ctxt, 0);
2494 break;
2495 }
2496 cs_sel |= SELECTOR_RPL_MASK;
2497 ss_sel |= SELECTOR_RPL_MASK;
2498
2499 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2500 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2501
2502 ctxt->_eip = rdx;
2503 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2504
2505 return X86EMUL_CONTINUE;
2506 }
2507
2508 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2509 {
2510 int iopl;
2511 if (ctxt->mode == X86EMUL_MODE_REAL)
2512 return false;
2513 if (ctxt->mode == X86EMUL_MODE_VM86)
2514 return true;
2515 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2516 return ctxt->ops->cpl(ctxt) > iopl;
2517 }
2518
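/*
 * TSS I/O bitmap lookup, for reference (editor's note): the 16-bit I/O
 * map base lives at TSS offset 102, and port P maps to bit P % 8 of
 * the byte at map base + P / 8. An access of len bytes is allowed only
 * if all len bits starting there are clear; reading a u16 below covers
 * accesses that straddle a byte boundary. E.g. port 0x3f9, len 2 tests
 * bits 1-2 of the byte at map base + 0x7f.
 */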
2519 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2520 u16 port, u16 len)
2521 {
2522 const struct x86_emulate_ops *ops = ctxt->ops;
2523 struct desc_struct tr_seg;
2524 u32 base3;
2525 int r;
2526 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2527 unsigned mask = (1 << len) - 1;
2528 unsigned long base;
2529
2530 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2531 if (!tr_seg.p)
2532 return false;
2533 if (desc_limit_scaled(&tr_seg) < 103)
2534 return false;
2535 base = get_desc_base(&tr_seg);
2536 #ifdef CONFIG_X86_64
2537 base |= ((u64)base3) << 32;
2538 #endif
2539 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2540 if (r != X86EMUL_CONTINUE)
2541 return false;
2542 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2543 return false;
2544 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2545 if (r != X86EMUL_CONTINUE)
2546 return false;
2547 if ((perm >> bit_idx) & mask)
2548 return false;
2549 return true;
2550 }
2551
2552 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2553 u16 port, u16 len)
2554 {
2555 if (ctxt->perm_ok)
2556 return true;
2557
2558 if (emulator_bad_iopl(ctxt))
2559 if (!emulator_io_port_access_allowed(ctxt, port, len))
2560 return false;
2561
2562 ctxt->perm_ok = true;
2563
2564 return true;
2565 }
2566
2567 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2568 struct tss_segment_16 *tss)
2569 {
2570 tss->ip = ctxt->_eip;
2571 tss->flag = ctxt->eflags;
2572 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2573 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2574 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2575 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2576 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2577 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2578 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2579 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2580
2581 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2582 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2583 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2584 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2585 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2586 }
2587
2588 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2589 struct tss_segment_16 *tss)
2590 {
2591 int ret;
2592 u8 cpl;
2593
2594 ctxt->_eip = tss->ip;
2595 ctxt->eflags = tss->flag | 2;
2596 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2597 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2598 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2599 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2600 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2601 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2602 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2603 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2604
2605 /*
2606 * SDM says that segment selectors are loaded before segment
2607 * descriptors
2608 */
2609 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2610 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2611 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2612 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2613 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2614
2615 cpl = tss->cs & 3;
2616
2617 /*
2618 * Now load the segment descriptors. If a fault happens at this
2619 * stage, it is handled in the context of the new task.
2620 */
2621 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2622 X86_TRANSFER_TASK_SWITCH, NULL);
2623 if (ret != X86EMUL_CONTINUE)
2624 return ret;
2625 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2626 X86_TRANSFER_TASK_SWITCH, NULL);
2627 if (ret != X86EMUL_CONTINUE)
2628 return ret;
2629 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2630 X86_TRANSFER_TASK_SWITCH, NULL);
2631 if (ret != X86EMUL_CONTINUE)
2632 return ret;
2633 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2634 X86_TRANSFER_TASK_SWITCH, NULL);
2635 if (ret != X86EMUL_CONTINUE)
2636 return ret;
2637 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2638 X86_TRANSFER_TASK_SWITCH, NULL);
2639 if (ret != X86EMUL_CONTINUE)
2640 return ret;
2641
2642 return X86EMUL_CONTINUE;
2643 }
2644
2645 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2646 u16 tss_selector, u16 old_tss_sel,
2647 ulong old_tss_base, struct desc_struct *new_desc)
2648 {
2649 const struct x86_emulate_ops *ops = ctxt->ops;
2650 struct tss_segment_16 tss_seg;
2651 int ret;
2652 u32 new_tss_base = get_desc_base(new_desc);
2653
2654 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2655 &ctxt->exception);
2656 if (ret != X86EMUL_CONTINUE)
2657 return ret;
2658
2659 save_state_to_tss16(ctxt, &tss_seg);
2660
2661 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2662 &ctxt->exception);
2663 if (ret != X86EMUL_CONTINUE)
2664 return ret;
2665
2666 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2667 &ctxt->exception);
2668 if (ret != X86EMUL_CONTINUE)
2669 return ret;
2670
2671 if (old_tss_sel != 0xffff) {
2672 tss_seg.prev_task_link = old_tss_sel;
2673
2674 ret = ops->write_std(ctxt, new_tss_base,
2675 &tss_seg.prev_task_link,
2676 sizeof tss_seg.prev_task_link,
2677 &ctxt->exception);
2678 if (ret != X86EMUL_CONTINUE)
2679 return ret;
2680 }
2681
2682 return load_state_from_tss16(ctxt, &tss_seg);
2683 }
2684
2685 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2686 struct tss_segment_32 *tss)
2687 {
2688 /* CR3 and the LDT selector are intentionally not saved */
2689 tss->eip = ctxt->_eip;
2690 tss->eflags = ctxt->eflags;
2691 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2692 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2693 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2694 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2695 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2696 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2697 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2698 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2699
2700 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2701 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2702 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2703 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2704 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2705 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2706 }
2707
2708 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2709 struct tss_segment_32 *tss)
2710 {
2711 int ret;
2712 u8 cpl;
2713
2714 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2715 return emulate_gp(ctxt, 0);
2716 ctxt->_eip = tss->eip;
2717 ctxt->eflags = tss->eflags | 2;
2718
2719 /* General purpose registers */
2720 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2721 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2722 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2723 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2724 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2725 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2726 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2727 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2728
2729 /*
2730 * SDM says that segment selectors are loaded before segment
2731 * descriptors. This is important because CPL checks will
2732 * use CS.RPL.
2733 */
2734 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2735 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2736 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2737 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2738 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2739 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2740 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2741
2742 /*
2743 * If we're switching between Protected Mode and VM86, we need to make
2744 * sure to update the mode before loading the segment descriptors so
2745 * that the selectors are interpreted correctly.
2746 */
2747 if (ctxt->eflags & X86_EFLAGS_VM) {
2748 ctxt->mode = X86EMUL_MODE_VM86;
2749 cpl = 3;
2750 } else {
2751 ctxt->mode = X86EMUL_MODE_PROT32;
2752 cpl = tss->cs & 3;
2753 }
2754
2755 /*
2756 * Now load the segment descriptors. If a fault happens at this
2757 * stage, it is handled in the context of the new task.
2758 */
2759 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2760 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2761 if (ret != X86EMUL_CONTINUE)
2762 return ret;
2763 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2764 X86_TRANSFER_TASK_SWITCH, NULL);
2765 if (ret != X86EMUL_CONTINUE)
2766 return ret;
2767 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2768 X86_TRANSFER_TASK_SWITCH, NULL);
2769 if (ret != X86EMUL_CONTINUE)
2770 return ret;
2771 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2772 X86_TRANSFER_TASK_SWITCH, NULL);
2773 if (ret != X86EMUL_CONTINUE)
2774 return ret;
2775 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2776 X86_TRANSFER_TASK_SWITCH, NULL);
2777 if (ret != X86EMUL_CONTINUE)
2778 return ret;
2779 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2780 X86_TRANSFER_TASK_SWITCH, NULL);
2781 if (ret != X86EMUL_CONTINUE)
2782 return ret;
2783 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2784 X86_TRANSFER_TASK_SWITCH, NULL);
2785 if (ret != X86EMUL_CONTINUE)
2786 return ret;
2787
2788 return X86EMUL_CONTINUE;
2789 }
2790
2791 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2792 u16 tss_selector, u16 old_tss_sel,
2793 ulong old_tss_base, struct desc_struct *new_desc)
2794 {
2795 const struct x86_emulate_ops *ops = ctxt->ops;
2796 struct tss_segment_32 tss_seg;
2797 int ret;
2798 u32 new_tss_base = get_desc_base(new_desc);
2799 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2800 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2801
2802 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2803 &ctxt->exception);
2804 if (ret != X86EMUL_CONTINUE)
2805 return ret;
2806
2807 save_state_to_tss32(ctxt, &tss_seg);
2808
2809 /* Only GP registers and segment selectors are saved */
2810 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2811 ldt_sel_offset - eip_offset, &ctxt->exception);
2812 if (ret != X86EMUL_CONTINUE)
2813 return ret;
2814
2815 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2816 &ctxt->exception);
2817 if (ret != X86EMUL_CONTINUE)
2818 return ret;
2819
2820 if (old_tss_sel != 0xffff) {
2821 tss_seg.prev_task_link = old_tss_sel;
2822
2823 ret = ops->write_std(ctxt, new_tss_base,
2824 &tss_seg.prev_task_link,
2825 sizeof tss_seg.prev_task_link,
2826 &ctxt->exception);
2827 if (ret != X86EMUL_CONTINUE)
2828 return ret;
2829 }
2830
2831 return load_state_from_tss32(ctxt, &tss_seg);
2832 }
2833
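/*
 * High-level flow of the task switch below (editor's summary): read
 * the old and new TSS descriptors, apply the DPL check for software
 * interrupts through a task gate, verify the new TSS limit, clear the
 * old busy bit on IRET/JMP, save state into the old TSS, load state
 * from the new one, link back and set NT for CALL/GATE, mark the new
 * TSS busy, set CR0.TS, and finally push any error code.
 */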
2834 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2835 u16 tss_selector, int idt_index, int reason,
2836 bool has_error_code, u32 error_code)
2837 {
2838 const struct x86_emulate_ops *ops = ctxt->ops;
2839 struct desc_struct curr_tss_desc, next_tss_desc;
2840 int ret;
2841 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2842 ulong old_tss_base =
2843 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2844 u32 desc_limit;
2845 ulong desc_addr;
2846
2847 /* FIXME: old_tss_base == ~0 ? */
2848
2849 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2850 if (ret != X86EMUL_CONTINUE)
2851 return ret;
2852 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2853 if (ret != X86EMUL_CONTINUE)
2854 return ret;
2855
2856 /* FIXME: check that next_tss_desc is tss */
2857
2858 /*
2859 * Check privileges. The three cases are task switch caused by...
2860 *
2861 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2862 * 2. Exception/IRQ/iret: No check is performed
2863 * 3. jmp/call to TSS/task-gate: No check is performed since the
2864 * hardware checks it before exiting.
2865 */
2866 if (reason == TASK_SWITCH_GATE) {
2867 if (idt_index != -1) {
2868 /* Software interrupts */
2869 struct desc_struct task_gate_desc;
2870 int dpl;
2871
2872 ret = read_interrupt_descriptor(ctxt, idt_index,
2873 &task_gate_desc);
2874 if (ret != X86EMUL_CONTINUE)
2875 return ret;
2876
2877 dpl = task_gate_desc.dpl;
2878 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2879 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2880 }
2881 }
2882
2883 desc_limit = desc_limit_scaled(&next_tss_desc);
2884 if (!next_tss_desc.p ||
2885 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2886 desc_limit < 0x2b)) {
2887 return emulate_ts(ctxt, tss_selector & 0xfffc);
2888 }
2889
2890 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2891 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2892 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2893 }
2894
2895 if (reason == TASK_SWITCH_IRET)
2896 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2897
2898 /* Set the back link to the previous task only if the NT bit is set
2899 in eflags; note that old_tss_sel is not used after this point. */
2900 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2901 old_tss_sel = 0xffff;
2902
2903 if (next_tss_desc.type & 8)
2904 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2905 old_tss_base, &next_tss_desc);
2906 else
2907 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2908 old_tss_base, &next_tss_desc);
2909 if (ret != X86EMUL_CONTINUE)
2910 return ret;
2911
2912 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2913 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2914
2915 if (reason != TASK_SWITCH_IRET) {
2916 next_tss_desc.type |= (1 << 1); /* set busy flag */
2917 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2918 }
2919
2920 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2921 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2922
2923 if (has_error_code) {
2924 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2925 ctxt->lock_prefix = 0;
2926 ctxt->src.val = (unsigned long) error_code;
2927 ret = em_push(ctxt);
2928 }
2929
2930 return ret;
2931 }
2932
2933 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2934 u16 tss_selector, int idt_index, int reason,
2935 bool has_error_code, u32 error_code)
2936 {
2937 int rc;
2938
2939 invalidate_registers(ctxt);
2940 ctxt->_eip = ctxt->eip;
2941 ctxt->dst.type = OP_NONE;
2942
2943 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2944 has_error_code, error_code);
2945
2946 if (rc == X86EMUL_CONTINUE) {
2947 ctxt->eip = ctxt->_eip;
2948 writeback_registers(ctxt);
2949 }
2950
2951 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2952 }
2953
2954 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2955 struct operand *op)
2956 {
2957 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2958
2959 register_address_increment(ctxt, reg, df * op->bytes);
2960 op->addr.mem.ea = register_address(ctxt, reg);
2961 }
2962
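/*
 * DAS example (editor's illustration): for AL = 0x4b the low nibble
 * exceeds 9, so 6 is subtracted, giving AL = 0x45 with AF set; since
 * the original AL was not above 0x99 and CF was clear, the 0x60
 * adjustment is skipped and CF stays clear.
 */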
2963 static int em_das(struct x86_emulate_ctxt *ctxt)
2964 {
2965 u8 al, old_al;
2966 bool af, cf, old_cf;
2967
2968 cf = ctxt->eflags & X86_EFLAGS_CF;
2969 al = ctxt->dst.val;
2970
2971 old_al = al;
2972 old_cf = cf;
2973 cf = false;
2974 af = ctxt->eflags & X86_EFLAGS_AF;
2975 if ((al & 0x0f) > 9 || af) {
2976 al -= 6;
2977 cf = old_cf | (al >= 250);
2978 af = true;
2979 } else {
2980 af = false;
2981 }
2982 if (old_al > 0x99 || old_cf) {
2983 al -= 0x60;
2984 cf = true;
2985 }
2986
2987 ctxt->dst.val = al;
2988 /* Set PF, ZF, SF */
2989 ctxt->src.type = OP_IMM;
2990 ctxt->src.val = 0;
2991 ctxt->src.bytes = 1;
2992 fastop(ctxt, em_or);
2993 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2994 if (cf)
2995 ctxt->eflags |= X86_EFLAGS_CF;
2996 if (af)
2997 ctxt->eflags |= X86_EFLAGS_AF;
2998 return X86EMUL_CONTINUE;
2999 }
3000
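/*
 * AAM example (editor's illustration): with the default base of 10 and
 * AL = 79, the code below yields AH = 7 and AL = 9, i.e. AX = 0x0709;
 * a base of 0 raises #DE.
 */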
3001 static int em_aam(struct x86_emulate_ctxt *ctxt)
3002 {
3003 u8 al, ah;
3004
3005 if (ctxt->src.val == 0)
3006 return emulate_de(ctxt);
3007
3008 al = ctxt->dst.val & 0xff;
3009 ah = al / ctxt->src.val;
3010 al %= ctxt->src.val;
3011
3012 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3013
3014 /* Set PF, ZF, SF */
3015 ctxt->src.type = OP_IMM;
3016 ctxt->src.val = 0;
3017 ctxt->src.bytes = 1;
3018 fastop(ctxt, em_or);
3019
3020 return X86EMUL_CONTINUE;
3021 }
3022
3023 static int em_aad(struct x86_emulate_ctxt *ctxt)
3024 {
3025 u8 al = ctxt->dst.val & 0xff;
3026 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3027
3028 al = (al + (ah * ctxt->src.val)) & 0xff;
3029
3030 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3031
3032 /* Set PF, ZF, SF */
3033 ctxt->src.type = OP_IMM;
3034 ctxt->src.val = 0;
3035 ctxt->src.bytes = 1;
3036 fastop(ctxt, em_or);
3037
3038 return X86EMUL_CONTINUE;
3039 }
3040
3041 static int em_call(struct x86_emulate_ctxt *ctxt)
3042 {
3043 int rc;
3044 long rel = ctxt->src.val;
3045
3046 ctxt->src.val = (unsigned long)ctxt->_eip;
3047 rc = jmp_rel(ctxt, rel);
3048 if (rc != X86EMUL_CONTINUE)
3049 return rc;
3050 return em_push(ctxt);
3051 }
3052
3053 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3054 {
3055 u16 sel, old_cs;
3056 ulong old_eip;
3057 int rc;
3058 struct desc_struct old_desc, new_desc;
3059 const struct x86_emulate_ops *ops = ctxt->ops;
3060 int cpl = ctxt->ops->cpl(ctxt);
3061 enum x86emul_mode prev_mode = ctxt->mode;
3062
3063 old_eip = ctxt->_eip;
3064 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3065
3066 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3067 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3068 X86_TRANSFER_CALL_JMP, &new_desc);
3069 if (rc != X86EMUL_CONTINUE)
3070 return rc;
3071
3072 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3073 if (rc != X86EMUL_CONTINUE)
3074 goto fail;
3075
3076 ctxt->src.val = old_cs;
3077 rc = em_push(ctxt);
3078 if (rc != X86EMUL_CONTINUE)
3079 goto fail;
3080
3081 ctxt->src.val = old_eip;
3082 rc = em_push(ctxt);
3083 /* If we failed, we tainted the memory, but at the very least we
3084 should restore cs. */
3085 if (rc != X86EMUL_CONTINUE) {
3086 pr_warn_once("faulting far call emulation tainted memory\n");
3087 goto fail;
3088 }
3089 return rc;
3090 fail:
3091 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3092 ctxt->mode = prev_mode;
3093 return rc;
3094
3095 }
3096
3097 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3098 {
3099 int rc;
3100 unsigned long eip;
3101
3102 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3103 if (rc != X86EMUL_CONTINUE)
3104 return rc;
3105 rc = assign_eip_near(ctxt, eip);
3106 if (rc != X86EMUL_CONTINUE)
3107 return rc;
3108 rsp_increment(ctxt, ctxt->src.val);
3109 return X86EMUL_CONTINUE;
3110 }
3111
3112 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3113 {
3114 /* Write back the register source. */
3115 ctxt->src.val = ctxt->dst.val;
3116 write_register_operand(&ctxt->src);
3117
3118 /* Write back the memory destination with implicit LOCK prefix. */
3119 ctxt->dst.val = ctxt->src.orig_val;
3120 ctxt->lock_prefix = 1;
3121 return X86EMUL_CONTINUE;
3122 }
3123
3124 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3125 {
3126 ctxt->dst.val = ctxt->src2.val;
3127 return fastop(ctxt, em_imul);
3128 }
3129
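/*
 * Sign-extension trick used below (editor's note): shifting the sign
 * bit down gives 0 or 1, so ~((sign) - 1) is all-zeros or all-ones.
 * E.g. CWD with AX = 0x8000 sets DX = 0xffff, while AX = 0x1234 sets
 * DX = 0.
 */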
3130 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3131 {
3132 ctxt->dst.type = OP_REG;
3133 ctxt->dst.bytes = ctxt->src.bytes;
3134 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3135 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3136
3137 return X86EMUL_CONTINUE;
3138 }
3139
3140 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3141 {
3142 u64 tsc = 0;
3143
3144 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3145 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3146 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3147 return X86EMUL_CONTINUE;
3148 }
3149
3150 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3151 {
3152 u64 pmc;
3153
3154 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3155 return emulate_gp(ctxt, 0);
3156 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3157 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3158 return X86EMUL_CONTINUE;
3159 }
3160
3161 static int em_mov(struct x86_emulate_ctxt *ctxt)
3162 {
3163 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3164 return X86EMUL_CONTINUE;
3165 }
3166
3167 #define FFL(x) bit(X86_FEATURE_##x)
3168
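/*
 * MOVBE example (editor's illustration): the instruction is a mov plus
 * byte swap, so a 32-bit load of 0x12345678 stores 0x78563412 in the
 * destination; the 16-bit form swaps only the low word and leaves the
 * upper bits of the register untouched.
 */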
3169 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3170 {
3171 u32 ebx, ecx, edx, eax = 1;
3172 u16 tmp;
3173
3174 /*
3175 * Check MOVBE is set in the guest-visible CPUID leaf.
3176 */
3177 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3178 if (!(ecx & FFL(MOVBE)))
3179 return emulate_ud(ctxt);
3180
3181 switch (ctxt->op_bytes) {
3182 case 2:
3183 /*
3184 * From MOVBE definition: "...When the operand size is 16 bits,
3185 * the upper word of the destination register remains unchanged
3186 * ..."
3187 *
3188 * Casting either ->valptr or ->val to u16 would break strict
3189 * aliasing rules, so we do the operation almost by hand.
3190 */
3191 tmp = (u16)ctxt->src.val;
3192 ctxt->dst.val &= ~0xffffUL;
3193 ctxt->dst.val |= (unsigned long)swab16(tmp);
3194 break;
3195 case 4:
3196 ctxt->dst.val = swab32((u32)ctxt->src.val);
3197 break;
3198 case 8:
3199 ctxt->dst.val = swab64(ctxt->src.val);
3200 break;
3201 default:
3202 BUG();
3203 }
3204 return X86EMUL_CONTINUE;
3205 }
3206
3207 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3208 {
3209 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3210 return emulate_gp(ctxt, 0);
3211
3212 /* Disable writeback. */
3213 ctxt->dst.type = OP_NONE;
3214 return X86EMUL_CONTINUE;
3215 }
3216
3217 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3218 {
3219 unsigned long val;
3220
3221 if (ctxt->mode == X86EMUL_MODE_PROT64)
3222 val = ctxt->src.val & ~0ULL;
3223 else
3224 val = ctxt->src.val & ~0U;
3225
3226 /* #UD condition is already handled. */
3227 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3228 return emulate_gp(ctxt, 0);
3229
3230 /* Disable writeback. */
3231 ctxt->dst.type = OP_NONE;
3232 return X86EMUL_CONTINUE;
3233 }
3234
3235 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3236 {
3237 u64 msr_data;
3238
3239 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3240 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3241 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3242 return emulate_gp(ctxt, 0);
3243
3244 return X86EMUL_CONTINUE;
3245 }
3246
3247 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3248 {
3249 u64 msr_data;
3250
3251 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3252 return emulate_gp(ctxt, 0);
3253
3254 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3255 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3256 return X86EMUL_CONTINUE;
3257 }
3258
3259 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3260 {
3261 if (ctxt->modrm_reg > VCPU_SREG_GS)
3262 return emulate_ud(ctxt);
3263
3264 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3265 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3266 ctxt->dst.bytes = 2;
3267 return X86EMUL_CONTINUE;
3268 }
3269
3270 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3271 {
3272 u16 sel = ctxt->src.val;
3273
3274 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3275 return emulate_ud(ctxt);
3276
3277 if (ctxt->modrm_reg == VCPU_SREG_SS)
3278 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3279
3280 /* Disable writeback. */
3281 ctxt->dst.type = OP_NONE;
3282 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3283 }
3284
3285 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3286 {
3287 u16 sel = ctxt->src.val;
3288
3289 /* Disable writeback. */
3290 ctxt->dst.type = OP_NONE;
3291 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3292 }
3293
3294 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3295 {
3296 u16 sel = ctxt->src.val;
3297
3298 /* Disable writeback. */
3299 ctxt->dst.type = OP_NONE;
3300 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3301 }
3302
3303 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3304 {
3305 int rc;
3306 ulong linear;
3307
3308 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3309 if (rc == X86EMUL_CONTINUE)
3310 ctxt->ops->invlpg(ctxt, linear);
3311 /* Disable writeback. */
3312 ctxt->dst.type = OP_NONE;
3313 return X86EMUL_CONTINUE;
3314 }
3315
3316 static int em_clts(struct x86_emulate_ctxt *ctxt)
3317 {
3318 ulong cr0;
3319
3320 cr0 = ctxt->ops->get_cr(ctxt, 0);
3321 cr0 &= ~X86_CR0_TS;
3322 ctxt->ops->set_cr(ctxt, 0, cr0);
3323 return X86EMUL_CONTINUE;
3324 }
3325
3326 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3327 {
3328 int rc = ctxt->ops->fix_hypercall(ctxt);
3329
3330 if (rc != X86EMUL_CONTINUE)
3331 return rc;
3332
3333 /* Let the processor re-execute the fixed hypercall */
3334 ctxt->_eip = ctxt->eip;
3335 /* Disable writeback. */
3336 ctxt->dst.type = OP_NONE;
3337 return X86EMUL_CONTINUE;
3338 }
3339
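/*
 * Descriptor-table store format (editor's note): SGDT/SIDT write a
 * 2-byte limit followed by the base address; with a 16-bit operand
 * size only 24 bits of the base are stored, hence the 0x00ffffff mask
 * below.
 */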
3340 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3341 void (*get)(struct x86_emulate_ctxt *ctxt,
3342 struct desc_ptr *ptr))
3343 {
3344 struct desc_ptr desc_ptr;
3345
3346 if (ctxt->mode == X86EMUL_MODE_PROT64)
3347 ctxt->op_bytes = 8;
3348 get(ctxt, &desc_ptr);
3349 if (ctxt->op_bytes == 2) {
3350 ctxt->op_bytes = 4;
3351 desc_ptr.address &= 0x00ffffff;
3352 }
3353 /* Disable writeback. */
3354 ctxt->dst.type = OP_NONE;
3355 return segmented_write(ctxt, ctxt->dst.addr.mem,
3356 &desc_ptr, 2 + ctxt->op_bytes);
3357 }
3358
3359 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3360 {
3361 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3362 }
3363
3364 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3365 {
3366 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3367 }
3368
3369 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3370 {
3371 struct desc_ptr desc_ptr;
3372 int rc;
3373
3374 if (ctxt->mode == X86EMUL_MODE_PROT64)
3375 ctxt->op_bytes = 8;
3376 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3377 &desc_ptr.size, &desc_ptr.address,
3378 ctxt->op_bytes);
3379 if (rc != X86EMUL_CONTINUE)
3380 return rc;
3381 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3382 is_noncanonical_address(desc_ptr.address))
3383 return emulate_gp(ctxt, 0);
3384 if (lgdt)
3385 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3386 else
3387 ctxt->ops->set_idt(ctxt, &desc_ptr);
3388 /* Disable writeback. */
3389 ctxt->dst.type = OP_NONE;
3390 return X86EMUL_CONTINUE;
3391 }
3392
3393 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3394 {
3395 return em_lgdt_lidt(ctxt, true);
3396 }
3397
3398 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3399 {
3400 int rc;
3401
3402 rc = ctxt->ops->fix_hypercall(ctxt);
3403
3404 /* Disable writeback. */
3405 ctxt->dst.type = OP_NONE;
3406 return rc;
3407 }
3408
3409 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3410 {
3411 return em_lgdt_lidt(ctxt, false);
3412 }
3413
3414 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3415 {
3416 if (ctxt->dst.type == OP_MEM)
3417 ctxt->dst.bytes = 2;
3418 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3419 return X86EMUL_CONTINUE;
3420 }
3421
3422 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3423 {
3424 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3425 | (ctxt->src.val & 0x0f));
3426 ctxt->dst.type = OP_NONE;
3427 return X86EMUL_CONTINUE;
3428 }
3429
3430 static int em_loop(struct x86_emulate_ctxt *ctxt)
3431 {
3432 int rc = X86EMUL_CONTINUE;
3433
3434 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3435 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3436 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3437 rc = jmp_rel(ctxt, ctxt->src.val);
3438
3439 return rc;
3440 }
3441
3442 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3443 {
3444 int rc = X86EMUL_CONTINUE;
3445
3446 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3447 rc = jmp_rel(ctxt, ctxt->src.val);
3448
3449 return rc;
3450 }
3451
3452 static int em_in(struct x86_emulate_ctxt *ctxt)
3453 {
3454 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3455 &ctxt->dst.val))
3456 return X86EMUL_IO_NEEDED;
3457
3458 return X86EMUL_CONTINUE;
3459 }
3460
3461 static int em_out(struct x86_emulate_ctxt *ctxt)
3462 {
3463 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3464 &ctxt->src.val, 1);
3465 /* Disable writeback. */
3466 ctxt->dst.type = OP_NONE;
3467 return X86EMUL_CONTINUE;
3468 }
3469
3470 static int em_cli(struct x86_emulate_ctxt *ctxt)
3471 {
3472 if (emulator_bad_iopl(ctxt))
3473 return emulate_gp(ctxt, 0);
3474
3475 ctxt->eflags &= ~X86_EFLAGS_IF;
3476 return X86EMUL_CONTINUE;
3477 }
3478
3479 static int em_sti(struct x86_emulate_ctxt *ctxt)
3480 {
3481 if (emulator_bad_iopl(ctxt))
3482 return emulate_gp(ctxt, 0);
3483
3484 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3485 ctxt->eflags |= X86_EFLAGS_IF;
3486 return X86EMUL_CONTINUE;
3487 }
3488
3489 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3490 {
3491 u32 eax, ebx, ecx, edx;
3492
3493 eax = reg_read(ctxt, VCPU_REGS_RAX);
3494 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3495 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3496 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3497 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3498 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3499 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3500 return X86EMUL_CONTINUE;
3501 }
3502
3503 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3504 {
3505 u32 flags;
3506
3507 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3508 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3509
3510 ctxt->eflags &= ~0xffUL;
3511 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3512 return X86EMUL_CONTINUE;
3513 }
3514
3515 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3516 {
3517 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3518 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3519 return X86EMUL_CONTINUE;
3520 }
3521
3522 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3523 {
3524 switch (ctxt->op_bytes) {
3525 #ifdef CONFIG_X86_64
3526 case 8:
3527 asm("bswap %0" : "+r"(ctxt->dst.val));
3528 break;
3529 #endif
3530 default:
3531 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3532 break;
3533 }
3534 return X86EMUL_CONTINUE;
3535 }
3536
3537 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3538 {
3539 /* emulate clflush regardless of the guest's cpuid bits */
3540 return X86EMUL_CONTINUE;
3541 }
3542
3543 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3544 {
3545 ctxt->dst.val = (s32) ctxt->src.val;
3546 return X86EMUL_CONTINUE;
3547 }
3548
3549 static bool valid_cr(int nr)
3550 {
3551 switch (nr) {
3552 case 0:
3553 case 2 ... 4:
3554 case 8:
3555 return true;
3556 default:
3557 return false;
3558 }
3559 }
3560
3561 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3562 {
3563 if (!valid_cr(ctxt->modrm_reg))
3564 return emulate_ud(ctxt);
3565
3566 return X86EMUL_CONTINUE;
3567 }
3568
3569 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3570 {
3571 u64 new_val = ctxt->src.val64;
3572 int cr = ctxt->modrm_reg;
3573 u64 efer = 0;
3574
3575 static u64 cr_reserved_bits[] = {
3576 0xffffffff00000000ULL,
3577 0, 0, 0, /* CR3 checked later */
3578 CR4_RESERVED_BITS,
3579 0, 0, 0,
3580 CR8_RESERVED_BITS,
3581 };
3582
3583 if (!valid_cr(cr))
3584 return emulate_ud(ctxt);
3585
3586 if (new_val & cr_reserved_bits[cr])
3587 return emulate_gp(ctxt, 0);
3588
3589 switch (cr) {
3590 case 0: {
3591 u64 cr4;
3592 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3593 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3594 return emulate_gp(ctxt, 0);
3595
3596 cr4 = ctxt->ops->get_cr(ctxt, 4);
3597 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3598
3599 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3600 !(cr4 & X86_CR4_PAE))
3601 return emulate_gp(ctxt, 0);
3602
3603 break;
3604 }
3605 case 3: {
3606 u64 rsvd = 0;
3607
3608 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3609 if (efer & EFER_LMA)
3610 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3611
3612 if (new_val & rsvd)
3613 return emulate_gp(ctxt, 0);
3614
3615 break;
3616 }
3617 case 4: {
3618 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3619
3620 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3621 return emulate_gp(ctxt, 0);
3622
3623 break;
3624 }
3625 }
3626
3627 return X86EMUL_CONTINUE;
3628 }
3629
3630 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3631 {
3632 unsigned long dr7;
3633
3634 ctxt->ops->get_dr(ctxt, 7, &dr7);
3635
3636 /* Check if DR7.GD (general detect enable) is set */
3637 return dr7 & (1 << 13);
3638 }
3639
3640 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3641 {
3642 int dr = ctxt->modrm_reg;
3643 u64 cr4;
3644
3645 if (dr > 7)
3646 return emulate_ud(ctxt);
3647
3648 cr4 = ctxt->ops->get_cr(ctxt, 4);
3649 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3650 return emulate_ud(ctxt);
3651
3652 if (check_dr7_gd(ctxt)) {
3653 ulong dr6;
3654
3655 ctxt->ops->get_dr(ctxt, 6, &dr6);
3656 dr6 &= ~15;
3657 dr6 |= DR6_BD | DR6_RTM;
3658 ctxt->ops->set_dr(ctxt, 6, dr6);
3659 return emulate_db(ctxt);
3660 }
3661
3662 return X86EMUL_CONTINUE;
3663 }
3664
3665 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3666 {
3667 u64 new_val = ctxt->src.val64;
3668 int dr = ctxt->modrm_reg;
3669
3670 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3671 return emulate_gp(ctxt, 0);
3672
3673 return check_dr_read(ctxt);
3674 }
3675
3676 static int check_svme(struct x86_emulate_ctxt *ctxt)
3677 {
3678 u64 efer;
3679
3680 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3681
3682 if (!(efer & EFER_SVME))
3683 return emulate_ud(ctxt);
3684
3685 return X86EMUL_CONTINUE;
3686 }
3687
3688 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3689 {
3690 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3691
3692 /* Valid physical address? */
3693 if (rax & 0xffff000000000000ULL)
3694 return emulate_gp(ctxt, 0);
3695
3696 return check_svme(ctxt);
3697 }
3698
3699 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3700 {
3701 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3702
3703 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3704 return emulate_ud(ctxt);
3705
3706 return X86EMUL_CONTINUE;
3707 }
3708
3709 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3710 {
3711 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3712 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3713
3714 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3715 ctxt->ops->check_pmc(ctxt, rcx))
3716 return emulate_gp(ctxt, 0);
3717
3718 return X86EMUL_CONTINUE;
3719 }
3720
3721 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3722 {
3723 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3724 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3725 return emulate_gp(ctxt, 0);
3726
3727 return X86EMUL_CONTINUE;
3728 }
3729
3730 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3731 {
3732 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3733 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3734 return emulate_gp(ctxt, 0);
3735
3736 return X86EMUL_CONTINUE;
3737 }
3738
3739 #define D(_y) { .flags = (_y) }
3740 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3741 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3742 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3743 #define N D(NotImpl)
3744 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3745 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3746 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3747 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3748 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3749 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3750 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3751 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3752 #define II(_f, _e, _i) \
3753 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3754 #define IIP(_f, _e, _i, _p) \
3755 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3756 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3757 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3758
3759 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3760 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3761 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3762 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3763 #define I2bvIP(_f, _e, _i, _p) \
3764 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3765
3766 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3767 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3768 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3769
3770 static const struct opcode group7_rm0[] = {
3771 N,
3772 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3773 N, N, N, N, N, N,
3774 };
3775
3776 static const struct opcode group7_rm1[] = {
3777 DI(SrcNone | Priv, monitor),
3778 DI(SrcNone | Priv, mwait),
3779 N, N, N, N, N, N,
3780 };
3781
3782 static const struct opcode group7_rm3[] = {
3783 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3784 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3785 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3786 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3787 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3788 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3789 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3790 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3791 };
3792
3793 static const struct opcode group7_rm7[] = {
3794 N,
3795 DIP(SrcNone, rdtscp, check_rdtsc),
3796 N, N, N, N, N, N,
3797 };
3798
3799 static const struct opcode group1[] = {
3800 F(Lock, em_add),
3801 F(Lock | PageTable, em_or),
3802 F(Lock, em_adc),
3803 F(Lock, em_sbb),
3804 F(Lock | PageTable, em_and),
3805 F(Lock, em_sub),
3806 F(Lock, em_xor),
3807 F(NoWrite, em_cmp),
3808 };
3809
3810 static const struct opcode group1A[] = {
3811 I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
3812 };
3813
3814 static const struct opcode group2[] = {
3815 F(DstMem | ModRM, em_rol),
3816 F(DstMem | ModRM, em_ror),
3817 F(DstMem | ModRM, em_rcl),
3818 F(DstMem | ModRM, em_rcr),
3819 F(DstMem | ModRM, em_shl),
3820 F(DstMem | ModRM, em_shr),
3821 F(DstMem | ModRM, em_shl),
3822 F(DstMem | ModRM, em_sar),
3823 };
3824
3825 static const struct opcode group3[] = {
3826 F(DstMem | SrcImm | NoWrite, em_test),
3827 F(DstMem | SrcImm | NoWrite, em_test),
3828 F(DstMem | SrcNone | Lock, em_not),
3829 F(DstMem | SrcNone | Lock, em_neg),
3830 F(DstXacc | Src2Mem, em_mul_ex),
3831 F(DstXacc | Src2Mem, em_imul_ex),
3832 F(DstXacc | Src2Mem, em_div_ex),
3833 F(DstXacc | Src2Mem, em_idiv_ex),
3834 };
3835
3836 static const struct opcode group4[] = {
3837 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3838 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3839 N, N, N, N, N, N,
3840 };
3841
3842 static const struct opcode group5[] = {
3843 F(DstMem | SrcNone | Lock, em_inc),
3844 F(DstMem | SrcNone | Lock, em_dec),
3845 I(SrcMem | NearBranch, em_call_near_abs),
3846 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3847 I(SrcMem | NearBranch, em_jmp_abs),
3848 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3849 I(SrcMem | Stack, em_push), D(Undefined),
3850 };
3851
3852 static const struct opcode group6[] = {
3853 DI(Prot | DstMem, sldt),
3854 DI(Prot | DstMem, str),
3855 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3856 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3857 N, N, N, N,
3858 };
3859
3860 static const struct group_dual group7 = { {
3861 II(Mov | DstMem, em_sgdt, sgdt),
3862 II(Mov | DstMem, em_sidt, sidt),
3863 II(SrcMem | Priv, em_lgdt, lgdt),
3864 II(SrcMem | Priv, em_lidt, lidt),
3865 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3866 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3867 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3868 }, {
3869 EXT(0, group7_rm0),
3870 EXT(0, group7_rm1),
3871 N, EXT(0, group7_rm3),
3872 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3873 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3874 EXT(0, group7_rm7),
3875 } };
3876
3877 static const struct opcode group8[] = {
3878 N, N, N, N,
3879 F(DstMem | SrcImmByte | NoWrite, em_bt),
3880 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3881 F(DstMem | SrcImmByte | Lock, em_btr),
3882 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3883 };
3884
3885 static const struct group_dual group9 = { {
3886 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3887 }, {
3888 N, N, N, N, N, N, N, N,
3889 } };
3890
3891 static const struct opcode group11[] = {
3892 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3893 X7(D(Undefined)),
3894 };
3895
3896 static const struct gprefix pfx_0f_ae_7 = {
3897 I(SrcMem | ByteOp, em_clflush), N, N, N,
3898 };
3899
3900 static const struct group_dual group15 = { {
3901 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3902 }, {
3903 N, N, N, N, N, N, N, N,
3904 } };
3905
3906 static const struct gprefix pfx_0f_6f_0f_7f = {
3907 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3908 };
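/*
 * Editor's note: gprefix entries are ordered {no prefix, 66, f2, f3}.
 * For 0f 6f/0f 7f that means movq (MMX) with no prefix, movdqa
 * (Aligned) with 66, undefined with f2, and movdqu (Unaligned) with f3.
 */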
3909
3910 static const struct instr_dual instr_dual_0f_2b = {
3911 I(0, em_mov), N
3912 };
3913
3914 static const struct gprefix pfx_0f_2b = {
3915 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3916 };
3917
3918 static const struct gprefix pfx_0f_28_0f_29 = {
3919 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3920 };
3921
3922 static const struct gprefix pfx_0f_e7 = {
3923 N, I(Sse, em_mov), N, N,
3924 };
3925
3926 static const struct escape escape_d9 = { {
3927 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
3928 }, {
3929 /* 0xC0 - 0xC7 */
3930 N, N, N, N, N, N, N, N,
3931 /* 0xC8 - 0xCF */
3932 N, N, N, N, N, N, N, N,
3933 /* 0xD0 - 0xD7 */
3934 N, N, N, N, N, N, N, N,
3935 /* 0xD8 - 0xDF */
3936 N, N, N, N, N, N, N, N,
3937 /* 0xE0 - 0xE7 */
3938 N, N, N, N, N, N, N, N,
3939 /* 0xE8 - 0xEF */
3940 N, N, N, N, N, N, N, N,
3941 /* 0xF0 - 0xF7 */
3942 N, N, N, N, N, N, N, N,
3943 /* 0xF8 - 0xFF */
3944 N, N, N, N, N, N, N, N,
3945 } };
3946
3947 static const struct escape escape_db = { {
3948 N, N, N, N, N, N, N, N,
3949 }, {
3950 /* 0xC0 - 0xC7 */
3951 N, N, N, N, N, N, N, N,
3952 /* 0xC8 - 0xCF */
3953 N, N, N, N, N, N, N, N,
3954 /* 0xD0 - 0xD7 */
3955 N, N, N, N, N, N, N, N,
3956 /* 0xD8 - 0xDF */
3957 N, N, N, N, N, N, N, N,
3958 /* 0xE0 - 0xE7 */
3959 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3960 /* 0xE8 - 0xEF */
3961 N, N, N, N, N, N, N, N,
3962 /* 0xF0 - 0xF7 */
3963 N, N, N, N, N, N, N, N,
3964 /* 0xF8 - 0xFF */
3965 N, N, N, N, N, N, N, N,
3966 } };
3967
3968 static const struct escape escape_dd = { {
3969 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
3970 }, {
3971 /* 0xC0 - 0xC7 */
3972 N, N, N, N, N, N, N, N,
3973 /* 0xC8 - 0xCF */
3974 N, N, N, N, N, N, N, N,
3975 /* 0xD0 - 0xD7 */
3976 N, N, N, N, N, N, N, N,
3977 /* 0xD8 - 0xDF */
3978 N, N, N, N, N, N, N, N,
3979 /* 0xE0 - 0xE7 */
3980 N, N, N, N, N, N, N, N,
3981 /* 0xE8 - 0xEF */
3982 N, N, N, N, N, N, N, N,
3983 /* 0xF0 - 0xF7 */
3984 N, N, N, N, N, N, N, N,
3985 /* 0xF8 - 0xFF */
3986 N, N, N, N, N, N, N, N,
3987 } };
3988
3989 static const struct instr_dual instr_dual_0f_c3 = {
3990 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3991 };
3992
3993 static const struct mode_dual mode_dual_63 = {
3994 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
3995 };
3996
3997 static const struct opcode opcode_table[256] = {
3998 /* 0x00 - 0x07 */
3999 F6ALU(Lock, em_add),
4000 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4001 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4002 /* 0x08 - 0x0F */
4003 F6ALU(Lock | PageTable, em_or),
4004 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4005 N,
4006 /* 0x10 - 0x17 */
4007 F6ALU(Lock, em_adc),
4008 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4009 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4010 /* 0x18 - 0x1F */
4011 F6ALU(Lock, em_sbb),
4012 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4013 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4014 /* 0x20 - 0x27 */
4015 F6ALU(Lock | PageTable, em_and), N, N,
4016 /* 0x28 - 0x2F */
4017 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4018 /* 0x30 - 0x37 */
4019 F6ALU(Lock, em_xor), N, N,
4020 /* 0x38 - 0x3F */
4021 F6ALU(NoWrite, em_cmp), N, N,
4022 /* 0x40 - 0x4F */
4023 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4024 /* 0x50 - 0x57 */
4025 X8(I(SrcReg | Stack, em_push)),
4026 /* 0x58 - 0x5F */
4027 X8(I(DstReg | Stack, em_pop)),
4028 /* 0x60 - 0x67 */
4029 I(ImplicitOps | Stack | No64, em_pusha),
4030 I(ImplicitOps | Stack | No64, em_popa),
4031 N, MD(ModRM, &mode_dual_63),
4032 N, N, N, N,
4033 /* 0x68 - 0x6F */
4034 I(SrcImm | Mov | Stack, em_push),
4035 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4036 I(SrcImmByte | Mov | Stack, em_push),
4037 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4038 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4039 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4040 /* 0x70 - 0x7F */
4041 X16(D(SrcImmByte | NearBranch)),
4042 /* 0x80 - 0x87 */
4043 G(ByteOp | DstMem | SrcImm, group1),
4044 G(DstMem | SrcImm, group1),
4045 G(ByteOp | DstMem | SrcImm | No64, group1),
4046 G(DstMem | SrcImmByte, group1),
4047 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4048 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4049 /* 0x88 - 0x8F */
4050 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4051 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4052 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4053 D(ModRM | SrcMem | NoAccess | DstReg),
4054 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4055 G(0, group1A),
4056 /* 0x90 - 0x97 */
4057 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4058 /* 0x98 - 0x9F */
4059 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4060 I(SrcImmFAddr | No64, em_call_far), N,
4061 II(ImplicitOps | Stack, em_pushf, pushf),
4062 II(ImplicitOps | Stack, em_popf, popf),
4063 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4064 /* 0xA0 - 0xA7 */
4065 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4066 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4067 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4068 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4069 /* 0xA8 - 0xAF */
4070 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4071 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4072 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4073 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4074 /* 0xB0 - 0xB7 */
4075 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4076 /* 0xB8 - 0xBF */
4077 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4078 /* 0xC0 - 0xC7 */
4079 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4080 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4081 I(ImplicitOps | NearBranch, em_ret),
4082 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4083 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4084 G(ByteOp, group11), G(0, group11),
4085 /* 0xC8 - 0xCF */
4086 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4087 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4088 I(ImplicitOps, em_ret_far),
4089 D(ImplicitOps), DI(SrcImmByte, intn),
4090 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4091 /* 0xD0 - 0xD7 */
4092 G(Src2One | ByteOp, group2), G(Src2One, group2),
4093 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4094 I(DstAcc | SrcImmUByte | No64, em_aam),
4095 I(DstAcc | SrcImmUByte | No64, em_aad),
4096 F(DstAcc | ByteOp | No64, em_salc),
4097 I(DstAcc | SrcXLat | ByteOp, em_mov),
4098 /* 0xD8 - 0xDF */
4099 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4100 /* 0xE0 - 0xE7 */
4101 X3(I(SrcImmByte | NearBranch, em_loop)),
4102 I(SrcImmByte | NearBranch, em_jcxz),
4103 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4104 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4105 /* 0xE8 - 0xEF */
4106 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4107 I(SrcImmFAddr | No64, em_jmp_far),
4108 D(SrcImmByte | ImplicitOps | NearBranch),
4109 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4110 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4111 /* 0xF0 - 0xF7 */
4112 N, DI(ImplicitOps, icebp), N, N,
4113 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4114 G(ByteOp, group3), G(0, group3),
4115 /* 0xF8 - 0xFF */
4116 D(ImplicitOps), D(ImplicitOps),
4117 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4118 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4119 };
4120
4121 static const struct opcode twobyte_table[256] = {
4122 /* 0x00 - 0x0F */
4123 G(0, group6), GD(0, &group7), N, N,
4124 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4125 II(ImplicitOps | Priv, em_clts, clts), N,
4126 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4127 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4128 /* 0x10 - 0x1F */
4129 N, N, N, N, N, N, N, N,
4130 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4131 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4132 /* 0x20 - 0x2F */
4133 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4134 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4135 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4136 check_cr_write),
4137 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4138 check_dr_write),
4139 N, N, N, N,
4140 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4141 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4142 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4143 N, N, N, N,
4144 /* 0x30 - 0x3F */
4145 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4146 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4147 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4148 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4149 I(ImplicitOps | EmulateOnUD, em_sysenter),
4150 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4151 N, N,
4152 N, N, N, N, N, N, N, N,
4153 /* 0x40 - 0x4F */
4154 X16(D(DstReg | SrcMem | ModRM)),
4155 /* 0x50 - 0x5F */
4156 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4157 /* 0x60 - 0x6F */
4158 N, N, N, N,
4159 N, N, N, N,
4160 N, N, N, N,
4161 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4162 /* 0x70 - 0x7F */
4163 N, N, N, N,
4164 N, N, N, N,
4165 N, N, N, N,
4166 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4167 /* 0x80 - 0x8F */
4168 X16(D(SrcImm | NearBranch)),
4169 /* 0x90 - 0x9F */
4170 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4171 /* 0xA0 - 0xA7 */
4172 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4173 II(ImplicitOps, em_cpuid, cpuid),
4174 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4175 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4176 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4177 /* 0xA8 - 0xAF */
4178 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4179 DI(ImplicitOps, rsm),
4180 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4181 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4182 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4183 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4184 /* 0xB0 - 0xB7 */
4185 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4186 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4187 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4188 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4189 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4190 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4191 /* 0xB8 - 0xBF */
4192 N, N,
4193 G(BitOp, group8),
4194 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4195 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4196 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4197 /* 0xC0 - 0xC7 */
4198 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4199 N, ID(0, &instr_dual_0f_c3),
4200 N, N, N, GD(0, &group9),
4201 /* 0xC8 - 0xCF */
4202 X8(I(DstReg, em_bswap)),
4203 /* 0xD0 - 0xDF */
4204 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4205 /* 0xE0 - 0xEF */
4206 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4207 N, N, N, N, N, N, N, N,
4208 /* 0xF0 - 0xFF */
4209 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4210 };
4211
4212 static const struct instr_dual instr_dual_0f_38_f0 = {
4213 I(DstReg | SrcMem | Mov, em_movbe), N
4214 };
4215
4216 static const struct instr_dual instr_dual_0f_38_f1 = {
4217 I(DstMem | SrcReg | Mov, em_movbe), N
4218 };
4219
4220 static const struct gprefix three_byte_0f_38_f0 = {
4221 ID(0, &instr_dual_0f_38_f0), N, N, N
4222 };
4223
4224 static const struct gprefix three_byte_0f_38_f1 = {
4225 ID(0, &instr_dual_0f_38_f1), N, N, N
4226 };
4227
4228 /*
4229 * Insns below are indexed by the third opcode byte; the mandatory
4230 * prefix, if any, then selects the final instruction.
4231 */
4232 static const struct opcode opcode_map_0f_38[256] = {
4233 /* 0x00 - 0x7f */
4234 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4235 /* 0x80 - 0xef */
4236 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4237 /* 0xf0 - 0xf1 */
4238 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4239 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4240 /* 0xf2 - 0xff */
4241 N, N, X4(N), X8(N)
4242 };
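/*
 * Illustrative decode walk (editor's sketch): for the bytes
 * "0f 38 f0 /r" (movbe r, m) the decoder takes twobyte_table[0x38],
 * switches to this map, and lands on three_byte_0f_38_f0.  With no
 * SIMD prefix the Prefix step picks pfx_no, and the InstrDual step
 * then selects em_movbe when mod != 3 (memory source) or N
 * (undefined) when mod == 3.
 */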
4243
4244 #undef D
4245 #undef N
4246 #undef G
4247 #undef GD
4248 #undef I
4249 #undef GP
4250 #undef EXT
4251 #undef MD
4252 #undef ID
4253
4254 #undef D2bv
4255 #undef D2bvIP
4256 #undef I2bv
4257 #undef I2bvIP
4258 #undef I6ALU
4259
4260 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4261 {
4262 unsigned size;
4263
4264 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4265 if (size == 8)
4266 size = 4;
4267 return size;
4268 }
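/*
 * Editor's note: the 8 -> 4 clamp reflects the ISA rule that, even
 * with REX.W, almost all instructions encode at most a 32-bit
 * immediate which the CPU sign-extends to 64 bits.  The one exception
 * handled here, mov reg, imm64 (OpImm64), bypasses imm_size() and
 * passes ctxt->op_bytes to decode_imm() directly.
 */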
4269
4270 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4271 unsigned size, bool sign_extension)
4272 {
4273 int rc = X86EMUL_CONTINUE;
4274
4275 op->type = OP_IMM;
4276 op->bytes = size;
4277 op->addr.mem.ea = ctxt->_eip;
4278 /* NB. Immediates are sign-extended as necessary. */
4279 switch (op->bytes) {
4280 case 1:
4281 op->val = insn_fetch(s8, ctxt);
4282 break;
4283 case 2:
4284 op->val = insn_fetch(s16, ctxt);
4285 break;
4286 case 4:
4287 op->val = insn_fetch(s32, ctxt);
4288 break;
4289 case 8:
4290 op->val = insn_fetch(s64, ctxt);
4291 break;
4292 }
4293 if (!sign_extension) {
4294 switch (op->bytes) {
4295 case 1:
4296 op->val &= 0xff;
4297 break;
4298 case 2:
4299 op->val &= 0xffff;
4300 break;
4301 case 4:
4302 op->val &= 0xffffffff;
4303 break;
4304 }
4305 }
4306 done:
4307 return rc;
4308 }
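/*
 * Worked example (editor's addition, hypothetical byte stream): with
 * the next instruction byte being 0xff,
 *
 *	decode_imm(ctxt, op, 1, true)	yields op->val == -1   (OpImmByte)
 *	decode_imm(ctxt, op, 1, false)	yields op->val == 0xff (OpImmUByte)
 *
 * The sign-extended fetch is kept as-is; the unsigned form masks the
 * value back down to its operand size.
 */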
4309
4310 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4311 unsigned d)
4312 {
4313 int rc = X86EMUL_CONTINUE;
4314
4315 switch (d) {
4316 case OpReg:
4317 decode_register_operand(ctxt, op);
4318 break;
4319 case OpImmUByte:
4320 rc = decode_imm(ctxt, op, 1, false);
4321 break;
4322 case OpMem:
4323 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4324 mem_common:
4325 *op = ctxt->memop;
4326 ctxt->memopp = op;
4327 if (ctxt->d & BitOp)
4328 fetch_bit_operand(ctxt);
4329 op->orig_val = op->val;
4330 break;
4331 case OpMem64:
4332 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4333 goto mem_common;
4334 case OpAcc:
4335 op->type = OP_REG;
4336 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4337 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4338 fetch_register_operand(op);
4339 op->orig_val = op->val;
4340 break;
4341 case OpAccLo:
4342 op->type = OP_REG;
4343 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4344 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4345 fetch_register_operand(op);
4346 op->orig_val = op->val;
4347 break;
4348 case OpAccHi:
4349 if (ctxt->d & ByteOp) {
4350 op->type = OP_NONE;
4351 break;
4352 }
4353 op->type = OP_REG;
4354 op->bytes = ctxt->op_bytes;
4355 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4356 fetch_register_operand(op);
4357 op->orig_val = op->val;
4358 break;
4359 case OpDI:
4360 op->type = OP_MEM;
4361 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4362 op->addr.mem.ea =
4363 register_address(ctxt, VCPU_REGS_RDI);
4364 op->addr.mem.seg = VCPU_SREG_ES;
4365 op->val = 0;
4366 op->count = 1;
4367 break;
4368 case OpDX:
4369 op->type = OP_REG;
4370 op->bytes = 2;
4371 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4372 fetch_register_operand(op);
4373 break;
4374 case OpCL:
4375 op->type = OP_IMM;
4376 op->bytes = 1;
4377 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4378 break;
4379 case OpImmByte:
4380 rc = decode_imm(ctxt, op, 1, true);
4381 break;
4382 case OpOne:
4383 op->type = OP_IMM;
4384 op->bytes = 1;
4385 op->val = 1;
4386 break;
4387 case OpImm:
4388 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4389 break;
4390 case OpImm64:
4391 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4392 break;
4393 case OpMem8:
4394 ctxt->memop.bytes = 1;
4395 if (ctxt->memop.type == OP_REG) {
4396 ctxt->memop.addr.reg = decode_register(ctxt,
4397 ctxt->modrm_rm, true);
4398 fetch_register_operand(&ctxt->memop);
4399 }
4400 goto mem_common;
4401 case OpMem16:
4402 ctxt->memop.bytes = 2;
4403 goto mem_common;
4404 case OpMem32:
4405 ctxt->memop.bytes = 4;
4406 goto mem_common;
4407 case OpImmU16:
4408 rc = decode_imm(ctxt, op, 2, false);
4409 break;
4410 case OpImmU:
4411 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4412 break;
4413 case OpSI:
4414 op->type = OP_MEM;
4415 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4416 op->addr.mem.ea =
4417 register_address(ctxt, VCPU_REGS_RSI);
4418 op->addr.mem.seg = ctxt->seg_override;
4419 op->val = 0;
4420 op->count = 1;
4421 break;
4422 case OpXLat:
4423 op->type = OP_MEM;
4424 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4425 op->addr.mem.ea =
4426 address_mask(ctxt,
4427 reg_read(ctxt, VCPU_REGS_RBX) +
4428 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4429 op->addr.mem.seg = ctxt->seg_override;
4430 op->val = 0;
4431 break;
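/*
 * Editor's example: OpXLat forms xlat's "seg:[rBX + AL]" source, so
 * with RBX == 0x1000 and AL == 0x42 the effective address is 0x1042,
 * truncated to the current address size by address_mask() and subject
 * to any segment override.
 */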
4432 case OpImmFAddr:
4433 op->type = OP_IMM;
4434 op->addr.mem.ea = ctxt->_eip;
4435 op->bytes = ctxt->op_bytes + 2;
4436 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4437 break;
4438 case OpMemFAddr:
4439 ctxt->memop.bytes = ctxt->op_bytes + 2;
4440 goto mem_common;
4441 case OpES:
4442 op->type = OP_IMM;
4443 op->val = VCPU_SREG_ES;
4444 break;
4445 case OpCS:
4446 op->type = OP_IMM;
4447 op->val = VCPU_SREG_CS;
4448 break;
4449 case OpSS:
4450 op->type = OP_IMM;
4451 op->val = VCPU_SREG_SS;
4452 break;
4453 case OpDS:
4454 op->type = OP_IMM;
4455 op->val = VCPU_SREG_DS;
4456 break;
4457 case OpFS:
4458 op->type = OP_IMM;
4459 op->val = VCPU_SREG_FS;
4460 break;
4461 case OpGS:
4462 op->type = OP_IMM;
4463 op->val = VCPU_SREG_GS;
4464 break;
4465 case OpImplicit:
4466 /* Special instructions do their own operand decoding. */
4467 default:
4468 op->type = OP_NONE; /* Disable writeback. */
4469 break;
4470 }
4471
4472 done:
4473 return rc;
4474 }
4475
4476 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4477 {
4478 int rc = X86EMUL_CONTINUE;
4479 int mode = ctxt->mode;
4480 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4481 bool op_prefix = false;
4482 bool has_seg_override = false;
4483 struct opcode opcode;
4484
4485 ctxt->memop.type = OP_NONE;
4486 ctxt->memopp = NULL;
4487 ctxt->_eip = ctxt->eip;
4488 ctxt->fetch.ptr = ctxt->fetch.data;
4489 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4490 ctxt->opcode_len = 1;
4491 if (insn_len > 0)
4492 memcpy(ctxt->fetch.data, insn, insn_len);
4493 else {
4494 rc = __do_insn_fetch_bytes(ctxt, 1);
4495 if (rc != X86EMUL_CONTINUE)
4496 return rc;
4497 }
4498
4499 switch (mode) {
4500 case X86EMUL_MODE_REAL:
4501 case X86EMUL_MODE_VM86:
4502 case X86EMUL_MODE_PROT16:
4503 def_op_bytes = def_ad_bytes = 2;
4504 break;
4505 case X86EMUL_MODE_PROT32:
4506 def_op_bytes = def_ad_bytes = 4;
4507 break;
4508 #ifdef CONFIG_X86_64
4509 case X86EMUL_MODE_PROT64:
4510 def_op_bytes = 4;
4511 def_ad_bytes = 8;
4512 break;
4513 #endif
4514 default:
4515 return EMULATION_FAILED;
4516 }
4517
4518 ctxt->op_bytes = def_op_bytes;
4519 ctxt->ad_bytes = def_ad_bytes;
4520
4521 /* Legacy prefixes. */
4522 for (;;) {
4523 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4524 case 0x66: /* operand-size override */
4525 op_prefix = true;
4526 /* switch between 2/4 bytes */
4527 ctxt->op_bytes = def_op_bytes ^ 6;
4528 break;
4529 case 0x67: /* address-size override */
4530 if (mode == X86EMUL_MODE_PROT64)
4531 /* switch between 4/8 bytes */
4532 ctxt->ad_bytes = def_ad_bytes ^ 12;
4533 else
4534 /* switch between 2/4 bytes */
4535 ctxt->ad_bytes = def_ad_bytes ^ 6;
4536 break;
4537 case 0x26: /* ES override */
4538 case 0x2e: /* CS override */
4539 case 0x36: /* SS override */
4540 case 0x3e: /* DS override */
4541 has_seg_override = true;
4542 ctxt->seg_override = (ctxt->b >> 3) & 3;
4543 break;
4544 case 0x64: /* FS override */
4545 case 0x65: /* GS override */
4546 has_seg_override = true;
4547 ctxt->seg_override = ctxt->b & 7;
4548 break;
4549 case 0x40 ... 0x4f: /* REX */
4550 if (mode != X86EMUL_MODE_PROT64)
4551 goto done_prefixes;
4552 ctxt->rex_prefix = ctxt->b;
4553 continue;
4554 case 0xf0: /* LOCK */
4555 ctxt->lock_prefix = 1;
4556 break;
4557 case 0xf2: /* REPNE/REPNZ */
4558 case 0xf3: /* REP/REPE/REPZ */
4559 ctxt->rep_prefix = ctxt->b;
4560 break;
4561 default:
4562 goto done_prefixes;
4563 }
4564
4565 /* Any legacy prefix after a REX prefix nullifies its effect. */
4566
4567 ctxt->rex_prefix = 0;
4568 }
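/*
 * Editor's note on the XOR trick above: "def_op_bytes ^ 6" toggles
 * 2 <-> 4 and "def_ad_bytes ^ 12" toggles 4 <-> 8, so each size
 * override prefix simply flips between the two sizes legal for the
 * current mode rather than setting a fixed value.
 */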
4569
4570 done_prefixes:
4571
4572 /* REX prefix. */
4573 if (ctxt->rex_prefix & 8)
4574 ctxt->op_bytes = 8; /* REX.W */
4575
4576 /* Opcode byte(s). */
4577 opcode = opcode_table[ctxt->b];
4578 /* Two-byte opcode? */
4579 if (ctxt->b == 0x0f) {
4580 ctxt->opcode_len = 2;
4581 ctxt->b = insn_fetch(u8, ctxt);
4582 opcode = twobyte_table[ctxt->b];
4583
4584 /* 0F_38 opcode map */
4585 if (ctxt->b == 0x38) {
4586 ctxt->opcode_len = 3;
4587 ctxt->b = insn_fetch(u8, ctxt);
4588 opcode = opcode_map_0f_38[ctxt->b];
4589 }
4590 }
4591 ctxt->d = opcode.flags;
4592
4593 if (ctxt->d & ModRM)
4594 ctxt->modrm = insn_fetch(u8, ctxt);
4595
4596 /* vex-prefix instructions are not implemented */
4597 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4598 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4599 ctxt->d = NotImpl;
4600 }
4601
4602 while (ctxt->d & GroupMask) {
4603 switch (ctxt->d & GroupMask) {
4604 case Group:
4605 goffset = (ctxt->modrm >> 3) & 7;
4606 opcode = opcode.u.group[goffset];
4607 break;
4608 case GroupDual:
4609 goffset = (ctxt->modrm >> 3) & 7;
4610 if ((ctxt->modrm >> 6) == 3)
4611 opcode = opcode.u.gdual->mod3[goffset];
4612 else
4613 opcode = opcode.u.gdual->mod012[goffset];
4614 break;
4615 case RMExt:
4616 goffset = ctxt->modrm & 7;
4617 opcode = opcode.u.group[goffset];
4618 break;
4619 case Prefix:
4620 if (ctxt->rep_prefix && op_prefix)
4621 return EMULATION_FAILED;
4622 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4623 switch (simd_prefix) {
4624 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4625 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4626 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4627 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4628 }
4629 break;
4630 case Escape:
4631 if (ctxt->modrm > 0xbf)
4632 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4633 else
4634 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4635 break;
4636 case InstrDual:
4637 if ((ctxt->modrm >> 6) == 3)
4638 opcode = opcode.u.idual->mod3;
4639 else
4640 opcode = opcode.u.idual->mod012;
4641 break;
4642 case ModeDual:
4643 if (ctxt->mode == X86EMUL_MODE_PROT64)
4644 opcode = opcode.u.mdual->mode64;
4645 else
4646 opcode = opcode.u.mdual->mode32;
4647 break;
4648 default:
4649 return EMULATION_FAILED;
4650 }
4651
4652 ctxt->d &= ~(u64)GroupMask;
4653 ctxt->d |= opcode.flags;
4654 }
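/*
 * Illustrative group walk (editor's sketch): for "0f 01 /3" with
 * mod != 3, GroupDual selects lidt from group7's mod012 half; with
 * mod == 3 it selects EXT(0, group7_rm3), and the RMExt pass then
 * indexes group7_rm3 by modrm & 7 to reach the SVM instructions.
 */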
4655
4656 /* Unrecognised? */
4657 if (ctxt->d == 0)
4658 return EMULATION_FAILED;
4659
4660 ctxt->execute = opcode.u.execute;
4661
4662 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4663 return EMULATION_FAILED;
4664
4665 if (unlikely(ctxt->d &
4666 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4667 No16))) {
4668 /*
4669 * These are copied unconditionally here, and checked unconditionally
4670 * in x86_emulate_insn.
4671 */
4672 ctxt->check_perm = opcode.check_perm;
4673 ctxt->intercept = opcode.intercept;
4674
4675 if (ctxt->d & NotImpl)
4676 return EMULATION_FAILED;
4677
4678 if (mode == X86EMUL_MODE_PROT64) {
4679 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4680 ctxt->op_bytes = 8;
4681 else if (ctxt->d & NearBranch)
4682 ctxt->op_bytes = 8;
4683 }
4684
4685 if (ctxt->d & Op3264) {
4686 if (mode == X86EMUL_MODE_PROT64)
4687 ctxt->op_bytes = 8;
4688 else
4689 ctxt->op_bytes = 4;
4690 }
4691
4692 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4693 ctxt->op_bytes = 4;
4694
4695 if (ctxt->d & Sse)
4696 ctxt->op_bytes = 16;
4697 else if (ctxt->d & Mmx)
4698 ctxt->op_bytes = 8;
4699 }
4700
4701 /* ModRM and SIB bytes. */
4702 if (ctxt->d & ModRM) {
4703 rc = decode_modrm(ctxt, &ctxt->memop);
4704 if (!has_seg_override) {
4705 has_seg_override = true;
4706 ctxt->seg_override = ctxt->modrm_seg;
4707 }
4708 } else if (ctxt->d & MemAbs)
4709 rc = decode_abs(ctxt, &ctxt->memop);
4710 if (rc != X86EMUL_CONTINUE)
4711 goto done;
4712
4713 if (!has_seg_override)
4714 ctxt->seg_override = VCPU_SREG_DS;
4715
4716 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4717
4718 /*
4719 * Decode and fetch the source operand: register, memory
4720 * or immediate.
4721 */
4722 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4723 if (rc != X86EMUL_CONTINUE)
4724 goto done;
4725
4726 /*
4727 * Decode and fetch the second source operand: register, memory
4728 * or immediate.
4729 */
4730 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4731 if (rc != X86EMUL_CONTINUE)
4732 goto done;
4733
4734 /* Decode and fetch the destination operand: register or memory. */
4735 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4736
4737 if (ctxt->rip_relative)
4738 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4739 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4740
4741 done:
4742 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4743 }
4744
4745 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4746 {
4747 return ctxt->d & PageTable;
4748 }
4749
4750 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4751 {
4752 /* The second termination condition only applies to REPE
4753 * and REPNE. Test whether the repeat string operation prefix is
4754 * REPE/REPZ or REPNE/REPNZ and, if so, check the
4755 * corresponding termination condition:
4756 * - if REPE/REPZ and ZF = 0 then done
4757 * - if REPNE/REPNZ and ZF = 1 then done
4758 */
4759 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4760 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4761 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4762 ((ctxt->eflags & EFLG_ZF) == 0))
4763 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4764 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4765 return true;
4766
4767 return false;
4768 }
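/*
 * Editor's example: "repe cmpsb" (f3 a6) keeps iterating while
 * RCX != 0 (the first termination condition, tested in
 * x86_emulate_insn) and ZF == 1; the first mismatching byte clears ZF,
 * the REPE test above fires, and the string operation completes.
 */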
4769
4770 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4771 {
4772 bool fault = false;
4773
4774 ctxt->ops->get_fpu(ctxt);
4775 asm volatile("1: fwait \n\t"
4776 "2: \n\t"
4777 ".pushsection .fixup,\"ax\" \n\t"
4778 "3: \n\t"
4779 "movb $1, %[fault] \n\t"
4780 "jmp 2b \n\t"
4781 ".popsection \n\t"
4782 _ASM_EXTABLE(1b, 3b)
4783 : [fault]"+qm"(fault));
4784 ctxt->ops->put_fpu(ctxt);
4785
4786 if (unlikely(fault))
4787 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4788
4789 return X86EMUL_CONTINUE;
4790 }
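/*
 * Editor's note: fwait raises any x87 exception left pending by the
 * guest.  If #MF fires while the host runs the fwait above, the
 * exception-table fixup (label 3) sets "fault" instead of oopsing, and
 * the fault is then reflected into the guest as MF_VECTOR.
 */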
4791
4792 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4793 struct operand *op)
4794 {
4795 if (op->type == OP_MM)
4796 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4797 }
4798
4799 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4800 {
4801 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4802 if (!(ctxt->d & ByteOp))
4803 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4804 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4805 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4806 [fastop]"+S"(fop)
4807 : "c"(ctxt->src2.val));
4808 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4809 if (!fop) /* exception is returned in fop variable */
4810 return emulate_de(ctxt);
4811 return X86EMUL_CONTINUE;
4812 }
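/*
 * Editor's sketch of the fastop convention implied by the asm above:
 * dst travels in rax, src in rdx, src2 in rcx, and flags are staged
 * through push/popf.  Stubs are laid out FASTOP_SIZE bytes apart per
 * operand size, hence "fop += __ffs(dst.bytes) * FASTOP_SIZE" to pick
 * the 1/2/4/8-byte variant.  A faulting stub (e.g. divide error)
 * returns with the fop register zeroed, which the NULL check above
 * converts into #DE for the guest.
 */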
4813
4814 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4815 {
4816 memset(&ctxt->rip_relative, 0,
4817 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4818
4819 ctxt->io_read.pos = 0;
4820 ctxt->io_read.end = 0;
4821 ctxt->mem_read.end = 0;
4822 }
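/*
 * Editor's note: the memset above assumes struct x86_emulate_ctxt
 * keeps all per-instruction decode state contiguous between
 * rip_relative and modrm, letting one call clear the whole window;
 * the read caches that may span an emulation restart are reset
 * explicitly instead.
 */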
4823
4824 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4825 {
4826 const struct x86_emulate_ops *ops = ctxt->ops;
4827 int rc = X86EMUL_CONTINUE;
4828 int saved_dst_type = ctxt->dst.type;
4829
4830 ctxt->mem_read.pos = 0;
4831
4832 /* LOCK prefix is allowed only with some instructions */
4833 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4834 rc = emulate_ud(ctxt);
4835 goto done;
4836 }
4837
4838 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4839 rc = emulate_ud(ctxt);
4840 goto done;
4841 }
4842
4843 if (unlikely(ctxt->d &
4844 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4845 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4846 (ctxt->d & Undefined)) {
4847 rc = emulate_ud(ctxt);
4848 goto done;
4849 }
4850
4851 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4852 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4853 rc = emulate_ud(ctxt);
4854 goto done;
4855 }
4856
4857 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4858 rc = emulate_nm(ctxt);
4859 goto done;
4860 }
4861
4862 if (ctxt->d & Mmx) {
4863 rc = flush_pending_x87_faults(ctxt);
4864 if (rc != X86EMUL_CONTINUE)
4865 goto done;
4866 /*
4867 * Now that we know the FPU is exception-safe, we can fetch
4868 * operands from it.
4869 */
4870 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4871 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4872 if (!(ctxt->d & Mov))
4873 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4874 }
4875
4876 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4877 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4878 X86_ICPT_PRE_EXCEPT);
4879 if (rc != X86EMUL_CONTINUE)
4880 goto done;
4881 }
4882
4883 /* Instruction can only be executed in protected mode */
4884 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4885 rc = emulate_ud(ctxt);
4886 goto done;
4887 }
4888
4889 /* Privileged instructions can be executed only at CPL 0 */
4890 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4891 if (ctxt->d & PrivUD)
4892 rc = emulate_ud(ctxt);
4893 else
4894 rc = emulate_gp(ctxt, 0);
4895 goto done;
4896 }
4897
4898 /* Do instruction-specific permission checks */
4899 if (ctxt->d & CheckPerm) {
4900 rc = ctxt->check_perm(ctxt);
4901 if (rc != X86EMUL_CONTINUE)
4902 goto done;
4903 }
4904
4905 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4906 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4907 X86_ICPT_POST_EXCEPT);
4908 if (rc != X86EMUL_CONTINUE)
4909 goto done;
4910 }
4911
4912 if (ctxt->rep_prefix && (ctxt->d & String)) {
4913 /* All REP prefixes have the same first termination condition */
4914 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4915 ctxt->eip = ctxt->_eip;
4916 ctxt->eflags &= ~EFLG_RF;
4917 goto done;
4918 }
4919 }
4920 }
4921
4922 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4923 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4924 ctxt->src.valptr, ctxt->src.bytes);
4925 if (rc != X86EMUL_CONTINUE)
4926 goto done;
4927 ctxt->src.orig_val64 = ctxt->src.val64;
4928 }
4929
4930 if (ctxt->src2.type == OP_MEM) {
4931 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4932 &ctxt->src2.val, ctxt->src2.bytes);
4933 if (rc != X86EMUL_CONTINUE)
4934 goto done;
4935 }
4936
4937 if ((ctxt->d & DstMask) == ImplicitOps)
4938 goto special_insn;
4939
4940
4941 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4942 /* optimisation - avoid slow emulated read if Mov */
4943 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4944 &ctxt->dst.val, ctxt->dst.bytes);
4945 if (rc != X86EMUL_CONTINUE) {
4946 if (!(ctxt->d & NoWrite) &&
4947 rc == X86EMUL_PROPAGATE_FAULT &&
4948 ctxt->exception.vector == PF_VECTOR)
4949 ctxt->exception.error_code |= PFERR_WRITE_MASK;
4950 goto done;
4951 }
4952 }
4953 /* Copy full 64-bit value for CMPXCHG8B. */
4954 ctxt->dst.orig_val64 = ctxt->dst.val64;
4955
4956 special_insn:
4957
4958 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4959 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4960 X86_ICPT_POST_MEMACCESS);
4961 if (rc != X86EMUL_CONTINUE)
4962 goto done;
4963 }
4964
4965 if (ctxt->rep_prefix && (ctxt->d & String))
4966 ctxt->eflags |= EFLG_RF;
4967 else
4968 ctxt->eflags &= ~EFLG_RF;
4969
4970 if (ctxt->execute) {
4971 if (ctxt->d & Fastop) {
4972 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4973 rc = fastop(ctxt, fop);
4974 if (rc != X86EMUL_CONTINUE)
4975 goto done;
4976 goto writeback;
4977 }
4978 rc = ctxt->execute(ctxt);
4979 if (rc != X86EMUL_CONTINUE)
4980 goto done;
4981 goto writeback;
4982 }
4983
4984 if (ctxt->opcode_len == 2)
4985 goto twobyte_insn;
4986 else if (ctxt->opcode_len == 3)
4987 goto threebyte_insn;
4988
4989 switch (ctxt->b) {
4990 case 0x70 ... 0x7f: /* jcc (short) */
4991 if (test_cc(ctxt->b, ctxt->eflags))
4992 rc = jmp_rel(ctxt, ctxt->src.val);
4993 break;
4994 case 0x8d: /* lea r16/r32, m */
4995 ctxt->dst.val = ctxt->src.addr.mem.ea;
4996 break;
4997 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4998 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4999 ctxt->dst.type = OP_NONE;
5000 else
5001 rc = em_xchg(ctxt);
5002 break;
5003 case 0x98: /* cbw/cwde/cdqe */
5004 switch (ctxt->op_bytes) {
5005 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5006 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5007 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5008 }
5009 break;
5010 case 0xcc: /* int3 */
5011 rc = emulate_int(ctxt, 3);
5012 break;
5013 case 0xcd: /* int n */
5014 rc = emulate_int(ctxt, ctxt->src.val);
5015 break;
5016 case 0xce: /* into */
5017 if (ctxt->eflags & EFLG_OF)
5018 rc = emulate_int(ctxt, 4);
5019 break;
5020 case 0xe9: /* jmp rel */
5021 case 0xeb: /* jmp rel short */
5022 rc = jmp_rel(ctxt, ctxt->src.val);
5023 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5024 break;
5025 case 0xf4: /* hlt */
5026 ctxt->ops->halt(ctxt);
5027 break;
5028 case 0xf5: /* cmc */
5029 /* complement carry flag from eflags reg */
5030 ctxt->eflags ^= EFLG_CF;
5031 break;
5032 case 0xf8: /* clc */
5033 ctxt->eflags &= ~EFLG_CF;
5034 break;
5035 case 0xf9: /* stc */
5036 ctxt->eflags |= EFLG_CF;
5037 break;
5038 case 0xfc: /* cld */
5039 ctxt->eflags &= ~EFLG_DF;
5040 break;
5041 case 0xfd: /* std */
5042 ctxt->eflags |= EFLG_DF;
5043 break;
5044 default:
5045 goto cannot_emulate;
5046 }
5047
5048 if (rc != X86EMUL_CONTINUE)
5049 goto done;
5050
5051 writeback:
5052 if (ctxt->d & SrcWrite) {
5053 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5054 rc = writeback(ctxt, &ctxt->src);
5055 if (rc != X86EMUL_CONTINUE)
5056 goto done;
5057 }
5058 if (!(ctxt->d & NoWrite)) {
5059 rc = writeback(ctxt, &ctxt->dst);
5060 if (rc != X86EMUL_CONTINUE)
5061 goto done;
5062 }
5063
5064 /*
5065 * restore dst type in case the decode is reused
5066 * (happens for string instructions)
5067 */
5068 ctxt->dst.type = saved_dst_type;
5069
5070 if ((ctxt->d & SrcMask) == SrcSI)
5071 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5072
5073 if ((ctxt->d & DstMask) == DstDI)
5074 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5075
5076 if (ctxt->rep_prefix && (ctxt->d & String)) {
5077 unsigned int count;
5078 struct read_cache *r = &ctxt->io_read;
5079 if ((ctxt->d & SrcMask) == SrcSI)
5080 count = ctxt->src.count;
5081 else
5082 count = ctxt->dst.count;
5083 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5084
5085 if (!string_insn_completed(ctxt)) {
5086 /*
5087 * Re-enter guest when pio read ahead buffer is empty
5088 * or, if it is not used, after every 1024 iterations.
5089 */
5090 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5091 (r->end == 0 || r->end != r->pos)) {
5092 /*
5093 * Reset read cache. Usually happens before
5094 * decode, but since the instruction is restarted
5095 * we have to do it here.
5096 */
5097 ctxt->mem_read.end = 0;
5098 writeback_registers(ctxt);
5099 return EMULATION_RESTART;
5100 }
5101 goto done; /* skip rip writeback */
5102 }
5103 ctxt->eflags &= ~EFLG_RF;
5104 }
5105
5106 ctxt->eip = ctxt->_eip;
5107
5108 done:
5109 if (rc == X86EMUL_PROPAGATE_FAULT) {
5110 WARN_ON(ctxt->exception.vector > 0x1f);
5111 ctxt->have_exception = true;
5112 }
5113 if (rc == X86EMUL_INTERCEPTED)
5114 return EMULATION_INTERCEPTED;
5115
5116 if (rc == X86EMUL_CONTINUE)
5117 writeback_registers(ctxt);
5118
5119 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5120
5121 twobyte_insn:
5122 switch (ctxt->b) {
5123 case 0x09: /* wbinvd */
5124 (ctxt->ops->wbinvd)(ctxt);
5125 break;
5126 case 0x08: /* invd */
5127 case 0x0d: /* GrpP (prefetch) */
5128 case 0x18: /* Grp16 (prefetch/nop) */
5129 case 0x1f: /* nop */
5130 break;
5131 case 0x20: /* mov cr, reg */
5132 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5133 break;
5134 case 0x21: /* mov from dr to reg */
5135 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5136 break;
5137 case 0x40 ... 0x4f: /* cmov */
5138 if (test_cc(ctxt->b, ctxt->eflags))
5139 ctxt->dst.val = ctxt->src.val;
5140 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5141 ctxt->op_bytes != 4)
5142 ctxt->dst.type = OP_NONE; /* no writeback */
5143 break;
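/*
 * Editor's note: the asymmetric "else" above implements the 64-bit
 * rule that cmov with a 32-bit operand still writes (and thereby
 * zero-extends) its destination even when the condition is false;
 * only in that one case does writeback stay enabled with the old
 * dst.val.
 */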
5144 case 0x80 ... 0x8f: /* jnz rel, etc. */
5145 if (test_cc(ctxt->b, ctxt->eflags))
5146 rc = jmp_rel(ctxt, ctxt->src.val);
5147 break;
5148 case 0x90 ... 0x9f: /* setcc r/m8 */
5149 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5150 break;
5151 case 0xb6 ... 0xb7: /* movzx */
5152 ctxt->dst.bytes = ctxt->op_bytes;
5153 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5154 : (u16) ctxt->src.val;
5155 break;
5156 case 0xbe ... 0xbf: /* movsx */
5157 ctxt->dst.bytes = ctxt->op_bytes;
5158 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5159 (s16) ctxt->src.val;
5160 break;
5161 default:
5162 goto cannot_emulate;
5163 }
5164
5165 threebyte_insn:
5166
5167 if (rc != X86EMUL_CONTINUE)
5168 goto done;
5169
5170 goto writeback;
5171
5172 cannot_emulate:
5173 return EMULATION_FAILED;
5174 }
5175
5176 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5177 {
5178 invalidate_registers(ctxt);
5179 }
5180
5181 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5182 {
5183 writeback_registers(ctxt);
5184 }