/*
 * KVM: x86 emulator: make emulator memory callbacks return full exception
 * (arch/x86/include/asm/kvm_emulate.h)
 */
1 /******************************************************************************
2 * x86_emulate.h
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */
10
11 #ifndef _ASM_X86_KVM_X86_EMULATE_H
12 #define _ASM_X86_KVM_X86_EMULATE_H
13
14 #include <asm/desc_defs.h>
15
16 struct x86_emulate_ctxt;
17
/*
 * Full description of an exception raised while emulating, filled in by
 * the memory callbacks below via their @fault out-parameter so the caller
 * can inject it into the guest.
 */
struct x86_exception {
	u8 vector;		/* exception/interrupt vector number */
	bool error_code_valid;	/* whether error_code holds a valid value */
	u16 error_code;		/* error code, meaningful iff error_code_valid */
};
23
24 /*
25 * x86_emulate_ops:
26 *
27 * These operations represent the instruction emulator's interface to memory.
28 * There are two categories of operation: those that act on ordinary memory
29 * regions (*_std), and those that act on memory regions known to require
30 * special treatment or emulation (*_emulated).
31 *
32 * The emulator assumes that an instruction accesses only one 'emulated memory'
33 * location, that this location is the given linear faulting address (cr2), and
34 * that this is one of the instruction's data operands. Instruction fetches and
35 * stack operations are assumed never to access emulated memory. The emulator
36 * automatically deduces which operand of a string-move operation is accessing
37 * emulated memory, and assumes that the other operand accesses normal memory.
38 *
39 * NOTES:
40 * 1. The emulator isn't very smart about emulated vs. standard memory.
41 * 'Emulated memory' access addresses should be checked for sanity.
42 * 'Normal memory' accesses may fault, and the caller must arrange to
43 * detect and handle reentrancy into the emulator via recursive faults.
44 * Accesses may be unaligned and may cross page boundaries.
45 * 2. If the access fails (cannot emulate, or a standard access faults) then
46 * it is up to the memop to propagate the fault to the guest VM via
47 * some out-of-band mechanism, unknown to the emulator. The memop signals
48 * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
49 * then immediately bail.
50 * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
51 * cmpxchg8b_emulated need support 8-byte accesses.
52 * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
53 */
/* Return codes used by the x86_emulate_ops callbacks and the emulator core. */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE 0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE 1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR 3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED 4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
63
struct x86_emulate_ops {
	/*
	 * read_std: Read bytes of standard (non-emulated/special) memory.
	 *           Used for descriptor reading.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 *  @fault: [OUT] Exception details if the access faults.
	 */
	int (*read_std)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu,
			struct x86_exception *fault);

	/*
	 * write_std: Write bytes of standard (non-emulated/special) memory.
	 *            Used for descriptor writing.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory.
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 *  @fault: [OUT] Exception details if the access faults.
	 * NOTE(review): @val is logically an input; it could be const void *.
	 */
	int (*write_std)(unsigned long addr, void *val,
			 unsigned int bytes, struct kvm_vcpu *vcpu,
			 struct x86_exception *fault);

	/*
	 * fetch: Read bytes of standard (non-emulated/special) memory.
	 *        Used for instruction fetch.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 *  @fault: [OUT] Exception details if the access faults.
	 */
	int (*fetch)(unsigned long addr, void *val,
		     unsigned int bytes, struct kvm_vcpu *vcpu,
		     struct x86_exception *fault);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 *  @fault: [OUT] Exception details if the access faults.
	 */
	int (*read_emulated)(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct x86_exception *fault,
			     struct kvm_vcpu *vcpu);

	/*
	 * write_emulated: Write bytes to emulated/special memory area.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
	 *                required).
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 *  @fault: [OUT] Exception details if the access faults.
	 */
	int (*write_emulated)(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct x86_exception *fault,
			      struct kvm_vcpu *vcpu);

	/*
	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
	 *                   emulated/special memory area.
	 *  @addr:  [IN ] Linear address to access.
	 *  @old:   [IN ] Value expected to be current at @addr.
	 *  @new:   [IN ] Value to write to @addr.
	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
	 *  @fault: [OUT] Exception details if the access faults.
	 */
	int (*cmpxchg_emulated)(unsigned long addr,
				const void *old,
				const void *new,
				unsigned int bytes,
				struct x86_exception *fault,
				struct kvm_vcpu *vcpu);

	/* pio_in_emulated: read @count items of @size bytes from I/O @port. */
	int (*pio_in_emulated)(int size, unsigned short port, void *val,
			       unsigned int count, struct kvm_vcpu *vcpu);

	/* pio_out_emulated: write @count items of @size bytes to I/O @port. */
	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
				unsigned int count, struct kvm_vcpu *vcpu);

	/* Segment descriptor/selector/base accessors, indexed by @seg. */
	bool (*get_cached_descriptor)(struct desc_struct *desc,
				      int seg, struct kvm_vcpu *vcpu);
	void (*set_cached_descriptor)(struct desc_struct *desc,
				      int seg, struct kvm_vcpu *vcpu);
	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);

	/* Descriptor table registers (GDTR/IDTR). */
	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	void (*get_idt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);

	/* Control/debug register access, CPL query, and MSR access. */
	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
	int (*cpl)(struct kvm_vcpu *vcpu);
	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
};
160
/* Type, address-of, and value of an instruction's operand. */
struct operand {
	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
	unsigned int bytes;		/* operand size in bytes */
	union {
		/* original value, kept for read-modify-write operations */
		unsigned long orig_val;
		u64 orig_val64;
	};
	union {
		/* OP_REG: pointer to the register's storage */
		unsigned long *reg;
		/* OP_MEM: segment + effective address */
		struct segmented_address {
			ulong ea;	/* effective address within @seg */
			unsigned seg;	/* segment index */
		} mem;
	} addr;
	union {
		/* current operand value */
		unsigned long val;
		u64 val64;
		char valptr[sizeof(unsigned long) + 2];
	};
};
182
/* Cache of raw instruction bytes read by the ->fetch() callback. */
struct fetch_cache {
	u8 data[15];		/* an x86 instruction is at most 15 bytes */
	unsigned long start;	/* linear address of the first cached byte */
	unsigned long end;	/* linear address one past the last cached byte */
};
188
/*
 * Buffer of previously read data plus position/end markers.  Used for
 * io_read/mem_read in the decode cache — presumably so a restarted
 * instruction re-reads the same data (TODO: confirm against users).
 */
struct read_cache {
	u8 data[1024];		/* buffered data */
	unsigned long pos;	/* current read position within data[] */
	unsigned long end;	/* one past the last valid byte in data[] */
};
194
/* Per-instruction decode state, filled by x86_decode_insn(). */
struct decode_cache {
	u8 twobyte;		/* two-byte (0x0f-escaped) opcode */
	u8 b;			/* opcode byte */
	u8 lock_prefix;		/* LOCK prefix present */
	u8 rep_prefix;		/* REPE_PREFIX / REPNE_PREFIX, or 0 */
	u8 op_bytes;		/* operand size in bytes */
	u8 ad_bytes;		/* address size in bytes */
	u8 rex_prefix;		/* raw REX prefix byte (64-bit mode) */
	struct operand src;	/* first source operand */
	struct operand src2;	/* second source operand */
	struct operand dst;	/* destination operand */
	bool has_seg_override;	/* a segment-override prefix was seen */
	u8 seg_override;	/* override segment, valid iff has_seg_override */
	unsigned int d;		/* opcode attribute/decode flags */
	int (*execute)(struct x86_emulate_ctxt *ctxt);	/* opcode handler */
	unsigned long regs[NR_VCPU_REGS];	/* working copy of guest GPRs */
	unsigned long eip;	/* instruction pointer used during decode/execute */
	/* modrm */
	u8 modrm;		/* raw ModRM byte */
	u8 modrm_mod;		/* ModRM.mod field */
	u8 modrm_reg;		/* ModRM.reg field */
	u8 modrm_rm;		/* ModRM.rm field */
	u8 modrm_seg;		/* segment implied by the ModRM addressing form */
	bool rip_relative;	/* RIP-relative addressing (64-bit mode) */
	struct fetch_cache fetch;	/* cached instruction bytes */
	struct read_cache io_read;	/* buffered port-I/O read data */
	struct read_cache mem_read;	/* buffered memory read data */
};
223
/*
 * Emulation context: all state needed to decode and emulate a single
 * instruction on behalf of a vcpu.
 */
struct x86_emulate_ctxt {
	struct x86_emulate_ops *ops;	/* callbacks into the host (see above) */

	/* Register state before/after emulation. */
	struct kvm_vcpu *vcpu;

	unsigned long eflags;
	unsigned long eip; /* eip before instruction emulation */
	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
	int mode;
	u32 cs_base;	/* code segment base address */

	/* interruptibility state, as a result of execution of STI or MOV SS */
	int interruptibility;

	bool perm_ok; /* do not check permissions if true */

	bool have_exception;		/* emulation raised an exception ... */
	struct x86_exception exception;	/* ... described here when it did */

	/* decode cache */
	struct decode_cache decode;
};
247
/* Repeat String Operation Prefix (decode_cache.rep_prefix values). */
#define REPE_PREFIX 1
#define REPNE_PREFIX 2

/* Execution mode, passed to the emulator (x86_emulate_ctxt.mode). */
#define X86EMUL_MODE_REAL 0 /* Real mode. */
#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */

/* Host execution mode, selected by the kernel's build configuration. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
265
/* Decode the instruction at ctxt->eip, populating ctxt->decode. */
int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
/* Result codes for the emulation entry points below. */
#define EMULATION_FAILED -1	/* could not emulate; caller must handle */
#define EMULATION_OK 0		/* emulation completed */
#define EMULATION_RESTART 1	/* re-enter the emulator to continue */
/* Execute the previously decoded instruction. */
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
/* Emulate a task switch to @tss_selector; @reason is the switch cause. */
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code);
/* Emulate delivery of real-mode interrupt @irq. */
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq);
276 #endif /* _ASM_X86_KVM_X86_EMULATE_H */