/*
 * KVM: x86 emulator: store x86_emulate_ops in emulation context
 * arch/x86/include/asm/kvm_emulate.h
 */
1 /******************************************************************************
2 * x86_emulate.h
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */
10
11 #ifndef _ASM_X86_KVM_X86_EMULATE_H
12 #define _ASM_X86_KVM_X86_EMULATE_H
13
14 #include <asm/desc_defs.h>
15
16 struct x86_emulate_ctxt;
17
18 /*
19 * x86_emulate_ops:
20 *
21 * These operations represent the instruction emulator's interface to memory.
22 * There are two categories of operation: those that act on ordinary memory
23 * regions (*_std), and those that act on memory regions known to require
24 * special treatment or emulation (*_emulated).
25 *
26 * The emulator assumes that an instruction accesses only one 'emulated memory'
27 * location, that this location is the given linear faulting address (cr2), and
28 * that this is one of the instruction's data operands. Instruction fetches and
29 * stack operations are assumed never to access emulated memory. The emulator
30 * automatically deduces which operand of a string-move operation is accessing
31 * emulated memory, and assumes that the other operand accesses normal memory.
32 *
33 * NOTES:
34 * 1. The emulator isn't very smart about emulated vs. standard memory.
35 * 'Emulated memory' access addresses should be checked for sanity.
36 * 'Normal memory' accesses may fault, and the caller must arrange to
37 * detect and handle reentrancy into the emulator via recursive faults.
38 * Accesses may be unaligned and may cross page boundaries.
39 * 2. If the access fails (cannot emulate, or a standard access faults) then
40 * it is up to the memop to propagate the fault to the guest VM via
41 * some out-of-band mechanism, unknown to the emulator. The memop signals
42 * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
43 * then immediately bail.
44 * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
45 * cmpxchg8b_emulated need support 8-byte accesses.
46 * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
47 */
48 /* Access completed successfully: continue emulation as normal. */
49 #define X86EMUL_CONTINUE 0
50 /* Access is unhandleable: bail from emulation and return error to caller. */
51 #define X86EMUL_UNHANDLEABLE 1
52 /* Terminate emulation but return success to the caller. */
53 #define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
54 #define X86EMUL_RETRY_INSTR 3 /* retry the instruction for some reason */
55 #define X86EMUL_CMPXCHG_FAILED 4 /* cmpxchg did not see expected value */
56 #define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
57
58 struct x86_emulate_ops {
59 /*
60 * read_std: Read bytes of standard (non-emulated/special) memory.
61 * Used for descriptor reading.
62 * @addr: [IN ] Linear address from which to read.
63 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
64 * @bytes: [IN ] Number of bytes to read from memory.
65 */
66 int (*read_std)(unsigned long addr, void *val,
67 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
68
69 /*
70 * write_std: Write bytes of standard (non-emulated/special) memory.
71 * Used for descriptor writing.
72 * @addr: [IN ] Linear address to which to write.
73 * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
74 * @bytes: [IN ] Number of bytes to write to memory.
75 */
76 int (*write_std)(unsigned long addr, void *val,
77 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
78 /*
79 * fetch: Read bytes of standard (non-emulated/special) memory.
80 * Used for instruction fetch.
81 * @addr: [IN ] Linear address from which to read.
82 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
83 * @bytes: [IN ] Number of bytes to read from memory.
84 */
85 int (*fetch)(unsigned long addr, void *val,
86 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
87
88 /*
89 * read_emulated: Read bytes from emulated/special memory area.
90 * @addr: [IN ] Linear address from which to read.
91 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
92 * @bytes: [IN ] Number of bytes to read from memory.
93 */
94 int (*read_emulated)(unsigned long addr,
95 void *val,
96 unsigned int bytes,
97 unsigned int *error,
98 struct kvm_vcpu *vcpu);
99
100 /*
101 * write_emulated: Write bytes to emulated/special memory area.
102 * @addr: [IN ] Linear address to which to write.
103 * @val: [IN ] Value to write to memory (low-order bytes used as
104 * required).
105 * @bytes: [IN ] Number of bytes to write to memory.
106 */
107 int (*write_emulated)(unsigned long addr,
108 const void *val,
109 unsigned int bytes,
110 unsigned int *error,
111 struct kvm_vcpu *vcpu);
112
113 /*
114 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
115 * emulated/special memory area.
116 * @addr: [IN ] Linear address to access.
117 * @old: [IN ] Value expected to be current at @addr.
118 * @new: [IN ] Value to write to @addr.
119 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
120 */
121 int (*cmpxchg_emulated)(unsigned long addr,
122 const void *old,
123 const void *new,
124 unsigned int bytes,
125 unsigned int *error,
126 struct kvm_vcpu *vcpu);
127
128 int (*pio_in_emulated)(int size, unsigned short port, void *val,
129 unsigned int count, struct kvm_vcpu *vcpu);
130
131 int (*pio_out_emulated)(int size, unsigned short port, const void *val,
132 unsigned int count, struct kvm_vcpu *vcpu);
133
134 bool (*get_cached_descriptor)(struct desc_struct *desc,
135 int seg, struct kvm_vcpu *vcpu);
136 void (*set_cached_descriptor)(struct desc_struct *desc,
137 int seg, struct kvm_vcpu *vcpu);
138 u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
139 void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
140 unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
141 void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
142 ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
143 int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
144 int (*cpl)(struct kvm_vcpu *vcpu);
145 int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
146 int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
147 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
148 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
149 };
150
151 /* Type, address-of, and value of an instruction's operand. */
152 struct operand {
153 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
154 unsigned int bytes;
155 union {
156 unsigned long orig_val;
157 u64 orig_val64;
158 };
159 unsigned long *ptr;
160 union {
161 unsigned long val;
162 u64 val64;
163 char valptr[sizeof(unsigned long) + 2];
164 };
165 };
166
167 struct fetch_cache {
168 u8 data[15];
169 unsigned long start;
170 unsigned long end;
171 };
172
173 struct read_cache {
174 u8 data[1024];
175 unsigned long pos;
176 unsigned long end;
177 };
178
179 struct decode_cache {
180 u8 twobyte;
181 u8 b;
182 u8 lock_prefix;
183 u8 rep_prefix;
184 u8 op_bytes;
185 u8 ad_bytes;
186 u8 rex_prefix;
187 struct operand src;
188 struct operand src2;
189 struct operand dst;
190 bool has_seg_override;
191 u8 seg_override;
192 unsigned int d;
193 unsigned long regs[NR_VCPU_REGS];
194 unsigned long eip;
195 /* modrm */
196 u8 modrm;
197 u8 modrm_mod;
198 u8 modrm_reg;
199 u8 modrm_rm;
200 u8 use_modrm_ea;
201 bool rip_relative;
202 unsigned long modrm_ea;
203 void *modrm_ptr;
204 unsigned long modrm_val;
205 struct fetch_cache fetch;
206 struct read_cache io_read;
207 struct read_cache mem_read;
208 };
209
210 struct x86_emulate_ctxt {
211 struct x86_emulate_ops *ops;
212
213 /* Register state before/after emulation. */
214 struct kvm_vcpu *vcpu;
215
216 unsigned long eflags;
217 unsigned long eip; /* eip before instruction emulation */
218 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
219 int mode;
220 u32 cs_base;
221
222 /* interruptibility state, as a result of execution of STI or MOV SS */
223 int interruptibility;
224
225 bool restart; /* restart string instruction after writeback */
226
227 int exception; /* exception that happens during emulation or -1 */
228 u32 error_code; /* error code for exception */
229 bool error_code_valid;
230 unsigned long cr2; /* faulted address in case of #PF */
231
232 /* decode cache */
233 struct decode_cache decode;
234 };
235
236 /* Repeat String Operation Prefix */
237 #define REPE_PREFIX 1
238 #define REPNE_PREFIX 2
239
240 /* Execution mode, passed to the emulator. */
241 #define X86EMUL_MODE_REAL 0 /* Real mode. */
242 #define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
243 #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
244 #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
245 #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
246
247 /* Host execution mode. */
248 #if defined(CONFIG_X86_32)
249 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
250 #elif defined(CONFIG_X86_64)
251 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
252 #endif
253
254 int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
255 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
256 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
257 u16 tss_selector, int reason,
258 bool has_error_code, u32 error_code);
259
260 #endif /* _ASM_X86_KVM_X86_EMULATE_H */