Commit | Line | Data |
---|---|---|
740765ce SL |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | |
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | |
8 | */ | |
9 | ||
10 | #ifndef __MIPS_KVM_HOST_H__ | |
11 | #define __MIPS_KVM_HOST_H__ | |
12 | ||
13 | #include <linux/mutex.h> | |
14 | #include <linux/hrtimer.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/kvm.h> | |
18 | #include <linux/kvm_types.h> | |
19 | #include <linux/threads.h> | |
20 | #include <linux/spinlock.h> | |
21 | ||
258f3a2e | 22 | #include <asm/inst.h> |
e6207bbe JH |
23 | #include <asm/mipsregs.h> |
24 | ||
/*
 * MIPS KVM register ids, used by the KVM_{GET,SET}_ONE_REG ioctl interface.
 * Each CP0 register is identified by its (register, select) pair packed into
 * the low bits of the id, tagged with the userspace access size (U32/U64).
 */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
#define KVM_MAX_VCPUS		1
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

/*
 * Special address that contains the comm page, used for reducing # of traps
 * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))

/*
 * Guest is in kernel mode if EXL or ERL is set, or if the KSU field of
 * the guest Status register does not select user mode.
 */
#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
88 | ||
22027945 JH |
89 | #define KVM_GUEST_KUSEG 0x00000000UL |
90 | #define KVM_GUEST_KSEG0 0x40000000UL | |
91 | #define KVM_GUEST_KSEG23 0x60000000UL | |
7f5a1ddc | 92 | #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) |
22027945 | 93 | #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) |
740765ce SL |
94 | |
95 | #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) | |
96 | #define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) | |
97 | #define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) | |
98 | ||
99 | /* | |
100 | * Map an address to a certain kernel segment | |
101 | */ | |
102 | #define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) | |
103 | #define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) | |
104 | #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) | |
105 | ||
22027945 JH |
106 | #define KVM_INVALID_PAGE 0xdeadbeef |
107 | #define KVM_INVALID_INST 0xdeadbeef | |
108 | #define KVM_INVALID_ADDR 0xdeadbeef | |
740765ce | 109 | |
740765ce | 110 | extern atomic_t kvm_mips_instance; |
740765ce SL |
/* VM-wide statistics counters */
struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

/* Per-vCPU exit/event counters, one per guest exit reason handled below */
struct kvm_vcpu_stat {
	u32 wait_exits;
	u32 cache_exits;
	u32 signal_exits;
	u32 int_exits;
	u32 cop_unusable_exits;
	u32 tlbmod_exits;
	u32 tlbmiss_ld_exits;
	u32 tlbmiss_st_exits;
	u32 addrerr_st_exits;
	u32 addrerr_ld_exits;
	u32 syscall_exits;
	u32 resvd_inst_exits;
	u32 break_inst_exits;
	u32 trap_inst_exits;
	u32 msa_fpe_exits;
	u32 fpe_exits;
	u32 msa_disabled_exits;
	u32 flush_dcache_exits;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_poll_invalid;
	u32 halt_wakeup;
};
/* No arch-specific per-memslot state is needed */
struct kvm_arch_memory_slot {
};

/* Per-VM architecture state */
struct kvm_arch {
	/* Guest GVA->HPA page table */
	unsigned long *guest_pmap;
	unsigned long guest_pmap_npages;

	/* Wired host TLB used for the commpage */
	int commpage_tlb;
};
#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

/* Shadow copy of the guest's CP0 register file, indexed [register][select] */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* Per-register access counters, debug builds only */
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
/*
 * Coprocessor 0 register names (CP0 register numbers)
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26	/* same register number as MIPS_CP0_ECC */
#define MIPS_CP0_DATA_LO	28	/* same register number as MIPS_CP0_TAG_LO */
#define MIPS_CP0_DATA_HI	29	/* same register number as MIPS_CP0_TAG_HI */
#define MIPS_CP0_DESAVE		31

/* Select values for the CP0 Config register family */
#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5
/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

/* Outcome of an instruction emulation attempt */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
};
/* Convert a physical address to/from the PFN field of a MIPS3 TLB EntryLo */
#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0

#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
/* A TLB entry is global only if the G bit is set in both lo0 and lo1 */
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
/* Pick the even/odd EntryLo half of the pair for virtual address @va */
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
/* VPN2 comparison masks out bits covered by the entry's page mask */
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
/* ASID matches if the entry is global or the ASIDs are equal */
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
/* Software copy of one guest TLB entry (EntryHi/PageMask/EntryLo pair) */
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

/* Bits for kvm_vcpu_arch::aux_inuse — which auxiliary state is loaded */
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
/* Per-vCPU architecture state */
struct kvm_vcpu_arch {
	/* Host address of the guest exception base area */
	void *guest_ebase;
	/* Entry point of the generated vcpu run code */
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	u32 io_gpr;		/* GPR used as IO source/target */

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	u32 pending_load_cause;

	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
	unsigned long preempt_entryhi;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Cached guest kernel/user ASIDs, per host CPU */
	u32 guest_user_asid[NR_CPUS];
	u32 guest_kernel_asid[NR_CPUS];
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Last host CPU this vCPU was scheduled on */
	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	/* Userspace-controlled enables for FPU/MSA/KScratch emulation */
	u8 fpu_enabled;
	u8 msa_enabled;
	u8 kscratch_enabled;
};
327 | ||
22027945 JH |
328 | #define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) |
329 | #define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) | |
330 | #define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) | |
331 | #define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) | |
332 | #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) | |
333 | #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) | |
334 | #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) | |
7767b7d2 | 335 | #define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val)) |
22027945 JH |
336 | #define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) |
337 | #define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) | |
338 | #define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0]) | |
339 | #define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) | |
26f4f3b5 JH |
340 | #define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0]) |
341 | #define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val)) | |
22027945 JH |
342 | #define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0]) |
343 | #define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) | |
344 | #define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0]) | |
345 | #define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val)) | |
346 | #define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0]) | |
347 | #define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) | |
348 | #define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0]) | |
349 | #define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val)) | |
350 | #define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0]) | |
351 | #define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val)) | |
352 | #define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1]) | |
353 | #define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val)) | |
354 | #define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0]) | |
355 | #define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val)) | |
356 | #define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0]) | |
357 | #define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) | |
358 | #define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0]) | |
359 | #define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val)) | |
360 | #define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1]) | |
361 | #define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val)) | |
362 | #define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0]) | |
363 | #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) | |
364 | #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) | |
365 | #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) | |
c771607a JH |
366 | #define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4]) |
367 | #define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5]) | |
22027945 JH |
368 | #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) |
369 | #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) | |
370 | #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) | |
371 | #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) | |
372 | #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) | |
c771607a JH |
373 | #define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val)) |
374 | #define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val)) | |
22027945 JH |
375 | #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) |
376 | #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) | |
377 | #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) | |
05108709 JH |
378 | #define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2]) |
379 | #define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3]) | |
380 | #define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4]) | |
381 | #define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5]) | |
382 | #define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6]) | |
383 | #define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7]) | |
384 | #define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val)) | |
385 | #define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val)) | |
386 | #define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val)) | |
387 | #define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val)) | |
388 | #define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val)) | |
389 | #define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val)) | |
22027945 | 390 | |
/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

/* Atomically OR @val into *@reg using an LL/SC retry loop */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* SC leaves 0 in temp on failure: retry */
}
412 | ||
413 | static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg, | |
414 | unsigned long val) | |
415 | { | |
416 | unsigned long temp; | |
417 | do { | |
418 | __asm__ __volatile__( | |
d85ebff0 | 419 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
c73c99b0 JH |
420 | " " __LL "%0, %1 \n" |
421 | " and %0, %2 \n" | |
422 | " " __SC "%0, %1 \n" | |
423 | " .set mips0 \n" | |
424 | : "=&r" (temp), "+m" (*reg) | |
425 | : "r" (~val)); | |
426 | } while (unlikely(!temp)); | |
427 | } | |
428 | ||
429 | static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, | |
430 | unsigned long change, | |
431 | unsigned long val) | |
432 | { | |
433 | unsigned long temp; | |
434 | do { | |
435 | __asm__ __volatile__( | |
d85ebff0 | 436 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
c73c99b0 JH |
437 | " " __LL "%0, %1 \n" |
438 | " and %0, %2 \n" | |
439 | " or %0, %3 \n" | |
440 | " " __SC "%0, %1 \n" | |
441 | " .set mips0 \n" | |
442 | : "=&r" (temp), "+m" (*reg) | |
443 | : "r" (~change), "r" (val & change)); | |
444 | } while (unlikely(!temp)); | |
445 | } | |
446 | ||
/* Set/clear bits in the guest Status register (not modified from irq context) */
#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val)				\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val)				\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val)			\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
					change, val)

/* Guest EBASE lives in the shadow register file at (PRID, sel 1) */
#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
/*
 * Replace the EBASE bits selected by @change with the corresponding bits
 * of @val.  Wrapped in do { } while (0) so the multi-statement macro acts
 * as a single statement and is safe in unbraced if/else bodies (a bare
 * { } block followed by ';' would break an else clause).
 */
#define kvm_change_c0_guest_ebase(cop0, change, val)			\
do {									\
	kvm_clear_c0_guest_ebase(cop0, change);				\
	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\
} while (0)
466 | ||
/* Helpers */

/*
 * Whether this vCPU may ever be given an FPU: requires both the host FPU
 * check (raw_cpu_has_fpu) and the userspace enable flag.  When
 * raw_cpu_has_fpu is a compile-time constant the whole first term folds
 * to a constant, letting configs without FPU compile the check away.
 */
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

/* FPU is actually present to the guest only if guest Config1.FP is also set */
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}
/*
 * Whether this vCPU may ever be given MSA: host MSA check (cpu_has_msa,
 * folded away when compile-time constant) plus the userspace enable flag.
 */
static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

/* MSA is actually present to the guest only if guest Config3.MSA is also set */
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
/*
 * Ops table installed by the KVM MIPS implementation (via
 * kvm_mips_emulation_init): guest exception handlers, vcpu setup,
 * address translation and interrupt queueing callbacks.
 */
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vm_init)(struct kvm *kvm);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
	int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
					   struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
						    u32 *opc,
						    struct kvm_run *run,
						    struct kvm_vcpu *vcpu);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
				   unsigned long entrylo0,
				   unsigned long entrylo1,
				   int flush_dcache_mask);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);
extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
							   unsigned long gva);
extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
				    struct kvm_vcpu *vcpu);
extern void kvm_local_flush_tlb_all(void);
extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
/* Emulation */
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							 struct kvm_run *run);

/* Guest Count/Compare timer emulation */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
740765ce | 696 | |
31cf7498 | 697 | enum emulation_result kvm_mips_check_privilege(u32 cause, |
bdb7ed86 | 698 | u32 *opc, |
740765ce SL |
699 | struct kvm_run *run, |
700 | struct kvm_vcpu *vcpu); | |
701 | ||
258f3a2e | 702 | enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, |
bdb7ed86 JH |
703 | u32 *opc, |
704 | u32 cause, | |
740765ce SL |
705 | struct kvm_run *run, |
706 | struct kvm_vcpu *vcpu); | |
258f3a2e | 707 | enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, |
bdb7ed86 JH |
708 | u32 *opc, |
709 | u32 cause, | |
740765ce SL |
710 | struct kvm_run *run, |
711 | struct kvm_vcpu *vcpu); | |
258f3a2e | 712 | enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, |
bdb7ed86 | 713 | u32 cause, |
740765ce SL |
714 | struct kvm_run *run, |
715 | struct kvm_vcpu *vcpu); | |
258f3a2e | 716 | enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, |
bdb7ed86 | 717 | u32 cause, |
740765ce SL |
718 | struct kvm_run *run, |
719 | struct kvm_vcpu *vcpu); | |
720 | ||
c771607a JH |
721 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); |
722 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); | |
723 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); | |
724 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); | |
725 | ||
740765ce | 726 | /* Dynamic binary translation */ |
258f3a2e JH |
727 | extern int kvm_mips_trans_cache_index(union mips_instruction inst, |
728 | u32 *opc, struct kvm_vcpu *vcpu); | |
729 | extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, | |
730 | struct kvm_vcpu *vcpu); | |
731 | extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, | |
732 | struct kvm_vcpu *vcpu); | |
733 | extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, | |
734 | struct kvm_vcpu *vcpu); | |
740765ce SL |
735 | |
736 | /* Misc */ | |
d98403a5 | 737 | extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); |
740765ce SL |
738 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); |
739 | ||
/*
 * No-op implementations of the generic KVM architecture hooks.
 * The arch-independent KVM core invokes these at various lifecycle
 * points (hardware teardown, memslot updates, shadow-TLB flushes,
 * vCPU scheduling and blocking).  MIPS needs no work at any of these
 * points, so each hook is an empty static inline in the header and
 * the call sites compile to nothing.
 */
13a34e06 | 740 | static inline void kvm_arch_hardware_disable(void) {}
0865e636 RK |
741 | static inline void kvm_arch_hardware_unsetup(void) {}
742 | static inline void kvm_arch_sync_events(struct kvm *kvm) {}
/* Memslot hooks: no per-slot arch state to free or refresh on MIPS. */
743 | static inline void kvm_arch_free_memslot(struct kvm *kvm,
744 | struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
15f46015 | 745 | static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
0865e636 RK |
746 | static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
747 | static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
748 | struct kvm_memory_slot *slot) {}
749 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
/* Scheduler/blocking notifications: nothing to do on this architecture. */
750 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
3217f7c2 CD |
751 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
752 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
3491caf2 | 753 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
740765ce SL |
754 | |
755 | #endif /* __MIPS_KVM_HOST_H__ */ |