MIPS: KVM: Make various Cause variables 32-bit
[deliverable/linux.git] / arch/mips/kvm/tlb.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

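/*
 * CONFIG_MIPS_MT is temporarily undefined around the <asm/r4kcache.h>
 * include, presumably so that the plain (non-MT) variants of the cache
 * access macros are generated for use below.
 */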
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

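/*
 * ASID helpers: return the hardware ASID bits to place in EntryHi for guest
 * kernel/user mappings on the current CPU. They use smp_processor_id(), so
 * callers are expected to have preemption disabled.
 */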
u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

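/*
 * Lazily map a guest page: look up the host pfn backing @gfn and cache it in
 * the guest physical page table, taking the SRCU read lock to guard against
 * concurrent memslot changes during the gfn_to_pfn lookup.
 */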
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* idx == tlbsize is also out of range, hence >= rather than > */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

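	/*
	 * If the probe hit, overwrite the matching entry so the same
	 * VPN2/ASID never appears twice in the TLB; otherwise write to a
	 * random slot.
	 */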
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

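	/*
	 * EntryLo format: bit 1 = V (valid), bit 2 = D (dirty/writable),
	 * bits 5:3 = cache coherency attribute (3 = cacheable, noncoherent).
	 * The global bit (bit 0) is left clear so the ASID is honoured.
	 */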
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* Despite its name, this helper returns the reserved commpage TLB index */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* On a miss i == KVM_MIPS_GUEST_TLB_SIZE, so don't dereference tlb[i] */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

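	/*
	 * If the probe found a matching entry (idx >= 0; note that index 0
	 * is a valid hit), invalidate it by loading a unique EntryHi value
	 * in unmapped space and clearing both EntryLo registers.
	 */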
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

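/*
 * ASID allocation mirrors the host's get_new_mmu_context(): the per-CPU ASID
 * cache counts upward, with the low bits forming the hardware ASID and the
 * high bits a generation ("version") number. When the hardware ASID wraps,
 * the local TLB is flushed and a new generation begins.
 */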
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

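/*
 * Fetch a guest instruction from a guest virtual address. For mapped
 * segments the host TLB is probed first and, on a miss, refilled from the
 * guest TLB before reading through the mapping; guest KSEG0 is translated
 * directly to a host physical address and read through CKSEG0.
 */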
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
			       KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);