/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling. This file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

/*
 * Hide CONFIG_MIPS_MT while including r4kcache.h, presumably so that the
 * plain (non-MT) cache op variants are used here.
 */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

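/* Return the ASID currently assigned to guest kernel mode on this CPU. */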
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

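/* Return the ASID currently assigned to guest user mode on this CPU. */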
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

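/*
 * Return the wired host TLB index reserved for the guest commpage (note:
 * this is a TLB index, not an ASID, despite the function's name).
 */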
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Dump the host TLB entries via printk, for debugging. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}

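/* Dump the vcpu's software-managed guest TLB entries via printk. */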
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}

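/*
 * Look up the host pfn backing @gfn and cache it in the guest physical map.
 * Returns 0 if the page is (now) mapped, -EFAULT if no pfn could be found.
 */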
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to host physical addresses */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
	unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/*
 * XXXKYMA: Must be called with interrupts disabled.
 * Pass flush_dcache_mask == 0 if no dcache flush is required.
 */
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
	unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		/* Restore EntryHi and re-enable interrupts before bailing */
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush the D-cache pages covered by this entry, if requested */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

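/*
 * Handle a TLB fault on a guest KSEG0 address: map the even/odd guest page
 * pair containing @badvaddr and install a host TLB entry covering both.
 */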
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	/* EntryLo: CCA = 3 (cacheable, noncoherent), D (dirty), V (valid) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}

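/*
 * Install the wired host TLB entry mapping the vcpu's commpage at
 * @badvaddr, using the index reserved for the commpage.
 */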
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	/* EntryLo: CCA = 3 (cacheable, noncoherent), D (dirty), V (valid) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* The commpage entry lives at a fixed, reserved index */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}

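/*
 * Handle a fault in a guest mapped segment: fault in the pages referenced by
 * the guest TLB entry @tlb, optionally report the host physical addresses
 * through @hpa0/@hpa1, and install a matching host TLB entry.
 */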
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
	struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		   kvm_mips_get_kernel_asid(vcpu) :
		   kvm_mips_get_user_asid(vcpu));
	/* CCA = 3 (cacheable, noncoherent); D and V come from the guest entry */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}

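/*
 * Search the guest TLB for an entry matching @entryhi (VPN2 plus ASID,
 * honouring the page mask and global bit of each entry). Returns the
 * matching index, or -1 if none is found.
 */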
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

	/* Only dereference the TLB array when a match was found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
	else
		kvm_debug("%s: entryhi: %#lx, not found\n", __func__, entryhi);

	return index;
}

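/*
 * Probe the host TLB for @vaddr under the vcpu's current kernel or user
 * ASID. Returns the matching index, or a negative value on no match.
 */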
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}

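/*
 * Invalidate the host TLB entry, if any, that matches @va under the guest
 * user ASID.
 */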
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* A match was found; note that index 0 is a valid entry */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}

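/* Invalidate the host TLB entry at @index. */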
/*
 * XXXKYMA: Fix this now that guest user and kernel modes no longer share
 * the same ASID.
 */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}

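/*
 * Flush the host TLB. If @skip_kseg0 is set, entries mapping guest KSEG0
 * (i.e. guest kernel entries) are preserved.
 */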
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}

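/*
 * Allocate a new ASID for @mm on @cpu. When the ASID space wraps, the local
 * TLB (and a virtually tagged icache, if present) is flushed and a new ASID
 * version cycle is started.
 */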
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
	struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

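/* Invalidate every entry in the host TLB of the local CPU. */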
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
			  cpu, vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, reload the
		 * pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the pre-empted ASID is
		 * no longer valid; set it to what it should be based on the
		 * mode of the guest (kernel/user).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

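/*
 * Fetch the guest instruction at @opc, faulting the mapping in through the
 * guest TLB if it is not present in the host TLB. Returns KVM_INVALID_INST
 * if the address cannot be translated.
 */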
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			/*
			 * Not in the host TLB: consult the guest TLB and
			 * fault the mapping in.
			 */
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[index],
					NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
						(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}

EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);