/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
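/*
 * r4kcache.h provides MT-specific variants of the cache ops when
 * CONFIG_MIPS_MT is defined; it is temporarily undefined around the
 * include (and restored afterwards), presumably so KVM picks up the
 * plain cache ops.
 */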
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1
atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
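/*
 * The helpers below return the host ASID that has been allocated for the
 * guest's kernel or user address space on the current CPU, masked to the
 * ASID width supported by the host CPU.
 */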
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
/* Structure defining a TLB entry data set. */
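/*
 * Dump every entry in the host TLB by reading them back through the CP0
 * Index/EntryHi/EntryLo registers. Interrupts are disabled around the walk
 * and the clobbered registers are restored afterwards.
 */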
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		/* Read each entry back into the tlb scratch structure */
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
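/*
 * Lazily map a guest frame: look up the pfn for @gfn once and cache it in
 * the kvm->arch.guest_pmap[] array, so later faults on the same frame can
 * be resolved without another gfn_to_pfn round trip.
 */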
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
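/*
 * Write (entryhi, entrylo0, entrylo1) into the host TLB: probe for an
 * existing entry with the same EntryHi and overwrite it if found,
 * otherwise let the hardware pick a random slot. Optionally flushes the
 * D-cache for the pair of pages being mapped.
 */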
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
/* XXXKYMA: Must be called with interrupts disabled */
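/*
 * Handle a TLB fault on a guest KSEG0 address: map the even/odd pair of
 * guest physical frames containing @badvaddr and install them as a single
 * host TLB entry (EntryLo0/EntryLo1), tagged with the guest kernel ASID.
 */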
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
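/*
 * Map the vcpu's KSEG0 commpage into the reserved host TLB slot
 * (kvm->arch.commpage_tlb, which is what the misleadingly named
 * kvm_mips_get_commpage_asid() returns), tagged with the guest kernel
 * ASID. The odd half of the pair (EntryLo1) is left invalid.
 */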
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
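/*
 * Mirror a guest TLB entry into the host TLB: resolve both guest physical
 * frames to host pfns, then combine the host pfns with the D and V bits
 * taken from the guest entry. Optionally reports the resulting host
 * physical addresses through @hpa0/@hpa1.
 */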
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
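/*
 * Search the software guest TLB array for an entry whose VPN2 and ASID
 * match @entryhi. Returns the matching index, or -1 if nothing hits.
 */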
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry on a hit, to avoid reading past the array */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
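/*
 * Probe the hardware host TLB for @vaddr under the vcpu's current guest
 * ASID (kernel or user, depending on the guest's mode). Returns the
 * probed index, which is negative on a miss.
 */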
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
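/*
 * Invalidate any host TLB entry covering guest user address @va: probe
 * under the guest user ASID and, on a hit, rewrite the slot with a unique
 * non-matching EntryHi and zeroed EntryLo values.
 */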
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid slot; only a negative index means a probe miss */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
			  idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
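/*
 * Flush the entire host TLB by writing unique invalid entries into every
 * slot. With @skip_kseg0 set, each entry is read back first and entries
 * that map guest KSEG0 are preserved.
 */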
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
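/*
 * Allocate a new ASID for @mm on @cpu: bump the per-cpu ASID cache and,
 * when the counter wraps, start a fresh ASID cycle with a full local TLB
 * flush (plus an I-cache flush on virtually tagged I-caches).
 */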
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
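/*
 * The vcpu load/put hooks below manage the per-CPU guest ASIDs: on load,
 * ASIDs from a stale ASID version are reallocated, the CP0_Count hrtimer
 * is migrated when the vcpu has moved to a different CPU, and EntryHi is
 * reloaded if we were preempted while in guest context.
 */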
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
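/*
 * Fetch the guest instruction at @opc. For mapped segments this may first
 * need to fault the translation into the host TLB via the guest TLB; for
 * guest KSEG0 the address is translated directly to a host physical
 * address and read through CKSEG0.
 */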
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);