/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
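
/*
 * Pull in the host cache routines. CONFIG_MIPS_MT is temporarily
 * undefined around r4kcache.h and restored below; the intent appears
 * to be to get the non-MT variants of the cache ops.
 */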
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
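
/*
 * ASID helpers: each VCPU maintains per-CPU shadow ASIDs for guest
 * kernel and guest user mode. These return the ASID for the current
 * processor, and the host TLB index reserved for the commpage.
 */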
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
/*
 * Structure defining a TLB entry data set.
 */
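
/*
 * Dump the host TLB to the kernel log; entries with neither page valid
 * are flagged with '*'. The TLB is read entry by entry with interrupts
 * disabled, and EntryHi/PageMask are restored afterwards.
 */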
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
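
/* Dump the VCPU's software-managed guest TLB to the kernel log. */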
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
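
/*
 * Lazily populate the guest physical page map: pin the host page backing
 * @gfn under SRCU and cache its pfn in kvm->arch.guest_pmap.
 */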
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
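
/*
 * Write a TLB entry into the host TLB: probe for an existing entry
 * matching @entryhi and overwrite it if found, otherwise write to a
 * random index, then optionally flush the D-cache pages covered.
 */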
/* XXXKYMA: Must be called with interrupts disabled */
/* Set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		write_c0_entryhi(old_entryhi);
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush the D-cache for any valid page halves of the new entry */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
/* XXXKYMA: Must be called with interrupts disabled */
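/*
 * Handle a fault on a guest KSEG0 address by mapping the even/odd pair
 * of guest physical pages containing the faulting address into one host
 * TLB entry.
 */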
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
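
/*
 * Map the VCPU's commpage into the host TLB at the index reserved for
 * it; only the even (lo0) half of the entry is marked valid.
 */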
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
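
/*
 * Translate a guest TLB entry into a matching host TLB entry: map the
 * guest physical pages it references, combine the host PFNs with the
 * D/V attributes taken from the guest entry, and write the result to
 * the host TLB. Optionally returns the host physical addresses via
 * @hpa0/@hpa1.
 */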
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		   kvm_mips_get_kernel_asid(vcpu) :
		   kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
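
/*
 * Search the guest's software TLB for an entry matching @entryhi.
 * Returns the matching index, or -1 if no entry matches.
 */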
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

	/* Only dereference the TLB array when a match was found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index, tlb[index].tlb_lo0,
			  tlb[index].tlb_lo1);

	return index;
}
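
/*
 * Probe the host TLB for @vaddr under the guest's current ASID (kernel
 * or user, depending on mode). Returns the host TLB index, or a
 * negative value if the probe missed.
 */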
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
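
/*
 * Invalidate any host TLB entry mapping @va under the guest user ASID
 * by rewriting it with a unique, unmatchable EntryHi.
 */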
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx > 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx > 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
			  idx);

	return 0;
}
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
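
/*
 * Flush the host TLB. If @skip_kseg0 is set, entries mapping guest
 * KSEG0 addresses are read back first and preserved.
 */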
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
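
/*
 * ASID allocation for guest contexts; follows the host's
 * get_new_mmu_context(), but flushes with kvm_local_flush_tlb_all()
 * when the ASID space wraps.
 */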
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
			  cpu, vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, reload the
		 * pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the pre-empted ASID is
		 * no longer valid; set it to what it should be based on the
		 * mode of the guest (kernel/user).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
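
/*
 * Fetch the guest instruction at @opc. For mapped segments the host TLB
 * is probed first and, on a miss, the guest TLB is consulted and the
 * mapping faulted in before reading; KSEG0 addresses are translated
 * directly to a host physical address.
 */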
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[index],
					NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
					(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);