/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define VECTORSPACING 0x100	/* for EI/VI mode */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",		VCPU_STAT(wait_exits),		KVM_STAT_VCPU },
	{ "cache",		VCPU_STAT(cache_exits),		KVM_STAT_VCPU },
	{ "signal",		VCPU_STAT(signal_exits),	KVM_STAT_VCPU },
	{ "interrupt",		VCPU_STAT(int_exits),		KVM_STAT_VCPU },
	{ "cop_unusable",	VCPU_STAT(cop_unusable_exits),	KVM_STAT_VCPU },
	{ "tlbmod",		VCPU_STAT(tlbmod_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_ld",		VCPU_STAT(tlbmiss_ld_exits),	KVM_STAT_VCPU },
	{ "tlbmiss_st",		VCPU_STAT(tlbmiss_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_st",		VCPU_STAT(addrerr_st_exits),	KVM_STAT_VCPU },
	{ "addrerr_ld",		VCPU_STAT(addrerr_ld_exits),	KVM_STAT_VCPU },
	{ "syscall",		VCPU_STAT(syscall_exits),	KVM_STAT_VCPU },
	{ "resvd_inst",		VCPU_STAT(resvd_inst_exits),	KVM_STAT_VCPU },
	{ "break_inst",		VCPU_STAT(break_inst_exits),	KVM_STAT_VCPU },
	{ "trap_inst",		VCPU_STAT(trap_inst_exits),	KVM_STAT_VCPU },
	{ "msa_fpe",		VCPU_STAT(msa_fpe_exits),	KVM_STAT_VCPU },
	{ "fpe",		VCPU_STAT(fpe_exits),		KVM_STAT_VCPU },
	{ "msa_disabled",	VCPU_STAT(msa_disabled_exits),	KVM_STAT_VCPU },
	{ "flush_dcache",	VCPU_STAT(flush_dcache_exits),	KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid",	VCPU_STAT(halt_poll_invalid),	KVM_STAT_VCPU },
	{ "halt_wakeup",	VCPU_STAT(halt_wakeup),		KVM_STAT_VCPU },
	{NULL}
};

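/*
 * Invalidate the guest's kernel and user ASIDs on every possible CPU, so
 * that stale TLB mappings cannot be reused after the "reset".
 */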
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

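/* Reserve a wired TLB entry on this CPU for mapping the guest commpage. */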
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

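/*
 * Called after a memslot update. On the first registration this sizes and
 * allocates the guest pmap (guest page -> host pfn map) and marks every
 * entry invalid.
 */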
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP\n");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}

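/*
 * Creating a VCPU also allocates a private copy of the guest exception
 * vectors (TLB refill, general exception, vectored interrupts) and the
 * commpage shared with the guest kernel, and starts the CP0 timer.
 */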
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

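/*
 * Main run loop entry: complete any pending MMIO load, deliver pending
 * interrupts, then enter the guest via __kvm_mips_vcpu_run() until the
 * next exit back to userspace.
 */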
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	/* Disable hardware page table walking while in guest */
	htw_stop();

	r = __kvm_mips_vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

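/*
 * Interrupt injection from userspace: positive irq numbers (2-4) queue an
 * I/O interrupt on the target VCPU, negative numbers (-2..-4) dequeue it.
 */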
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

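/*
 * KVM_GET_ONE_REG: reg->id selects the register and its KVM_REG_SIZE_*
 * bits select the transfer width (32, 64 or 128 bits).
 */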
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

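/*
 * KVM_SET_ONE_REG counterpart: read the value at the user-supplied width
 * first, then route it to the selected register.
 */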
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_CP0_CONFIG:
	case KVM_REG_MIPS_CP0_CONFIG1:
	case KVM_REG_MIPS_CP0_CONFIG2:
	case KVM_REG_MIPS_CP0_CONFIG3:
	case KVM_REG_MIPS_CP0_CONFIG4:
	case KVM_REG_MIPS_CP0_CONFIG5:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}
out:
	return r;
}

/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

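/*
 * Runs when the guest timer expires: queue a timer interrupt for the guest
 * and wake the VCPU if it is blocked in a guest WAIT.
 */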
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		trace_kvm_exit(vcpu, FPE_EXITS);
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	if (ret == RESUME_GUEST) {
		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}

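/*
 * Guest FPU/MSA context is managed lazily below: kvm_own_fpu()/kvm_own_msa()
 * restore state on first guest use, kvm_lose_fpu() saves it back to the
 * VCPU, and kvm_drop_fpu() discards it without saving.
 */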
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
				KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
	case KVM_MIPS_FPU_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
		break;
	default:
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		disable_msa();
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
	}
	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

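/*
 * Module init/exit: register with the KVM core, install the die notifier
 * above, and publish the pfn helpers needed by the statically linked TLB
 * code.
 */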
static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);