MIPS: KVM: Emulate MSA bits in COP0 interface
deliverable/linux.git: arch/mips/kvm/mips.c
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kdebug.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/fpu.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/mmu_context.h>
23#include <asm/pgtable.h>
24
25#include <linux/kvm_host.h>
26
27#include "interrupt.h"
28#include "commpage.h"
29
30#define CREATE_TRACE_POINTS
31#include "trace.h"
32
33#ifndef VECTORSPACING
34#define VECTORSPACING 0x100 /* for EI/VI mode */
35#endif
36
37#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
38struct kvm_stats_debugfs_item debugfs_entries[] = {
39 { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
40 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
41 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
42 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
 43 { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
44 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
45 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
46 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
47 { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
48 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
49 { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
50 { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
51 { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
 52 { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
 53 { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
 54 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 55 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 56 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
57 {NULL}
58};
59
60static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
61{
62 int i;
63
64 for_each_possible_cpu(i) {
65 vcpu->arch.guest_kernel_asid[i] = 0;
66 vcpu->arch.guest_user_asid[i] = 0;
67 }
68
69 return 0;
70}
71
72/*
73 * XXXKYMA: We are simulating a processor that has the WII bit set in
74 * Config7, so we are "runnable" if interrupts are pending
75 */
76int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
77{
78 return !!(vcpu->arch.pending_exceptions);
79}
80
81int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
82{
83 return 1;
84}
85
86int kvm_arch_hardware_enable(void)
87{
88 return 0;
89}
90
91int kvm_arch_hardware_setup(void)
92{
93 return 0;
94}
95
96void kvm_arch_check_processor_compat(void *rtn)
97{
98 *(int *)rtn = 0;
99}
100
101static void kvm_mips_init_tlbs(struct kvm *kvm)
102{
103 unsigned long wired;
104
105 /*
106 * Add a wired entry to the TLB, it is used to map the commpage to
107 * the Guest kernel
108 */
109 wired = read_c0_wired();
110 write_c0_wired(wired + 1);
111 mtc0_tlbw_hazard();
112 kvm->arch.commpage_tlb = wired;
113
114 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
115 kvm->arch.commpage_tlb);
116}
117
118static void kvm_mips_init_vm_percpu(void *arg)
119{
120 struct kvm *kvm = (struct kvm *)arg;
121
122 kvm_mips_init_tlbs(kvm);
123 kvm_mips_callbacks->vm_init(kvm);
124
125}
126
127int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
128{
129 if (atomic_inc_return(&kvm_mips_instance) == 1) {
130 kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
131 __func__);
132 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
133 }
134
135 return 0;
136}
137
138void kvm_mips_free_vcpus(struct kvm *kvm)
139{
140 unsigned int i;
141 struct kvm_vcpu *vcpu;
142
143 /* Put the pages we reserved for the guest pmap */
144 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
145 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
146 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
147 }
148 kfree(kvm->arch.guest_pmap);
149
150 kvm_for_each_vcpu(i, vcpu, kvm) {
151 kvm_arch_vcpu_free(vcpu);
152 }
153
154 mutex_lock(&kvm->lock);
155
156 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
157 kvm->vcpus[i] = NULL;
158
159 atomic_set(&kvm->online_vcpus, 0);
160
161 mutex_unlock(&kvm->lock);
162}
163
164static void kvm_mips_uninit_tlbs(void *arg)
165{
166 /* Restore wired count */
167 write_c0_wired(0);
168 mtc0_tlbw_hazard();
169 /* Clear out all the TLBs */
170 kvm_local_flush_tlb_all();
171}
172
173void kvm_arch_destroy_vm(struct kvm *kvm)
174{
175 kvm_mips_free_vcpus(kvm);
176
177 /* If this is the last instance, restore wired count */
178 if (atomic_dec_return(&kvm_mips_instance) == 0) {
179 kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
180 __func__);
181 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
182 }
183}
184
185long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
186 unsigned long arg)
187{
188 return -ENOIOCTLCMD;
189}
190
191int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
192 unsigned long npages)
193{
194 return 0;
195}
196
197int kvm_arch_prepare_memory_region(struct kvm *kvm,
198 struct kvm_memory_slot *memslot,
199 struct kvm_userspace_memory_region *mem,
200 enum kvm_mr_change change)
201{
202 return 0;
203}
204
205void kvm_arch_commit_memory_region(struct kvm *kvm,
206 struct kvm_userspace_memory_region *mem,
207 const struct kvm_memory_slot *old,
208 enum kvm_mr_change change)
209{
210 unsigned long npages = 0;
211 int i;
212
213 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
214 __func__, kvm, mem->slot, mem->guest_phys_addr,
215 mem->memory_size, mem->userspace_addr);
216
217 /* Setup Guest PMAP table */
218 if (!kvm->arch.guest_pmap) {
219 if (mem->slot == 0)
220 npages = mem->memory_size >> PAGE_SHIFT;
221
222 if (npages) {
223 kvm->arch.guest_pmap_npages = npages;
224 kvm->arch.guest_pmap =
225 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
226
227 if (!kvm->arch.guest_pmap) {
228 kvm_err("Failed to allocate guest PMAP");
229 return;
230 }
231
232 kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
233 npages, kvm->arch.guest_pmap);
234
235 /* Now setup the page table */
236 for (i = 0; i < npages; i++)
237 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
238 }
239 }
240}
241
242struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
243{
244 int err, size, offset;
245 void *gebase;
246 int i;
247
248 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
249
250 if (!vcpu) {
251 err = -ENOMEM;
252 goto out;
253 }
254
255 err = kvm_vcpu_init(vcpu, kvm, id);
256
257 if (err)
258 goto out_free_cpu;
259
260 kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
261
262 /*
263 * Allocate space for host mode exception handlers that handle
264 * guest mode exits
265 */
266 if (cpu_has_veic || cpu_has_vint)
267 size = 0x200 + VECTORSPACING * 64;
268 else
269 size = 0x4000;
270
271 /* Save Linux EBASE */
272 vcpu->arch.host_ebase = (void *)read_c0_ebase();
273
274 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
275
276 if (!gebase) {
277 err = -ENOMEM;
278 goto out_free_cpu;
279 }
280 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
281 ALIGN(size, PAGE_SIZE), gebase);
282
283 /* Save new ebase */
284 vcpu->arch.guest_ebase = gebase;
285
286 /* Copy L1 Guest Exception handler to correct offset */
287
288 /* TLB Refill, EXL = 0 */
289 memcpy(gebase, mips32_exception,
290 mips32_exceptionEnd - mips32_exception);
291
292 /* General Exception Entry point */
293 memcpy(gebase + 0x180, mips32_exception,
294 mips32_exceptionEnd - mips32_exception);
295
296 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
297 for (i = 0; i < 8; i++) {
298 kvm_debug("L1 Vectored handler @ %p\n",
299 gebase + 0x200 + (i * VECTORSPACING));
300 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
301 mips32_exceptionEnd - mips32_exception);
302 }
303
304 /* General handler, relocate to unmapped space for sanity's sake */
305 offset = 0x2000;
306 kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
307 gebase + offset,
308 mips32_GuestExceptionEnd - mips32_GuestException);
309
310 memcpy(gebase + offset, mips32_GuestException,
311 mips32_GuestExceptionEnd - mips32_GuestException);
312
313 /* Invalidate the icache for these ranges */
314 local_flush_icache_range((unsigned long)gebase,
315 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
316
317 /*
318 * Allocate comm page for guest kernel, a TLB will be reserved for
319 * mapping GVA @ 0xFFFF8000 to this page
320 */
321 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
322
323 if (!vcpu->arch.kseg0_commpage) {
324 err = -ENOMEM;
325 goto out_free_gebase;
326 }
327
328 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
329 kvm_mips_commpage_init(vcpu);
330
331 /* Init */
332 vcpu->arch.last_sched_cpu = -1;
333
334 /* Start off the timer */
335 kvm_mips_init_count(vcpu);
336
337 return vcpu;
338
339out_free_gebase:
340 kfree(gebase);
341
342out_free_cpu:
343 kfree(vcpu);
344
345out:
346 return ERR_PTR(err);
347}
348
349void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
350{
351 hrtimer_cancel(&vcpu->arch.comparecount_timer);
352
353 kvm_vcpu_uninit(vcpu);
354
355 kvm_mips_dump_stats(vcpu);
356
357 kfree(vcpu->arch.guest_ebase);
358 kfree(vcpu->arch.kseg0_commpage);
359 kfree(vcpu);
360}
361
362void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
363{
364 kvm_arch_vcpu_free(vcpu);
365}
366
367int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
368 struct kvm_guest_debug *dbg)
369{
370 return -ENOIOCTLCMD;
371}
372
373int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
374{
375 int r = 0;
376 sigset_t sigsaved;
377
378 if (vcpu->sigset_active)
379 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
380
381 if (vcpu->mmio_needed) {
382 if (!vcpu->mmio_is_write)
383 kvm_mips_complete_mmio_load(vcpu, run);
384 vcpu->mmio_needed = 0;
385 }
386
387 lose_fpu(1);
388
389 local_irq_disable();
390 /* Check if we have any exceptions/interrupts pending */
391 kvm_mips_deliver_interrupts(vcpu,
392 kvm_read_c0_guest_cause(vcpu->arch.cop0));
393
394 kvm_guest_enter();
395
396 /* Disable hardware page table walking while in guest */
397 htw_stop();
398
399 r = __kvm_mips_vcpu_run(run, vcpu);
400
401 /* Re-enable HTW before enabling interrupts */
402 htw_start();
403
404 kvm_guest_exit();
405 local_irq_enable();
406
407 if (vcpu->sigset_active)
408 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
409
410 return r;
411}
412
413int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
414 struct kvm_mips_interrupt *irq)
415{
416 int intr = (int)irq->irq;
417 struct kvm_vcpu *dvcpu = NULL;
418
419 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
420 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
421 (int)intr);
422
423 if (irq->cpu == -1)
424 dvcpu = vcpu;
425 else
426 dvcpu = vcpu->kvm->vcpus[irq->cpu];
427
428 if (intr == 2 || intr == 3 || intr == 4) {
429 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
430
431 } else if (intr == -2 || intr == -3 || intr == -4) {
432 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
433 } else {
434 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
435 irq->cpu, irq->irq);
436 return -EINVAL;
437 }
438
439 dvcpu->arch.wait = 0;
440
441 if (waitqueue_active(&dvcpu->wq))
442 wake_up_interruptible(&dvcpu->wq);
443
444 return 0;
445}
446
447int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
448 struct kvm_mp_state *mp_state)
449{
450 return -ENOIOCTLCMD;
451}
452
453int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
454 struct kvm_mp_state *mp_state)
455{
456 return -ENOIOCTLCMD;
457}
458
459static u64 kvm_mips_get_one_regs[] = {
460 KVM_REG_MIPS_R0,
461 KVM_REG_MIPS_R1,
462 KVM_REG_MIPS_R2,
463 KVM_REG_MIPS_R3,
464 KVM_REG_MIPS_R4,
465 KVM_REG_MIPS_R5,
466 KVM_REG_MIPS_R6,
467 KVM_REG_MIPS_R7,
468 KVM_REG_MIPS_R8,
469 KVM_REG_MIPS_R9,
470 KVM_REG_MIPS_R10,
471 KVM_REG_MIPS_R11,
472 KVM_REG_MIPS_R12,
473 KVM_REG_MIPS_R13,
474 KVM_REG_MIPS_R14,
475 KVM_REG_MIPS_R15,
476 KVM_REG_MIPS_R16,
477 KVM_REG_MIPS_R17,
478 KVM_REG_MIPS_R18,
479 KVM_REG_MIPS_R19,
480 KVM_REG_MIPS_R20,
481 KVM_REG_MIPS_R21,
482 KVM_REG_MIPS_R22,
483 KVM_REG_MIPS_R23,
484 KVM_REG_MIPS_R24,
485 KVM_REG_MIPS_R25,
486 KVM_REG_MIPS_R26,
487 KVM_REG_MIPS_R27,
488 KVM_REG_MIPS_R28,
489 KVM_REG_MIPS_R29,
490 KVM_REG_MIPS_R30,
491 KVM_REG_MIPS_R31,
492
493 KVM_REG_MIPS_HI,
494 KVM_REG_MIPS_LO,
495 KVM_REG_MIPS_PC,
496
497 KVM_REG_MIPS_CP0_INDEX,
498 KVM_REG_MIPS_CP0_CONTEXT,
499 KVM_REG_MIPS_CP0_USERLOCAL,
500 KVM_REG_MIPS_CP0_PAGEMASK,
501 KVM_REG_MIPS_CP0_WIRED,
502 KVM_REG_MIPS_CP0_HWRENA,
503 KVM_REG_MIPS_CP0_BADVADDR,
504 KVM_REG_MIPS_CP0_COUNT,
505 KVM_REG_MIPS_CP0_ENTRYHI,
506 KVM_REG_MIPS_CP0_COMPARE,
507 KVM_REG_MIPS_CP0_STATUS,
508 KVM_REG_MIPS_CP0_CAUSE,
509 KVM_REG_MIPS_CP0_EPC,
510 KVM_REG_MIPS_CP0_PRID,
511 KVM_REG_MIPS_CP0_CONFIG,
512 KVM_REG_MIPS_CP0_CONFIG1,
513 KVM_REG_MIPS_CP0_CONFIG2,
514 KVM_REG_MIPS_CP0_CONFIG3,
515 KVM_REG_MIPS_CP0_CONFIG4,
516 KVM_REG_MIPS_CP0_CONFIG5,
517 KVM_REG_MIPS_CP0_CONFIG7,
518 KVM_REG_MIPS_CP0_ERROREPC,
519
520 KVM_REG_MIPS_COUNT_CTL,
521 KVM_REG_MIPS_COUNT_RESUME,
522 KVM_REG_MIPS_COUNT_HZ,
523};
524
525static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
526 const struct kvm_one_reg *reg)
527{
528 struct mips_coproc *cop0 = vcpu->arch.cop0;
529 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
530 int ret;
531 s64 v;
532 unsigned int idx;
533
534 switch (reg->id) {
535 /* General purpose registers */
536 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
537 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
538 break;
539 case KVM_REG_MIPS_HI:
540 v = (long)vcpu->arch.hi;
541 break;
542 case KVM_REG_MIPS_LO:
543 v = (long)vcpu->arch.lo;
544 break;
545 case KVM_REG_MIPS_PC:
546 v = (long)vcpu->arch.pc;
547 break;
548
549 /* Floating point registers */
550 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
551 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
552 return -EINVAL;
553 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
554 /* Odd singles in top of even double when FR=0 */
555 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
556 v = get_fpr32(&fpu->fpr[idx], 0);
557 else
558 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
559 break;
560 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
561 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
562 return -EINVAL;
563 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
564 /* Can't access odd doubles in FR=0 mode */
565 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
566 return -EINVAL;
567 v = get_fpr64(&fpu->fpr[idx], 0);
568 break;
569 case KVM_REG_MIPS_FCR_IR:
570 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
571 return -EINVAL;
572 v = boot_cpu_data.fpu_id;
573 break;
574 case KVM_REG_MIPS_FCR_CSR:
575 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
576 return -EINVAL;
577 v = fpu->fcr31;
578 break;
579
580 /* Co-processor 0 registers */
581 case KVM_REG_MIPS_CP0_INDEX:
582 v = (long)kvm_read_c0_guest_index(cop0);
583 break;
584 case KVM_REG_MIPS_CP0_CONTEXT:
585 v = (long)kvm_read_c0_guest_context(cop0);
586 break;
587 case KVM_REG_MIPS_CP0_USERLOCAL:
588 v = (long)kvm_read_c0_guest_userlocal(cop0);
589 break;
590 case KVM_REG_MIPS_CP0_PAGEMASK:
591 v = (long)kvm_read_c0_guest_pagemask(cop0);
592 break;
593 case KVM_REG_MIPS_CP0_WIRED:
594 v = (long)kvm_read_c0_guest_wired(cop0);
595 break;
596 case KVM_REG_MIPS_CP0_HWRENA:
597 v = (long)kvm_read_c0_guest_hwrena(cop0);
598 break;
599 case KVM_REG_MIPS_CP0_BADVADDR:
600 v = (long)kvm_read_c0_guest_badvaddr(cop0);
601 break;
602 case KVM_REG_MIPS_CP0_ENTRYHI:
603 v = (long)kvm_read_c0_guest_entryhi(cop0);
604 break;
605 case KVM_REG_MIPS_CP0_COMPARE:
606 v = (long)kvm_read_c0_guest_compare(cop0);
607 break;
608 case KVM_REG_MIPS_CP0_STATUS:
609 v = (long)kvm_read_c0_guest_status(cop0);
610 break;
611 case KVM_REG_MIPS_CP0_CAUSE:
612 v = (long)kvm_read_c0_guest_cause(cop0);
613 break;
614 case KVM_REG_MIPS_CP0_EPC:
615 v = (long)kvm_read_c0_guest_epc(cop0);
616 break;
617 case KVM_REG_MIPS_CP0_PRID:
618 v = (long)kvm_read_c0_guest_prid(cop0);
619 break;
620 case KVM_REG_MIPS_CP0_CONFIG:
621 v = (long)kvm_read_c0_guest_config(cop0);
622 break;
623 case KVM_REG_MIPS_CP0_CONFIG1:
624 v = (long)kvm_read_c0_guest_config1(cop0);
625 break;
626 case KVM_REG_MIPS_CP0_CONFIG2:
627 v = (long)kvm_read_c0_guest_config2(cop0);
628 break;
629 case KVM_REG_MIPS_CP0_CONFIG3:
630 v = (long)kvm_read_c0_guest_config3(cop0);
631 break;
632 case KVM_REG_MIPS_CP0_CONFIG4:
633 v = (long)kvm_read_c0_guest_config4(cop0);
634 break;
635 case KVM_REG_MIPS_CP0_CONFIG5:
636 v = (long)kvm_read_c0_guest_config5(cop0);
637 break;
638 case KVM_REG_MIPS_CP0_CONFIG7:
639 v = (long)kvm_read_c0_guest_config7(cop0);
640 break;
641 case KVM_REG_MIPS_CP0_ERROREPC:
642 v = (long)kvm_read_c0_guest_errorepc(cop0);
643 break;
644 /* registers to be handled specially */
645 case KVM_REG_MIPS_CP0_COUNT:
646 case KVM_REG_MIPS_COUNT_CTL:
647 case KVM_REG_MIPS_COUNT_RESUME:
648 case KVM_REG_MIPS_COUNT_HZ:
649 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
650 if (ret)
651 return ret;
652 break;
653 default:
654 return -EINVAL;
655 }
656 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
657 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
658
659 return put_user(v, uaddr64);
660 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
661 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
662 u32 v32 = (u32)v;
663
664 return put_user(v32, uaddr32);
665 } else {
666 return -EINVAL;
667 }
668}
669
670static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
671 const struct kvm_one_reg *reg)
672{
673 struct mips_coproc *cop0 = vcpu->arch.cop0;
674 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
675 s64 v;
676 unsigned int idx;
677
678 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
679 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
680
681 if (get_user(v, uaddr64) != 0)
682 return -EFAULT;
683 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
684 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
685 s32 v32;
686
687 if (get_user(v32, uaddr32) != 0)
688 return -EFAULT;
689 v = (s64)v32;
690 } else {
691 return -EINVAL;
692 }
693
694 switch (reg->id) {
695 /* General purpose registers */
696 case KVM_REG_MIPS_R0:
697 /* Silently ignore requests to set $0 */
698 break;
699 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
700 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
701 break;
702 case KVM_REG_MIPS_HI:
703 vcpu->arch.hi = v;
704 break;
705 case KVM_REG_MIPS_LO:
706 vcpu->arch.lo = v;
707 break;
708 case KVM_REG_MIPS_PC:
709 vcpu->arch.pc = v;
710 break;
711
712 /* Floating point registers */
713 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
714 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
715 return -EINVAL;
716 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
717 /* Odd singles in top of even double when FR=0 */
718 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
719 set_fpr32(&fpu->fpr[idx], 0, v);
720 else
721 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
722 break;
723 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
724 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
725 return -EINVAL;
726 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
727 /* Can't access odd doubles in FR=0 mode */
728 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
729 return -EINVAL;
730 set_fpr64(&fpu->fpr[idx], 0, v);
731 break;
732 case KVM_REG_MIPS_FCR_IR:
733 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
734 return -EINVAL;
735 /* Read-only */
736 break;
737 case KVM_REG_MIPS_FCR_CSR:
738 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
739 return -EINVAL;
740 fpu->fcr31 = v;
741 break;
742
743 /* Co-processor 0 registers */
744 case KVM_REG_MIPS_CP0_INDEX:
745 kvm_write_c0_guest_index(cop0, v);
746 break;
747 case KVM_REG_MIPS_CP0_CONTEXT:
748 kvm_write_c0_guest_context(cop0, v);
749 break;
750 case KVM_REG_MIPS_CP0_USERLOCAL:
751 kvm_write_c0_guest_userlocal(cop0, v);
752 break;
753 case KVM_REG_MIPS_CP0_PAGEMASK:
754 kvm_write_c0_guest_pagemask(cop0, v);
755 break;
756 case KVM_REG_MIPS_CP0_WIRED:
757 kvm_write_c0_guest_wired(cop0, v);
758 break;
759 case KVM_REG_MIPS_CP0_HWRENA:
760 kvm_write_c0_guest_hwrena(cop0, v);
761 break;
762 case KVM_REG_MIPS_CP0_BADVADDR:
763 kvm_write_c0_guest_badvaddr(cop0, v);
764 break;
765 case KVM_REG_MIPS_CP0_ENTRYHI:
766 kvm_write_c0_guest_entryhi(cop0, v);
767 break;
768 case KVM_REG_MIPS_CP0_STATUS:
769 kvm_write_c0_guest_status(cop0, v);
770 break;
771 case KVM_REG_MIPS_CP0_EPC:
772 kvm_write_c0_guest_epc(cop0, v);
773 break;
774 case KVM_REG_MIPS_CP0_PRID:
775 kvm_write_c0_guest_prid(cop0, v);
776 break;
777 case KVM_REG_MIPS_CP0_ERROREPC:
778 kvm_write_c0_guest_errorepc(cop0, v);
779 break;
780 /* registers to be handled specially */
781 case KVM_REG_MIPS_CP0_COUNT:
782 case KVM_REG_MIPS_CP0_COMPARE:
783 case KVM_REG_MIPS_CP0_CAUSE:
784 case KVM_REG_MIPS_CP0_CONFIG:
785 case KVM_REG_MIPS_CP0_CONFIG1:
786 case KVM_REG_MIPS_CP0_CONFIG2:
787 case KVM_REG_MIPS_CP0_CONFIG3:
788 case KVM_REG_MIPS_CP0_CONFIG4:
789 case KVM_REG_MIPS_CP0_CONFIG5:
790 case KVM_REG_MIPS_COUNT_CTL:
791 case KVM_REG_MIPS_COUNT_RESUME:
792 case KVM_REG_MIPS_COUNT_HZ:
793 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
794 default:
795 return -EINVAL;
796 }
797 return 0;
798}
799
800static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
801 struct kvm_enable_cap *cap)
802{
803 int r = 0;
804
805 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
806 return -EINVAL;
807 if (cap->flags)
808 return -EINVAL;
809 if (cap->args[0])
810 return -EINVAL;
811
812 switch (cap->cap) {
813 case KVM_CAP_MIPS_FPU:
814 vcpu->arch.fpu_enabled = true;
815 break;
816 default:
817 r = -EINVAL;
818 break;
819 }
820
821 return r;
822}
823
824long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
825 unsigned long arg)
826{
827 struct kvm_vcpu *vcpu = filp->private_data;
828 void __user *argp = (void __user *)arg;
829 long r;
830
831 switch (ioctl) {
832 case KVM_SET_ONE_REG:
833 case KVM_GET_ONE_REG: {
834 struct kvm_one_reg reg;
835
836 if (copy_from_user(&reg, argp, sizeof(reg)))
837 return -EFAULT;
838 if (ioctl == KVM_SET_ONE_REG)
839 return kvm_mips_set_reg(vcpu, &reg);
840 else
841 return kvm_mips_get_reg(vcpu, &reg);
842 }
843 case KVM_GET_REG_LIST: {
844 struct kvm_reg_list __user *user_list = argp;
845 u64 __user *reg_dest;
846 struct kvm_reg_list reg_list;
847 unsigned n;
848
849 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
850 return -EFAULT;
851 n = reg_list.n;
852 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
853 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
854 return -EFAULT;
855 if (n < reg_list.n)
856 return -E2BIG;
857 reg_dest = user_list->reg;
858 if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
859 sizeof(kvm_mips_get_one_regs)))
860 return -EFAULT;
861 return 0;
862 }
863 case KVM_NMI:
864 /* Treat the NMI as a CPU reset */
865 r = kvm_mips_reset_vcpu(vcpu);
866 break;
867 case KVM_INTERRUPT:
868 {
869 struct kvm_mips_interrupt irq;
870
871 r = -EFAULT;
872 if (copy_from_user(&irq, argp, sizeof(irq)))
873 goto out;
874
875 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
876 irq.irq);
877
878 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
879 break;
880 }
881 case KVM_ENABLE_CAP: {
882 struct kvm_enable_cap cap;
883
884 r = -EFAULT;
885 if (copy_from_user(&cap, argp, sizeof(cap)))
886 goto out;
887 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
888 break;
889 }
890 default:
891 r = -ENOIOCTLCMD;
892 }
893
894out:
895 return r;
896}
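/*
 * Editor's note: a minimal userspace sketch, not part of this file, of the
 * KVM_GET_ONE_REG path handled above, reading the guest PC. The register
 * id encodes both the register and its size, and the handler copies the
 * value to the address in reg.addr. Assumes the MIPS KVM_REG_* ids are
 * available via <linux/kvm.h>/<asm/kvm.h> and a MIPS vcpu fd.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_MIPS_PC,		/* 64-bit register id */
		.addr = (uintptr_t)pc,		/* destination buffer */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}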
897
898/* Get (and clear) the dirty memory log for a memory slot. */
899int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
900{
901 struct kvm_memory_slot *memslot;
902 unsigned long ga, ga_end;
903 int is_dirty = 0;
904 int r;
905 unsigned long n;
906
907 mutex_lock(&kvm->slots_lock);
908
909 r = kvm_get_dirty_log(kvm, log, &is_dirty);
910 if (r)
911 goto out;
912
913 /* If nothing is dirty, don't bother messing with page tables. */
914 if (is_dirty) {
915 memslot = &kvm->memslots->memslots[log->slot];
916
917 ga = memslot->base_gfn << PAGE_SHIFT;
918 ga_end = ga + (memslot->npages << PAGE_SHIFT);
919
920 kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
921 ga_end);
922
923 n = kvm_dirty_bitmap_bytes(memslot);
924 memset(memslot->dirty_bitmap, 0, n);
925 }
926
927 r = 0;
928out:
929 mutex_unlock(&kvm->slots_lock);
930 return r;
931
932}
933
934long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
935{
936 long r;
937
938 switch (ioctl) {
939 default:
940 r = -ENOIOCTLCMD;
941 }
942
943 return r;
944}
945
946int kvm_arch_init(void *opaque)
947{
948 if (kvm_mips_callbacks) {
949 kvm_err("kvm: module already exists\n");
950 return -EEXIST;
951 }
952
953 return kvm_mips_emulation_init(&kvm_mips_callbacks);
954}
955
956void kvm_arch_exit(void)
957{
958 kvm_mips_callbacks = NULL;
959}
960
961int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
962 struct kvm_sregs *sregs)
963{
964 return -ENOIOCTLCMD;
965}
966
967int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
968 struct kvm_sregs *sregs)
969{
970 return -ENOIOCTLCMD;
971}
972
973void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
974{
975}
976
977int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
978{
979 return -ENOIOCTLCMD;
980}
981
982int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
983{
984 return -ENOIOCTLCMD;
985}
986
987int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
988{
989 return VM_FAULT_SIGBUS;
990}
991
992int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
993{
994 int r;
995
996 switch (ext) {
997 case KVM_CAP_ONE_REG:
998 case KVM_CAP_ENABLE_CAP:
999 r = 1;
1000 break;
1001 case KVM_CAP_COALESCED_MMIO:
1002 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1003 break;
1004 case KVM_CAP_MIPS_FPU:
1005 r = !!cpu_has_fpu;
1006 break;
1007 default:
1008 r = 0;
1009 break;
1010 }
1011 return r;
1012}
1013
1014int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1015{
1016 return kvm_mips_pending_timer(vcpu);
1017}
1018
1019int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1020{
1021 int i;
1022 struct mips_coproc *cop0;
1023
1024 if (!vcpu)
1025 return -1;
1026
1027 kvm_debug("VCPU Register Dump:\n");
1028 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1029 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1030
1031 for (i = 0; i < 32; i += 4) {
1032 kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1033 vcpu->arch.gprs[i],
1034 vcpu->arch.gprs[i + 1],
1035 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1036 }
1037 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1038 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1039
1040 cop0 = vcpu->arch.cop0;
1041 kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
1042 kvm_read_c0_guest_status(cop0),
1043 kvm_read_c0_guest_cause(cop0));
1044
1045 kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1046
1047 return 0;
1048}
1049
1050int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1051{
1052 int i;
1053
1054 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1055 vcpu->arch.gprs[i] = regs->gpr[i];
1056 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1057 vcpu->arch.hi = regs->hi;
1058 vcpu->arch.lo = regs->lo;
1059 vcpu->arch.pc = regs->pc;
1060
1061 return 0;
1062}
1063
1064int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1065{
1066 int i;
1067
1068 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1069 regs->gpr[i] = vcpu->arch.gprs[i];
1070
1071 regs->hi = vcpu->arch.hi;
1072 regs->lo = vcpu->arch.lo;
1073 regs->pc = vcpu->arch.pc;
1074
1075 return 0;
1076}
1077
1078static void kvm_mips_comparecount_func(unsigned long data)
1079{
1080 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1081
1082 kvm_mips_callbacks->queue_timer_int(vcpu);
1083
1084 vcpu->arch.wait = 0;
1085 if (waitqueue_active(&vcpu->wq))
1086 wake_up_interruptible(&vcpu->wq);
1087}
1088
1089/* low level hrtimer wake routine */
1090static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
1091{
1092 struct kvm_vcpu *vcpu;
1093
1094 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
1095 kvm_mips_comparecount_func((unsigned long) vcpu);
1096 return kvm_mips_count_timeout(vcpu);
1097}
1098
1099int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1100{
1101 kvm_mips_callbacks->vcpu_init(vcpu);
1102 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
1103 HRTIMER_MODE_REL);
1104 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
1105 return 0;
1106}
1107
1108int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1109 struct kvm_translation *tr)
1110{
1111 return 0;
1112}
1113
1114/* Initial guest state */
1115int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1116{
1117 return kvm_mips_callbacks->vcpu_setup(vcpu);
1118}
1119
1120static void kvm_mips_set_c0_status(void)
1121{
1122 uint32_t status = read_c0_status();
1123
1124 if (cpu_has_dsp)
1125 status |= (ST0_MX);
1126
1127 write_c0_status(status);
1128 ehb();
1129}
1130
1131/*
1132 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1133 */
1134int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1135{
1136 uint32_t cause = vcpu->arch.host_cp0_cause;
1137 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1138 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
1139 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1140 enum emulation_result er = EMULATE_DONE;
1141 int ret = RESUME_GUEST;
1142
1143 /* re-enable HTW before enabling interrupts */
1144 htw_start();
1145
1146 /* Set a default exit reason */
1147 run->exit_reason = KVM_EXIT_UNKNOWN;
1148 run->ready_for_interrupt_injection = 1;
1149
1150 /*
1151 * Set the appropriate status bits based on host CPU features,
1152 * before we hit the scheduler
1153 */
669e846e
SL
1154 kvm_mips_set_c0_status();
1155
1156 local_irq_enable();
1157
1158 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1159 cause, opc, run, vcpu);
1160
1161 /*
1162 * Do a privilege check, if in UM most of these exit conditions end up
1163 * causing an exception to be delivered to the Guest Kernel
1164 */
1165 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1166 if (er == EMULATE_PRIV_FAIL) {
1167 goto skip_emul;
1168 } else if (er == EMULATE_FAIL) {
1169 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1170 ret = RESUME_HOST;
1171 goto skip_emul;
1172 }
1173
1174 switch (exccode) {
1175 case T_INT:
1176 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
1177
1178 ++vcpu->stat.int_exits;
1179 trace_kvm_exit(vcpu, INT_EXITS);
1180
1181 if (need_resched())
1182 cond_resched();
1183
1184 ret = RESUME_GUEST;
1185 break;
1186
1187 case T_COP_UNUSABLE:
1188 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
1189
1190 ++vcpu->stat.cop_unusable_exits;
1191 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
1192 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1193 /* XXXKYMA: Might need to return to user space */
1194 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1195 ret = RESUME_HOST;
1196 break;
1197
1198 case T_TLB_MOD:
1199 ++vcpu->stat.tlbmod_exits;
1200 trace_kvm_exit(vcpu, TLBMOD_EXITS);
1201 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1202 break;
1203
1204 case T_TLB_ST_MISS:
1205 kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1206 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1207 badvaddr);
1208
1209 ++vcpu->stat.tlbmiss_st_exits;
1210 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
1211 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1212 break;
1213
1214 case T_TLB_LD_MISS:
1215 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1216 cause, opc, badvaddr);
1217
1218 ++vcpu->stat.tlbmiss_ld_exits;
1219 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
1220 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1221 break;
1222
1223 case T_ADDR_ERR_ST:
1224 ++vcpu->stat.addrerr_st_exits;
1225 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
1226 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1227 break;
1228
1229 case T_ADDR_ERR_LD:
1230 ++vcpu->stat.addrerr_ld_exits;
1231 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
1232 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1233 break;
1234
1235 case T_SYSCALL:
1236 ++vcpu->stat.syscall_exits;
1237 trace_kvm_exit(vcpu, SYSCALL_EXITS);
1238 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1239 break;
1240
1241 case T_RES_INST:
1242 ++vcpu->stat.resvd_inst_exits;
1243 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
1244 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1245 break;
1246
1247 case T_BREAK:
1248 ++vcpu->stat.break_inst_exits;
1249 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
1250 ret = kvm_mips_callbacks->handle_break(vcpu);
1251 break;
1252
1253 case T_TRAP:
1254 ++vcpu->stat.trap_inst_exits;
1255 trace_kvm_exit(vcpu, TRAP_INST_EXITS);
1256 ret = kvm_mips_callbacks->handle_trap(vcpu);
1257 break;
1258
1259 case T_FPE:
1260 ++vcpu->stat.fpe_exits;
1261 trace_kvm_exit(vcpu, FPE_EXITS);
1262 ret = kvm_mips_callbacks->handle_fpe(vcpu);
1263 break;
1264
1265 case T_MSADIS:
1266 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1267 break;
1268
1269 default:
1270 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1271 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1272 kvm_read_c0_guest_status(vcpu->arch.cop0));
1273 kvm_arch_vcpu_dump_regs(vcpu);
1274 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1275 ret = RESUME_HOST;
1276 break;
1277
1278 }
1279
1280skip_emul:
1281 local_irq_disable();
1282
1283 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1284 kvm_mips_deliver_interrupts(vcpu, cause);
1285
1286 if (!(ret & RESUME_HOST)) {
1287 /* Only check for signals if not already exiting to userspace */
1288 if (signal_pending(current)) {
1289 run->exit_reason = KVM_EXIT_INTR;
1290 ret = (-EINTR << 2) | RESUME_HOST;
1291 ++vcpu->stat.signal_exits;
1292 trace_kvm_exit(vcpu, SIGNAL_EXITS);
1293 }
1294 }
1295
1296 if (ret == RESUME_GUEST) {
1297 /*
1298 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1299 * is live), restore FCR31 / MSACSR.
1300 *
1301 * This should be before returning to the guest exception
1302 * vector, as it may well cause an [MSA] FP exception if there
1303 * are pending exception bits unmasked. (see
1304 * kvm_mips_csr_die_notifier() for how that is handled).
1305 */
1306 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1307 read_c0_status() & ST0_CU1)
1308 __kvm_restore_fcsr(&vcpu->arch);
1309
1310 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1311 read_c0_config5() & MIPS_CONF5_MSAEN)
1312 __kvm_restore_msacsr(&vcpu->arch);
1313 }
1314
1315 /* Disable HTW before returning to guest or host */
1316 htw_stop();
1317
1318 return ret;
1319}
1320
1321/* Enable FPU for guest and restore context */
1322void kvm_own_fpu(struct kvm_vcpu *vcpu)
1323{
1324 struct mips_coproc *cop0 = vcpu->arch.cop0;
1325 unsigned int sr, cfg5;
1326
1327 preempt_disable();
1328
1329 sr = kvm_read_c0_guest_status(cop0);
1330
1331 /*
1332 * If MSA state is already live, it is undefined how it interacts with
1333 * FR=0 FPU state, and we don't want to hit reserved instruction
1334 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1335 * play it safe and save it first.
1336 *
1337 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1338 * get called when guest CU1 is set, however we can't trust the guest
1339 * not to clobber the status register directly via the commpage.
1340 */
1341 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1342 vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1343 kvm_lose_fpu(vcpu);
1344
1345 /*
1346 * Enable FPU for guest
1347 * We set FR and FRE according to guest context
1348 */
1349 change_c0_status(ST0_CU1 | ST0_FR, sr);
1350 if (cpu_has_fre) {
1351 cfg5 = kvm_read_c0_guest_config5(cop0);
1352 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1353 }
1354 enable_fpu_hazard();
1355
1356 /* If guest FPU state not active, restore it now */
1357 if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
1358 __kvm_restore_fpu(&vcpu->arch);
1359 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1360 }
1361
1362 preempt_enable();
1363}
1364
1365#ifdef CONFIG_CPU_HAS_MSA
1366/* Enable MSA for guest and restore context */
1367void kvm_own_msa(struct kvm_vcpu *vcpu)
1368{
1369 struct mips_coproc *cop0 = vcpu->arch.cop0;
1370 unsigned int sr, cfg5;
1371
1372 preempt_disable();
1373
1374 /*
1375 * Enable FPU if enabled in guest, since we're restoring FPU context
1376 * anyway. We set FR and FRE according to guest context.
1377 */
1378 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1379 sr = kvm_read_c0_guest_status(cop0);
1380
1381 /*
1382 * If FR=0 FPU state is already live, it is undefined how it
1383 * interacts with MSA state, so play it safe and save it first.
1384 */
1385 if (!(sr & ST0_FR) &&
1386 (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
1387 KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
1388 kvm_lose_fpu(vcpu);
1389
1390 change_c0_status(ST0_CU1 | ST0_FR, sr);
1391 if (sr & ST0_CU1 && cpu_has_fre) {
1392 cfg5 = kvm_read_c0_guest_config5(cop0);
1393 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1394 }
1395 }
1396
1397 /* Enable MSA for guest */
1398 set_c0_config5(MIPS_CONF5_MSAEN);
1399 enable_fpu_hazard();
1400
1401 switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
1402 case KVM_MIPS_FPU_FPU:
1403 /*
1404 * Guest FPU state already loaded, only restore upper MSA state
1405 */
1406 __kvm_restore_msa_upper(&vcpu->arch);
1407 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1408 break;
1409 case 0:
1410 /* Neither FPU or MSA already active, restore full MSA state */
1411 __kvm_restore_msa(&vcpu->arch);
1412 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
1413 if (kvm_mips_guest_has_fpu(&vcpu->arch))
1414 vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
1415 break;
1416 default:
1417 break;
1418 }
1419
1420 preempt_enable();
1421}
1422#endif
1423
1424/* Drop FPU & MSA without saving it */
1425void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1426{
1427 preempt_disable();
1428 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1429 disable_msa();
1430 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
1431 }
1432 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1433 clear_c0_status(ST0_CU1 | ST0_FR);
1434 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1435 }
1436 preempt_enable();
1437}
1438
1439/* Save and disable FPU & MSA */
1440void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1441{
1442 /*
1443 * FPU & MSA get disabled in root context (hardware) when it is disabled
1444 * in guest context (software), but the register state in the hardware
1445 * may still be in use. This is why we explicitly re-enable the hardware
1446 * before saving.
1447 */
1448
1449 preempt_disable();
1450 if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
1451 set_c0_config5(MIPS_CONF5_MSAEN);
1452 enable_fpu_hazard();
1453
1454 __kvm_save_msa(&vcpu->arch);
1455
1456 /* Disable MSA & FPU */
1457 disable_msa();
1458 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1459 clear_c0_status(ST0_CU1 | ST0_FR);
1460 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
1461 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1462 set_c0_status(ST0_CU1);
1463 enable_fpu_hazard();
1464
1465 __kvm_save_fpu(&vcpu->arch);
1466 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
1467
1468 /* Disable FPU */
1469 clear_c0_status(ST0_CU1 | ST0_FR);
1470 }
1471 preempt_enable();
1472}
1473
1474/*
1475 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1476 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1477 * exception if cause bits are set in the value being written.
1478 */
1479static int kvm_mips_csr_die_notify(struct notifier_block *self,
1480 unsigned long cmd, void *ptr)
1481{
1482 struct die_args *args = (struct die_args *)ptr;
1483 struct pt_regs *regs = args->regs;
1484 unsigned long pc;
1485
1486 /* Only interested in FPE and MSAFPE */
1487 if (cmd != DIE_FP && cmd != DIE_MSAFP)
1488 return NOTIFY_DONE;
1489
1490 /* Return immediately if guest context isn't active */
1491 if (!(current->flags & PF_VCPU))
1492 return NOTIFY_DONE;
1493
1494 /* Should never get here from user mode */
1495 BUG_ON(user_mode(regs));
1496
1497 pc = instruction_pointer(regs);
1498 switch (cmd) {
1499 case DIE_FP:
1500 /* match 2nd instruction in __kvm_restore_fcsr */
1501 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1502 return NOTIFY_DONE;
1503 break;
1504 case DIE_MSAFP:
1505 /* match 2nd/3rd instruction in __kvm_restore_msacsr */
1506 if (!cpu_has_msa ||
1507 pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1508 pc > (unsigned long)&__kvm_restore_msacsr + 8)
1509 return NOTIFY_DONE;
1510 break;
1511 }
1512
1513 /* Move PC forward a little and continue executing */
1514 instruction_pointer(regs) += 4;
1515
1516 return NOTIFY_STOP;
1517}
1518
1519static struct notifier_block kvm_mips_csr_die_notifier = {
1520 .notifier_call = kvm_mips_csr_die_notify,
1521};
1522
1523int __init kvm_mips_init(void)
1524{
1525 int ret;
1526
1527 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1528
1529 if (ret)
1530 return ret;
1531
1532 register_die_notifier(&kvm_mips_csr_die_notifier);
1533
1534 /*
1535 * On MIPS, kernel modules are executed from "mapped space", which
1536 * requires TLBs. The TLB handling code is statically linked with
1537 * the rest of the kernel (tlb.c) to avoid the possibility of
1538 * double faulting. The issue is that the TLB code references
1539 * routines that are part of the KVM module, which are only
1540 * available once the module is loaded.
1541 */
1542 kvm_mips_gfn_to_pfn = gfn_to_pfn;
1543 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
1544 kvm_mips_is_error_pfn = is_error_pfn;
1545
1546 return 0;
1547}
1548
1549void __exit kvm_mips_exit(void)
1550{
1551 kvm_exit();
1552
1553 kvm_mips_gfn_to_pfn = NULL;
1554 kvm_mips_release_pfn_clean = NULL;
1555 kvm_mips_is_error_pfn = NULL;
1556
1557 unregister_die_notifier(&kvm_mips_csr_die_notifier);
1558}
1559
1560module_init(kvm_mips_init);
1561module_exit(kvm_mips_exit);
1562
1563EXPORT_TRACEPOINT_SYMBOL(kvm_exit);