/*
 * arch/mips/kvm/kvm_mips.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
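
/*
 * These per-vcpu counters are picked up by the generic KVM code and,
 * assuming debugfs is mounted at the usual /sys/kernel/debug, exported
 * as files such as /sys/kernel/debug/kvm/wait.
 */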
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	  VCPU_STAT(wait_exits) },
	{ "cache",	  VCPU_STAT(cache_exits) },
	{ "signal",	  VCPU_STAT(signal_exits) },
	{ "interrupt",	  VCPU_STAT(int_exits) },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits) },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits) },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits) },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits) },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits) },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits) },
	{ "syscall",	  VCPU_STAT(syscall_exits) },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits) },
	{ "break_inst",	  VCPU_STAT(break_inst_exits) },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup) },
	{ NULL }
};

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	int *r = (int *)rtn;

	*r = 0;
}
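
/*
 * MIPS TLB background for the helpers below: TLB entries with an index
 * below the CP0 Wired register are excluded from random replacement by
 * TLBWR, so bumping Wired by one pins the commpage mapping for as long
 * as any KVM instance exists.
 */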
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB; it is used to map the commpage to
	 * the guest kernel.
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP\n");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	extern char mips32_exception[], mips32_exceptionEnd[];
	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits.
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;
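
	/*
	 * Layout of the handler area built below (offsets from gebase);
	 * summary comment added for readability:
	 *
	 *   0x0000                      TLB refill handler (EXL = 0)
	 *   0x0180                      general exception entry point
	 *   0x0200 + n * VECTORSPACING  vectored interrupt handlers, n = 0..7
	 *   0x2000                      relocated guest exit handler
	 */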

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel; a TLB entry will be reserved
	 * for mapping GVA @ 0xFFFF8000 to this page.
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
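
/*
 * The run loop below temporarily installs the vcpu's signal mask so that a
 * signal used to kick the vcpu interrupts __kvm_mips_vcpu_run() rather than
 * running a userspace handler, completes any MMIO load left over from the
 * previous exit, and delivers pending guest interrupts with host IRQs
 * disabled before entering the guest.
 */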
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
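
/*
 * Interrupt numbering convention used below: a positive irq->irq of 2, 3 or
 * 4 raises (queues) that guest I/O interrupt line, the negated value (-2,
 * -3, -4) lowers (dequeues) it, and irq->cpu == -1 targets the calling vcpu.
 */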
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
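
/*
 * Illustrative (hypothetical) userspace use of the list above via the
 * generic KVM_GET_ONE_REG vcpu ioctl; not part of this file's code:
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("guest PC = 0x%llx\n", pc);
 */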

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
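	/*
	 * KVM_GET_REG_LIST is a two-step protocol: userspace passes in the
	 * capacity of its buffer in reg_list.n, the kernel writes back the
	 * real count, and the register IDs are only copied out when the
	 * buffer was large enough (-E2BIG tells the caller to retry with a
	 * bigger one).
	 */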
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
	{
		struct kvm_mips_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
		       ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
831long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
832{
833 long r;
834
835 switch (ioctl) {
836 default:
ed829857 837 r = -ENOIOCTLCMD;
669e846e
SL
838 }
839
840 return r;
841}
842
843int kvm_arch_init(void *opaque)
844{
845 int ret;
846
847 if (kvm_mips_callbacks) {
848 kvm_err("kvm: module already exists\n");
849 return -EEXIST;
850 }
851
852 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
853
854 return ret;
855}
856
857void kvm_arch_exit(void)
858{
859 kvm_mips_callbacks = NULL;
860}
861
862int
863kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
864{
ed829857 865 return -ENOIOCTLCMD;
669e846e
SL
866}
867
868int
869kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
870{
ed829857 871 return -ENOIOCTLCMD;
669e846e
SL
872}
873
874int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
875{
876 return 0;
877}
878
879int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
880{
ed829857 881 return -ENOIOCTLCMD;
669e846e
SL
882}
883
884int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
885{
ed829857 886 return -ENOIOCTLCMD;
669e846e
SL
887}
888
889int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
890{
891 return VM_FAULT_SIGBUS;
892}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	printk("VCPU Register Dump:\n");
	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));

	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
}

/*
 * low level hrtimer wake routine.
 */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long)vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_fpu)
		status |= (ST0_CU1);

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode << 2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
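/*
 * For example, the signal_pending() path at the bottom of this handler
 * encodes an exit back to userspace as (-EINTR << 2) | RESUME_HOST.
 */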
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler.
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/*
	 * Do a privilege check; if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel.
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}

int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);