KVM: PPC: Allow for read-only pages backing a Book3S HV guest
arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32-bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

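/*
 * Editor's sketch, not part of the original file: how a trap handler in
 * this module would consume kvmppc_kvm_pv().  The handler reads the
 * hypercall number from the guest's GPR11 and its parameters from
 * GPR3-GPR6 by itself; a caller only forwards the primary return value
 * to the guest's GPR3 (GPR4 was already filled in by the handler).
 * The function name below is hypothetical.
 */
#if 0   /* example only */
static void kvmppc_pv_dispatch_example(struct kvm_vcpu *vcpu)
{
        /* Return status to the guest in r3; r4 was set by the handler. */
        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
}
#endif
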
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
        case KVM_CAP_SYNC_MMU:
                r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

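/*
 * Editor's sketch, not part of the original file: userspace probes the
 * capabilities reported above through the generic KVM_CHECK_EXTENSION
 * ioctl on the /dev/kvm fd.  A return value > 0 means the capability is
 * available; for KVM_CAP_PPC_SMT the value is the threads-per-core count.
 */
#if 0   /* example only, userspace code */
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int smt = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);

        if (smt > 0)
                printf("PPC SMT available: %d threads per core\n", smt);
#endif
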
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        /* Don't touch the vcpu until the error return has been checked. */
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * Low-level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

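/*
 * Editor's sketch, not part of the original file: this mirrors how the
 * instruction emulator (emulate.c) uses the helpers above for a 2-byte
 * big-endian load with sign extension, e.g. lhax.  The helper only
 * records the pending access and returns EMULATE_DO_MMIO; the data shows
 * up in run->mmio.data when userspace re-enters KVM_RUN, and
 * kvmppc_complete_mmio_load() then sign-extends it into register rt.
 */
#if 0   /* example only */
        case OP_31_XOP_LHAX:
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;
#endif
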
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

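/*
 * Editor's sketch, not part of the original file: the store-side
 * counterpart, again mirroring emulate.c (here for sthx).  Note that the
 * value is read out of the source GPR immediately and packed into
 * run->mmio.data, because guest register state may change before
 * userspace completes the access.
 */
#if 0   /* example only */
        case OP_31_XOP_STHX:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;
#endif
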
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

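/*
 * Editor's sketch, not part of the original file: the userspace half of
 * the MMIO round trip completed above.  On a KVM_EXIT_MMIO exit,
 * userspace emulates the device access and, for reads, fills in
 * run->mmio.data before calling KVM_RUN again; the code above then moves
 * the data into the guest register.  device_emulate() is hypothetical.
 */
#if 0   /* example only, userspace code */
        while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
                if (run->exit_reason == KVM_EXIT_MMIO)
                        device_emulate(run->mmio.phys_addr, run->mmio.data,
                                       run->mmio.len, run->mmio.is_write);
        }
#endif
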
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;

        me = get_cpu();
        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        } else if (cpu != me && cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }
        put_cpu();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);
        kvm_vcpu_kick(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

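/*
 * Editor's sketch, not part of the original file: enabling one of the
 * per-vcpu capabilities handled above from userspace, via the generic
 * KVM_ENABLE_CAP vcpu ioctl.
 */
#if 0   /* example only, userspace code */
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_PPC_PAPR,        /* run this vcpu in PAPR mode */
        };

        if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
                perror("KVM_ENABLE_CAP");
#endif
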
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

#ifdef CONFIG_KVM_E500
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif

        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

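/*
 * Editor's note, not part of the original file: assuming KVM_SC_MAGIC_R0
 * is 0x4b564d21 ("KVM!", per asm/kvm_para.h), the sequence built above
 * encodes as:
 *
 *    hcall[0] = 0x3c004b56    lis  r0, 0x4b56
 *    hcall[1] = 0x60004d21    ori  r0, r0, 0x4d21
 *    hcall[2] = 0x44000002    sc
 *    hcall[3] = 0x60000000    nop  (ori r0, r0, 0)
 */
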
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}