KVM: PPC: Support irq routing and irqfd for in-kernel MPIC
arch/powerpc/kvm/powerpc.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptible */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
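
/*
 * Illustration (not part of the original file): a subarch run loop is
 * expected to call kvmppc_prepare_to_enter() with interrupts disabled and
 * only switch into guest context when it returns 1.  A minimal sketch of
 * that calling convention, where __kvmppc_enter_guest() stands in for a
 * hypothetical low-level entry routine:
 *
 *	local_irq_disable();
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0) {
 *		local_irq_enable();
 *		return r;	// run->exit_reason is already set up
 *	}
 *	r = __kvmppc_enter_guest(vcpu);	// hypothetical
 */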

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
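
/*
 * Illustration (not part of the original file): the ePAPR-style hypercall
 * ABI handled above takes the hypercall number in r11 and up to four
 * parameters in r3..r6; the status is returned in r3 and a second return
 * value in r4.  From guest code a call therefore looks roughly like:
 *
 *	lis	r0, KVM_SC_MAGIC_R0@h
 *	ori	r0, r0, KVM_SC_MAGIC_R0@l
 *	li	r11, 3		// hypothetical hypercall number
 *	sc
 *
 * (kvm_vm_ioctl_get_pvinfo() below advertises the exact instruction
 * sequence to the guest.)
 */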

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
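
/*
 * Note (added for clarity, not in the original file): the RESUME_* codes
 * returned above are interpreted by the subarch exit handlers.  RESUME_GUEST
 * re-enters the guest directly, RESUME_HOST drops back to the host and
 * userspace, and the *_NV variants additionally ask the exit path to reload
 * the guest's non-volatile registers before resuming.
 */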

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_HTAB_FD:
		r = 1;
		break;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
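
/*
 * Illustration (not part of the original file): userspace probes these
 * capabilities with the KVM_CHECK_EXTENSION ioctl on /dev/kvm, e.g. to
 * check for the in-kernel MPIC support added by this commit:
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQ_MPIC) > 0)
 *		; // may create an MPIC via KVM_CREATE_DEVICE
 */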

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return kvmppc_core_create_memslot(slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
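
/*
 * Note (added for clarity, not in the original file): vcpu->arch.io_gpr
 * encodes both the destination register file and the register number.  The
 * low bits (KVM_MMIO_REG_MASK) select the register, while the extension
 * bits (KVM_MMIO_REG_EXT_MASK) pick GPR/FPR/QPR/FQPR, which is what the
 * final switch above dispatches on.
 */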

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			     bytes, &run->mmio.data)) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}
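
/*
 * Illustration (not part of the original file): an instruction emulator
 * uses these helpers roughly as follows when it decodes a load from
 * emulated MMIO space.  If an in-kernel device on KVM_MMIO_BUS (such as an
 * ioeventfd or the in-kernel MPIC) satisfies the access, EMULATE_DONE comes
 * back and the guest can resume immediately; EMULATE_DO_MMIO means the run
 * loop has to exit to userspace with KVM_EXIT_MMIO instead:
 *
 *	emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 *	if (emulated == EMULATE_DO_MMIO)
 *		r = RESUME_HOST_NV;
 */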

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data)) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
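
/*
 * Illustration (not part of the original file): the blocks above complete
 * I/O that a previous KVM_RUN handed to userspace.  A minimal userspace
 * loop therefore looks roughly like this; on the next KVM_RUN after a
 * KVM_EXIT_MMIO read, the data placed in run->mmio.data is loaded into the
 * guest register by kvmppc_complete_mmio_load():
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			handle_mmio(run);	// fills run->mmio.data on reads
 *	}
 */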

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
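
/*
 * Illustration (not part of the original file): with a userspace interrupt
 * controller, the external interrupt line is raised and lowered through the
 * KVM_INTERRUPT vcpu ioctl:
 *
 *	struct kvm_interrupt irq = { .irq = vector };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// assert
 *	irq.irq = KVM_INTERRUPT_UNSET;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// deassert
 *
 * With an in-kernel MPIC connected (see KVM_CAP_IRQ_MPIC below), interrupt
 * delivery bypasses this path entirely.
 */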

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct file *filp;
		struct kvm_device *dev;

		r = -EBADF;
		filp = fget(cap->args[0]);
		if (!filp)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(filp);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fput(filp);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
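
/*
 * Illustration (not part of the original file): to wire a vcpu to the
 * in-kernel MPIC introduced by this commit, userspace first creates the
 * device, then passes its fd (and the cpu number) through KVM_ENABLE_CAP:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FSL_MPIC_20 };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// returns a device fd in cd.fd
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_IRQ_MPIC,
 *		.args = { cd.fd, vcpu_id },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */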

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
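
/*
 * Illustration (not part of the original file): a guest-aware userspace
 * (e.g. QEMU) fetches this template with KVM_PPC_GET_PVINFO and exposes it
 * to the guest (typically via the device tree), so the guest can copy the
 * four instructions into its own hypercall trampoline:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *	// pvinfo.hcall[0..3] now holds lis/ori/sc/nop (or sc1 on BOOKE-HV)
 */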

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
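
/*
 * Note (added for clarity, not in the original file): the allocator above
 * is lock-free; find_first_zero_bit() can race with a concurrent
 * allocation, so the atomic test_and_set_bit() in the loop condition is
 * what actually claims the LPID, and the search simply retries on a lost
 * race.
 */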

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}