 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "../mm/mmu_decl.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
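/*
 * Pointers to the backend implementations.  These are presumably filled
 * in when the HV and/or PR module loads; kvm_arch_init_vm() below picks
 * one of them for each new VM.
 */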
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }
                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();
                if (vcpu->requests) {
                        /* Make sure we process requests preemptible */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }
                kvm_guest_enter();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;
        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;
        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, find it out here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif
                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void *)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }
        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;
        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;
#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
        {
                u32 last_inst;

                kvmppc_get_last_inst(vcpu, false, &last_inst);
                /* XXX Deliver Program interrupt to guest. */
                pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
                r = RESUME_HOST;
                break;
        }
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;
        int r;
        r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                         XLATE_WRITE, &pte);
        if (r < 0)
                return r;
        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;
        int rc;
        rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                          XLATE_READ, &pte);
        if (rc)
                return rc;
        if (!data && !pte.may_execute)
                return -ENOEXEC;
        if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}
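/*
 * VM creation: 'type' selects the backend.  0 means "default" (HV when
 * both are available), while KVM_VM_PPC_HV / KVM_VM_PPC_PR request a
 * specific implementation; a module reference is held for its lifetime.
 */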
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
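/*
 * Capability query.  Note that kvm may be NULL here (system-wide
 * KVM_CHECK_EXTENSION), in which case we can only guess whether HV or
 * PR semantics will apply.
 */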
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        /* Assume we're using HV mode when the HV module is loaded */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        if (kvm) {
                /*
                 * Hooray - we know which VM type we're running on. Depend on
                 * that rather than the guess above.
                 */
                hv_enabled = is_kvmppc_hv_enabled(kvm);
        }
        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_subcore;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = hv_enabled;
                /* PPC970 requires an RMA */
                if (r && cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                if (hv_enabled)
                        r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                else
                        r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
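/*
 * The two helpers below finish an access that had to exit to userspace:
 * the data userspace left in kvm_run is copied back into the register
 * the emulated instruction targeted.
 */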
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }
        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }
        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        int idx, ret;
        int is_bigendian;
        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }
        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;
        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

        return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        int is_bigendian;
        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }
        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }
        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
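/*
 * Main vcpu run entry.  Any completion state left over from the previous
 * exit (MMIO, DCR, OSI, PAPR hypercall or EPR) is written back into
 * guest registers before the guest is entered again.
 */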
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}
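/*
 * Per-vcpu capability enablement; anything switched on here is
 * re-checked by kvmppc_sanity_check() before the vcpu is declared sane.
 */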
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
        pvinfo->hcall[1] = cpu_to_be32(inst_nop);
        pvinfo->hcall[2] = cpu_to_be32(inst_nop);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
        pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
        pvinfo->hcall[2] = cpu_to_be32(inst_sc);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif
        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        case KVM_CAP_PPC_ENABLE_HCALL: {
                unsigned long hcall = cap->args[0];

                r = -EINVAL;
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    cap->args[1] > 1)
                        break;
                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
                        break;
                if (cap->args[1])
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                r = 0;
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
                break;
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;
long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}