/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
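
/*
 * Handle a KVM/epapr paravirtual hypercall from the guest: the hypercall
 * number arrives in r11 and up to four arguments in r3-r6. The status code
 * is handed back to the caller (destined for r3), and a second return
 * value is placed directly into r4 before returning.
 */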
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
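
/*
 * Run the instruction emulator on the trapping instruction and translate
 * the emulation result into a resume code: stay in the guest on success,
 * or drop back out to the host/userspace to service an MMIO access.
 */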
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
	case KVM_CAP_SYNC_MMU:
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
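
/*
 * Called once userspace has completed an MMIO read: pull the value out of
 * run->mmio.data, fix up endianness and optional sign extension, and
 * commit it to the GPR/FPR/QPR the original load instruction targeted.
 */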
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {

		/* Sign extend */
		case 4:
			gpr = (s64)(s32)gpr;
			break;
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
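
/*
 * Set up an MMIO load exit: describe the access in run->mmio, record which
 * register the value belongs in, and return EMULATE_DO_MMIO so the caller
 * drops back to userspace to perform the actual access.
 */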
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}
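
/*
 * Main vcpu run entry point. Before (re)entering the guest, fold the
 * result of any MMIO, DCR, OSI or PAPR hypercall that userspace just
 * completed back into the vcpu register state.
 */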
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
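
/*
 * Enable an optional per-vcpu capability (OSI, PAPR, SW TLB), then re-run
 * the sanity check since the newly enabled mode may not be valid for this
 * vcpu.
 */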
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
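
/*
 * Report the 4-instruction hypercall sequence a guest should use to enter
 * the hypervisor, as exposed to userspace via KVM_PPC_GET_PVINFO below.
 */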
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}
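
/*
 * Simple bitmap allocator for logical partition IDs: test_and_set_bit
 * makes allocation atomic, so no extra locking is needed here.
 */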
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}