/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
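
/*
 * Each word of the mask below covers 64 facility bits, so the two entries
 * describe facilities 0-127. A bit cleared here is never offered to a
 * guest, even if the host machine implements the facility.
 */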
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
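
/*
 * Walk every guest page of a memory slot and transfer the per-page dirty
 * state collected in the gmap into the slot's dirty bitmap. Called with
 * slots_lock held by the dirty-log ioctl below.
 */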
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
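
/*
 * Enabling AES/DEA key wrapping below generates fresh random wrapping key
 * masks, so protected keys the guest derived earlier are invalidated.
 */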
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
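
/*
 * Query the AP configuration with PQAP(QCI) into the 128-byte buffer at
 * @config and return the condition code. The instruction is emitted as a
 * raw opcode (.long 0xb2af0000), presumably so the file also builds with
 * assemblers that do not know the PQAP mnemonic.
 */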
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
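
/* Pick the extended (format 2) CRYCB if APXA is installed, format 1 otherwise. */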
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
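
/*
 * VM-wide initialization: allocate and randomize the SCA placement, set up
 * the s390 debug feature, derive the guest facility mask/list, initialize
 * the crypto block and, for non-ucontrol guests, a gmap covering a 44-bit
 * guest address space.
 */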
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	/*
	 * If this KVM host runs *not* in a LPAR, relax the facility bits
	 * of the kvm facility mask by all missing facilities. This will allow
	 * to determine the right CPU model by means of the remaining facilities.
	 * Live guest migration must prohibit the migration of KVMs running in
	 * a LPAR to non LPAR hosts.
	 */
	if (!MACHINE_IS_LPAR)
		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->mask[i];

	/*
	 * Apply the kvm facility mask to limit the kvm supported/tolerated
	 * facility list.
	 */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
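
/*
 * Deliver a pfault "init" interrupt to the vcpu (start_token == true) or a
 * pfault "done" interrupt to the vm (start_token == false) for @token.
 */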
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
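
/* Copy the register state that userspace synced via kvm_run into the SIE block. */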
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
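
/* Mirror of sync_regs: publish the current SIE block state back to kvm_run. */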
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
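
/*
 * IBS (a performance assist usable while at most one VCPU of the guest is
 * running) is toggled via requests so that the change is applied by the
 * target VCPU itself; exit_sie_sync() kicks it out of SIE so the request
 * is processed promptly.
 */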
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
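
/*
 * For user-controlled VMs the hardware SIE control block can be mmap()ed
 * by userspace at KVM_S390_SIE_PAGE_OFFSET; any other access faults.
 */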
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");