/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
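/*
 * Each entry above maps a debugfs file name to a counter embedded in
 * struct kvm_vcpu; VCPU_STAT() records the offset of that counter so the
 * common KVM statistics code can sum the per-vcpu values when the file
 * under debugfs is read.
 */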
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
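/*
 * Each u64 of kvm_s390_fac_list_mask covers 64 facility bits, with
 * facility 0 in the most significant bit of the first word (the STFLE
 * bit order). A bit cleared in this mask is never reported to a guest,
 * even when the host provides the corresponding facility.
 */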
static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
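/*
 * Userspace reaches the function above via the KVM_CHECK_EXTENSION ioctl;
 * a return value of 0 means "not available", anything positive is
 * capability specific (e.g. the vcpu slot count for KVM_CAP_MAX_VCPUS).
 */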
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
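/*
 * The per-page dirty state lives in the host page tables managed by the
 * gmap; gmap_test_and_clear_dirty() folds it into KVM's dirty bitmap one
 * guest frame at a time, which is why the loop walks every gfn of the
 * memslot.
 */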
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac->mask, 64);
			set_kvm_facility(kvm->arch.model.fac->list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
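/*
 * A storage key is one byte per 4K frame: four access-control bits plus
 * the fetch-protection, reference and change bits. The low order bit of
 * the byte is not architecturally defined, hence the 0x01 check above.
 */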
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
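/*
 * The wrapping key masks are seeded with fresh random data for every VM,
 * so an AES/DEA key wrapped (protected) inside one guest is unusable in
 * any other guest.
 */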
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
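/*
 * Note on sca_offset above: a basic SCA is much smaller than a page, so
 * successive VMs are staggered in 16-byte steps within their zeroed page,
 * presumably to spread the heavily used SCA fields of different VMs
 * across different cache lines.
 */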
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
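/*
 * The BSCA -> ESCA switch above is safe because all vcpus are blocked and
 * kicked out of SIE first and sca_lock is held for writing: no CPU can be
 * executing with the old SCA while the sie_block pointers are rewritten.
 */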
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	/* Depending on MACHINE_HAS_VX, data stored to vrs either
	 * has vector register or floating point register format.
	 */
	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
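/*
 * The magic numbers above set individual execution-control bits in the
 * SIE control block; for example, the 0x10 in ecb enables transactional
 * execution (facilities 50 and 73) and the eca/ecd bits under facility
 * 129 enable vector register interpretation. The values mirror the
 * architecture documentation rather than named constants.
 */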
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
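/*
 * CPUSTAT_STOP_INT makes the hardware leave SIE at the next opportunity,
 * and prog0c carries PROG_IN_SIE while the vcpu executes under SIE, so
 * spinning until that flag clears guarantees the guest cpu is outside of
 * SIE when exit_sie() returns.
 */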
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
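/*
 * On machines with vector support the 16 architected FPRs are embedded in
 * the leftmost 64 bits of vector registers 0-15, so convert_fp_to_vx()
 * and convert_vx_to_fp() translate between the two layouts instead of
 * copying fprs directly.
 */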
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
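/*
 * The guest TOD clock is the host TOD plus the epoch. For example, to
 * present a guest TOD of 0x2000 while the host clock reads 0x8000, the
 * epoch is set to 0x2000 - 0x8000 (wrapping modulo 2^64); the SIE
 * hardware adds the epoch back whenever the guest reads its clock.
 */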
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
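/*
 * kvm->srcu is dropped around the actual sie64a() call so that memslot
 * updates never have to wait for a vcpu that sits in guest context;
 * everything that touches memslots in vcpu_pre_run/vcpu_post_run runs
 * under the srcu read lock again.
 */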
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
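/*
 * Userspace counterpart, as an illustrative sketch only ("run" is the
 * vcpu's mmap'ed struct kvm_run, vcpu_fd and new_prefix are hypothetical):
 * set the field, then flag it dirty so the next KVM_RUN picks it up in
 * sync_regs():
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */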
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
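/*
 * Note the asymmetry with sync_regs(): store_regs() copies
 * unconditionally, so after every return to userspace the PSW and the
 * synced register block in kvm_run reflect the current SIE state, with
 * no dirty bits involved.
 */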
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
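/*
 * A minimal userspace loop driving this handler might look as follows
 * (illustrative only; vcpu_fd and handle_sieic() are hypothetical, and
 * mmap_size would come from KVM_GET_VCPU_MMAP_SIZE):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) >= 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */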
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.vrs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
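/*
 * For orientation, the writes above follow the architected store-status
 * layout relative to the adjusted gpa, via the __LC_*_SAVE_AREA offsets:
 * 128 bytes of FP registers, 128 of GP registers, the 16-byte PSW, the
 * 4-byte prefix, FP control and TOD programmable registers, the 8-byte
 * CPU timer and clock comparator, 64 bytes of access registers and 128
 * bytes of control registers.
 */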
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
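/*
 * Facility 129 is the vector facility; without it the guest has no VXRS
 * to store, so the additional-status store degenerates to a no-op.
 */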
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
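/*
 * Taken together, start/stop maintain a simple IBS invariant: the
 * facility is only enabled while exactly one VCPU is in the started
 * state, and any transition to two or more started VCPUs disables it
 * on all of them.
 */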
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
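/*
 * Userspace enables this per-vcpu capability via KVM_ENABLE_CAP, e.g.
 * (illustrative sketch; vcpu_fd is hypothetical):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */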
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
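/*
 * Illustrative userspace use of the memop interface; vcpu_fd, guest_addr
 * and buffer are hypothetical, and len is bounded by MEM_OP_MAX_SIZE.
 * A read of len bytes from a guest logical address:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.ar    = 0,
 *	};
 *	ret = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */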
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
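/*
 * Example of the segment-alignment rule above: 0xffffful masks the low
 * 20 bits, so userspace_addr and memory_size must both be 1 MB multiples.
 * A slot at 0x100000 of size 0x200000 is accepted, while an address such
 * as 0x180400 is rejected with -EINVAL.
 */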
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
static int __init kvm_s390_init(void)
{
	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c,
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");