/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

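/*
 * Each entry above maps a debugfs file name to a counter embedded in
 * struct kvm_vcpu. As a sketch of the expansion (with "exit_foo" as a
 * made-up example name):
 *
 *	{ "exit_foo", VCPU_STAT(exit_foo) }
 * becomes
 *	{ "exit_foo", offsetof(struct kvm_vcpu, stat.exit_foo), KVM_STAT_VCPU }
 *
 * so the generic KVM debugfs code can locate and sum the per-vcpu counters.
 */
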
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

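/*
 * The mask above is indexed like an STFLE result: facility bit n lives in
 * doubleword n / 64, counted from the most significant bit. A rough
 * illustration of the lookup performed on such a list:
 *
 *	static inline int fac_test(unsigned long *fac_list, unsigned long nr)
 *	{
 *		return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1;
 *	}
 *
 * Facility 129 (vector) lies beyond the two doublewords covered here, which
 * is why the mask loop in kvm_arch_init_vm() zeroes it and enabling
 * KVM_CAP_S390_VECTOR_REGISTERS has to set it explicitly via
 * set_kvm_facility().
 */
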
static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

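/*
 * Roughly, SIE computes the guest's TOD clock as host TOD + epoch. When the
 * host clock is stepped by *delta during a clock synchronization event,
 * subtracting the same delta from every epoch keeps the guest-visible TOD
 * unchanged:
 *
 *	guest_tod = (host_tod + *delta) + (epoch - *delta) = host_tod + epoch
 */
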
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

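/*
 * These capabilities are probed from userspace with the KVM_CHECK_EXTENSION
 * ioctl; a minimal sketch, assuming vm_fd is an open VM file descriptor:
 *
 *	int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *
 * A return of 0 means unsupported; for the limit-style capabilities above,
 * the positive return value carries the limit itself.
 */
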
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

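/*
 * Note the two-step flow above: kvm_s390_sync_dirty_log() migrates the dirty
 * bits from the gmap/page tables into the memslot's dirty_bitmap, and
 * kvm_get_dirty_log() then copies that bitmap to userspace before it is
 * cleared here, all under slots_lock.
 */
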
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac->mask, 64);
			set_kvm_facility(kvm->arch.model.fac->list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

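/*
 * Layout of one storage key byte as handled above (see the z/Architecture
 * Principles of Operation for the authoritative definition):
 *
 *	bits 0-3: ACC (access-control bits, matched against the PSW key)
 *	bit 4:    F   (fetch-protection bit)
 *	bit 5:    R   (reference bit)
 *	bit 6:    C   (change bit)
 *	bit 7:    reserved - which is why set_skeys rejects keys[i] & 0x01
 */
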
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

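/*
 * The raw ".long 0xb2af0000" above encodes the PQAP instruction; per the
 * inline asm's constraints, the QCI function code travels in register 0 and
 * the config buffer address in register 2, which is why both appear in the
 * clobber list. The opcode is hand-coded, presumably because the assembler
 * may not know the PQAP mnemonic.
 */
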
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

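/*
 * The switch above is a one-way upgrade: a basic SCA only has
 * KVM_S390_BSCA_CPU_SLOTS entries, so the first VCPU id that does not fit
 * (and has sclp.has_esca set) migrates the whole VM to an extended SCA while
 * all VCPUs are blocked, after which every sie_block points at the new block
 * (ecb2 bit 0x04 marking it as ESCA format for SIE).
 */
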
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

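/*
 * A plausible reading of the flag bits above: ecb2 bit 0x80 enables SIE
 * interpretation of ESSA (collaborative memory management) using the CBRL
 * area pointed to by cbrlo, while clearing bit 0x08 keeps PFMF
 * interpretation off so that handling stays with KVM.
 */
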
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

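/*
 * The request protocol above pairs with SIE's program-parameter fields:
 * setting PROG_REQUEST in prog20 marks a pending request, CPUSTAT_STOP_INT
 * forces a running VCPU to drop out of SIE, and the busy-wait on prog0c's
 * PROG_IN_SIE bit guarantees the guest has really left SIE before the
 * caller proceeds.
 */
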
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

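/*
 * The handshake sketched above follows the pseudo-page-fault protocol: the
 * guest announces a token area (vcpu->arch.pfault_token) via DIAG 0x258, KVM
 * injects an INT_PFAULT_INIT external interrupt when a host page is missing
 * and an INT_PFAULT_DONE once it was made resident, so the guest can
 * reschedule another task instead of blocking. The gcr[0] & 0x200 check
 * gates the corresponding external-interrupt subclass.
 */
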
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	kvm_s390_forward_psw(vcpu, insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

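/*
 * kvm_arch_vcpu_ioctl_run() below drives this loop from the KVM_RUN ioctl.
 * A minimal userspace sketch, assuming vcpu_fd and mmap_size come from the
 * usual KVM setup ioctls:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (!ioctl(vcpu_fd, KVM_RUN, 0)) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;	/* hand the intercept to userspace code *\/
 *	}
 */
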
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

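/*
 * Userspace view of the sync_regs()/store_regs() contract above (a sketch,
 * not part of this file): a field of run->s.regs written by the VMM only
 * takes effect if the matching KVM_SYNC_* bit is set in kvm_dirty_regs
 * before KVM_RUN; on return, store_regs() has refreshed the shared area.
 * new_prefix is an assumed value:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	current_psw_mask = run->psw_mask;
 */
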
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else {
		gpa -= __LC_FPREGS_SAVE_AREA;
	}

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

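/*
 * Layout note: after the special cases above, gpa has been rebased by
 * __LC_FPREGS_SAVE_AREA, so each gpa + __LC_* sum in the writes lands on
 * the architected save-area offset regardless of whether the caller passed
 * an absolute address, NOADDR or PREFIXED.
 */
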
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

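/*
 * Sketch of a caller (userspace, not part of this file; field names per
 * struct kvm_s390_mem_op in <linux/kvm.h>). A return value > 0 is the
 * number of the program exception that the access would raise, per the
 * r > 0 handling above; guest_addr, len and local_buf are assumptions:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)local_buf,
 *		.ar    = 0,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */
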
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

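/*
 * Sketch of how a VMM would use the two IRQ-state ioctls above for
 * migration (userspace side, not part of this file; buf and buf_len are
 * assumptions, with buf_len a multiple of sizeof(struct kvm_s390_irq)
 * no larger than VCPU_IRQS_MAX_BUF):
 *
 *	struct kvm_s390_irq_state st = {
 *		.buf = (__u64)(unsigned long)buf,
 *		.len = buf_len,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &st);
 *
 * When non-negative, r reflects how much state was copied (see
 * kvm_s390_get_irq_state() in interrupt.c); the target side would hand
 * the same buffer back via KVM_S390_SET_IRQ_STATE.
 */
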
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

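/*
 * The segment-boundary checks above translate to the following userspace
 * sketch (not part of this file; backing is assumed to be a 1MB-aligned
 * mmap() area, and the sizes are illustrative):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256UL << 20,
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * Both memory_size and userspace_addr must be multiples of 1UL << 20, and
 * the region must stay below kvm->arch.mem_limit.
 */
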
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");