/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/facility.h>

#define CREATE_TRACE_POINTS
#include "trace-s390.h"

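/*
 * Per-VCPU statistics exported via debugfs: each entry below maps a
 * file name to the offset of a counter in struct kvm_vcpu's stat area.
 */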
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

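/*
 * Hardware (un)setup hooks kvm_gmap_notifier() (defined further down)
 * into the gmap IPTE notifier chain, so that invalidation of a guest
 * prefix page can kick the affected VCPUs out of SIE.
 */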
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

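/*
 * Walk a memslot gfn by gfn and transfer the per-page dirty state kept
 * in the gmap into the memslot's dirty bitmap.
 */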
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

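/*
 * CMMA (Collaborative Memory Management Assist) and the guest memory
 * limit may only be changed while no VCPUs exist yet; clearing the CMMA
 * state resets the per-page usage information kept for the gmap's mm.
 */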
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

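/*
 * The AES/DEA wrapping key masks live in the guest's crypto control
 * block (crycb). Enabling key wrapping fills the mask with fresh random
 * bytes, which invalidates any protected key derived from the old mask.
 */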
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_vfacility(76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

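/*
 * The guest TOD clock is kept as an offset ("epoch") relative to the
 * host TOD clock; setting it recomputes the epoch and propagates it to
 * the SIE control block of every existing VCPU.
 */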
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
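	/*
	 * Note: the SCA still fits within the zeroed page allocated above;
	 * staggering sca_offset in 16-byte steps merely makes the SCAs of
	 * different VMs start at different offsets (likely to spread
	 * cache-line usage).
	 */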

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

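/*
 * Floating point and access registers are switched lazily: vcpu_load
 * saves the host copies and installs the guest copies, vcpu_put does
 * the reverse.
 */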
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

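/*
 * cbrlo points to a zeroed page hooked into the SIE control block while
 * CMMA is active; it is consumed by the interpreted execution of the
 * ESSA instruction (assumption based on the CMMA setup below).
 */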
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/* Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

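/*
 * The prefix area spans two consecutive 4K pages, so masking bit 0x1000
 * off the invalidated address below matches a notification for either
 * of them.
 */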
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

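/*
 * Guest debugging rides on the PER (Program Event Recording) facility:
 * CPUSTAT_P forces PER on for the guest, and imported hardware
 * breakpoints are patched into the guest PER registers before each SIE
 * entry (see vcpu_pre_run()).
 */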
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

*vcpu
)
1447 struct kvm_arch_async_pf arch
;
1450 if (vcpu
->arch
.pfault_token
== KVM_S390_PFAULT_TOKEN_INVALID
)
1452 if ((vcpu
->arch
.sie_block
->gpsw
.mask
& vcpu
->arch
.pfault_select
) !=
1453 vcpu
->arch
.pfault_compare
)
1455 if (psw_extint_disabled(vcpu
))
1457 if (kvm_s390_vcpu_has_irq(vcpu
, 0))
1459 if (!(vcpu
->arch
.sie_block
->gcr
[0] & 0x200ul
))
1461 if (!vcpu
->arch
.gmap
->pfault_enabled
)
1464 hva
= gfn_to_hva(vcpu
->kvm
, gpa_to_gfn(current
->thread
.gmap_addr
));
1465 hva
+= current
->thread
.gmap_addr
& ~PAGE_MASK
;
1466 if (read_guest_real(vcpu
, vcpu
->arch
.pfault_token
, &arch
.pfault_token
, 8))
1469 rc
= kvm_setup_async_pf(vcpu
, current
->thread
.gmap_addr
, hva
, &arch
);
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

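/*
 * sync_regs()/store_regs() shuttle state between the kvm_run area
 * shared with userspace and the SIE control block, honouring the
 * kvm_dirty_regs flags userspace set before KVM_RUN.
 */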
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");