arch/s390/kvm/kvm-s390.c
1 /*
2 * hosting zSeries kernel virtual machines
3 *
4 * Copyright IBM Corp. 2008, 2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
14 * Jason J. Herne <jjherne@us.ibm.com>
15 */
16
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/random.h>
26 #include <linux/slab.h>
27 #include <linux/timer.h>
28 #include <linux/vmalloc.h>
29 #include <asm/asm-offsets.h>
30 #include <asm/lowcore.h>
31 #include <asm/etr.h>
32 #include <asm/pgtable.h>
33 #include <asm/nmi.h>
34 #include <asm/switch_to.h>
35 #include <asm/isc.h>
36 #include <asm/sclp.h>
37 #include "kvm-s390.h"
38 #include "gaccess.h"
39
40 #define KMSG_COMPONENT "kvm-s390"
41 #undef pr_fmt
42 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
44 #define CREATE_TRACE_POINTS
45 #include "trace.h"
46 #include "trace-s390.h"
47
48 #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
49 #define LOCAL_IRQS 32
50 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
52
53 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
55 struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
57 { "exit_null", VCPU_STAT(exit_null) },
58 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
62 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
68 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
69 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
70 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
71 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
72 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
73 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
74 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
75 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
76 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
77 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
78 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
79 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
80 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
81 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
82 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
83 { "instruction_spx", VCPU_STAT(instruction_spx) },
84 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
85 { "instruction_stap", VCPU_STAT(instruction_stap) },
86 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
87 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
88 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
89 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
90 { "instruction_essa", VCPU_STAT(instruction_essa) },
91 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
92 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
93 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
94 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
95 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
96 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
97 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
98 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
99 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
100 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
101 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
102 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
103 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
104 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
105 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
106 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
107 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
108 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
109 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
110 { "diagnose_10", VCPU_STAT(diagnose_10) },
111 { "diagnose_44", VCPU_STAT(diagnose_44) },
112 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
113 { "diagnose_258", VCPU_STAT(diagnose_258) },
114 { "diagnose_308", VCPU_STAT(diagnose_308) },
115 { "diagnose_500", VCPU_STAT(diagnose_500) },
116 { NULL }
117 };
118
119 /* upper facilities limit for kvm */
120 unsigned long kvm_s390_fac_list_mask[] = {
121 0xffe6fffbfcfdfc40UL,
122 0x005e800000000000UL,
123 };
124
125 unsigned long kvm_s390_fac_list_mask_size(void)
126 {
127 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
128 return ARRAY_SIZE(kvm_s390_fac_list_mask);
129 }
130
131 static struct gmap_notifier gmap_notifier;
132 debug_info_t *kvm_s390_dbf;
133
134 /* Section: not file related */
135 int kvm_arch_hardware_enable(void)
136 {
137 /* every s390 is virtualization enabled ;-) */
138 return 0;
139 }
140
141 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
142
143 /*
144 * This callback is executed during stop_machine(). All CPUs are therefore
145 * temporarily stopped. In order not to change guest behavior, we have to
146 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
147 * so a CPU won't be stopped while calculating with the epoch.
148 */
149 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
150 void *v)
151 {
152 struct kvm *kvm;
153 struct kvm_vcpu *vcpu;
154 int i;
155 unsigned long long *delta = v;
156
157 list_for_each_entry(kvm, &vm_list, vm_list) {
158 kvm->arch.epoch -= *delta;
159 kvm_for_each_vcpu(i, vcpu, kvm) {
160 vcpu->arch.sie_block->epoch -= *delta;
161 }
162 }
163 return NOTIFY_OK;
164 }
165
166 static struct notifier_block kvm_clock_notifier = {
167 .notifier_call = kvm_clock_sync,
168 };
169
170 int kvm_arch_hardware_setup(void)
171 {
172 gmap_notifier.notifier_call = kvm_gmap_notifier;
173 gmap_register_ipte_notifier(&gmap_notifier);
174 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
175 &kvm_clock_notifier);
176 return 0;
177 }
178
179 void kvm_arch_hardware_unsetup(void)
180 {
181 gmap_unregister_ipte_notifier(&gmap_notifier);
182 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
183 &kvm_clock_notifier);
184 }
185
186 int kvm_arch_init(void *opaque)
187 {
188 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
189 if (!kvm_s390_dbf)
190 return -ENOMEM;
191
192 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
193 debug_unregister(kvm_s390_dbf);
194 return -ENOMEM;
195 }
196
197 /* Register floating interrupt controller interface. */
198 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
199 }
200
201 void kvm_arch_exit(void)
202 {
203 debug_unregister(kvm_s390_dbf);
204 }
205
206 /* Section: device related */
207 long kvm_arch_dev_ioctl(struct file *filp,
208 unsigned int ioctl, unsigned long arg)
209 {
210 if (ioctl == KVM_S390_ENABLE_SIE)
211 return s390_enable_sie();
212 return -EINVAL;
213 }
214
215 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
216 {
217 int r;
218
219 switch (ext) {
220 case KVM_CAP_S390_PSW:
221 case KVM_CAP_S390_GMAP:
222 case KVM_CAP_SYNC_MMU:
223 #ifdef CONFIG_KVM_S390_UCONTROL
224 case KVM_CAP_S390_UCONTROL:
225 #endif
226 case KVM_CAP_ASYNC_PF:
227 case KVM_CAP_SYNC_REGS:
228 case KVM_CAP_ONE_REG:
229 case KVM_CAP_ENABLE_CAP:
230 case KVM_CAP_S390_CSS_SUPPORT:
231 case KVM_CAP_IOEVENTFD:
232 case KVM_CAP_DEVICE_CTRL:
233 case KVM_CAP_ENABLE_CAP_VM:
234 case KVM_CAP_S390_IRQCHIP:
235 case KVM_CAP_VM_ATTRIBUTES:
236 case KVM_CAP_MP_STATE:
237 case KVM_CAP_S390_INJECT_IRQ:
238 case KVM_CAP_S390_USER_SIGP:
239 case KVM_CAP_S390_USER_STSI:
240 case KVM_CAP_S390_SKEYS:
241 case KVM_CAP_S390_IRQ_STATE:
242 r = 1;
243 break;
244 case KVM_CAP_S390_MEM_OP:
245 r = MEM_OP_MAX_SIZE;
246 break;
247 case KVM_CAP_NR_VCPUS:
248 case KVM_CAP_MAX_VCPUS:
249 r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
250 : KVM_S390_BSCA_CPU_SLOTS;
251 break;
252 case KVM_CAP_NR_MEMSLOTS:
253 r = KVM_USER_MEM_SLOTS;
254 break;
255 case KVM_CAP_S390_COW:
256 r = MACHINE_HAS_ESOP;
257 break;
258 case KVM_CAP_S390_VECTOR_REGISTERS:
259 r = MACHINE_HAS_VX;
260 break;
261 case KVM_CAP_S390_RI:
262 r = test_facility(64);
263 break;
264 default:
265 r = 0;
266 }
267 return r;
268 }
269
270 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
271 struct kvm_memory_slot *memslot)
272 {
273 gfn_t cur_gfn, last_gfn;
274 unsigned long address;
275 struct gmap *gmap = kvm->arch.gmap;
276
277 down_read(&gmap->mm->mmap_sem);
278 /* Loop over all guest pages */
279 last_gfn = memslot->base_gfn + memslot->npages;
280 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
281 address = gfn_to_hva_memslot(memslot, cur_gfn);
282
283 if (gmap_test_and_clear_dirty(address, gmap))
284 mark_page_dirty(kvm, cur_gfn);
285 }
286 up_read(&gmap->mm->mmap_sem);
287 }
288
289 /* Section: vm related */
290 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
291
292 /*
293 * Get (and clear) the dirty memory log for a memory slot.
294 */
295 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
296 struct kvm_dirty_log *log)
297 {
298 int r;
299 unsigned long n;
300 struct kvm_memslots *slots;
301 struct kvm_memory_slot *memslot;
302 int is_dirty = 0;
303
304 mutex_lock(&kvm->slots_lock);
305
306 r = -EINVAL;
307 if (log->slot >= KVM_USER_MEM_SLOTS)
308 goto out;
309
310 slots = kvm_memslots(kvm);
311 memslot = id_to_memslot(slots, log->slot);
312 r = -ENOENT;
313 if (!memslot->dirty_bitmap)
314 goto out;
315
316 kvm_s390_sync_dirty_log(kvm, memslot);
317 r = kvm_get_dirty_log(kvm, log, &is_dirty);
318 if (r)
319 goto out;
320
321 /* Clear the dirty log */
322 if (is_dirty) {
323 n = kvm_dirty_bitmap_bytes(memslot);
324 memset(memslot->dirty_bitmap, 0, n);
325 }
326 r = 0;
327 out:
328 mutex_unlock(&kvm->slots_lock);
329 return r;
330 }
331
332 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
333 {
334 int r;
335
336 if (cap->flags)
337 return -EINVAL;
338
339 switch (cap->cap) {
340 case KVM_CAP_S390_IRQCHIP:
341 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
342 kvm->arch.use_irqchip = 1;
343 r = 0;
344 break;
345 case KVM_CAP_S390_USER_SIGP:
346 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
347 kvm->arch.user_sigp = 1;
348 r = 0;
349 break;
350 case KVM_CAP_S390_VECTOR_REGISTERS:
351 mutex_lock(&kvm->lock);
352 if (atomic_read(&kvm->online_vcpus)) {
353 r = -EBUSY;
354 } else if (MACHINE_HAS_VX) {
355 set_kvm_facility(kvm->arch.model.fac->mask, 129);
356 set_kvm_facility(kvm->arch.model.fac->list, 129);
357 r = 0;
358 } else
359 r = -EINVAL;
360 mutex_unlock(&kvm->lock);
361 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
362 r ? "(not available)" : "(success)");
363 break;
364 case KVM_CAP_S390_RI:
365 r = -EINVAL;
366 mutex_lock(&kvm->lock);
367 if (atomic_read(&kvm->online_vcpus)) {
368 r = -EBUSY;
369 } else if (test_facility(64)) {
370 set_kvm_facility(kvm->arch.model.fac->mask, 64);
371 set_kvm_facility(kvm->arch.model.fac->list, 64);
372 r = 0;
373 }
374 mutex_unlock(&kvm->lock);
375 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
376 r ? "(not available)" : "(success)");
377 break;
378 case KVM_CAP_S390_USER_STSI:
379 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
380 kvm->arch.user_stsi = 1;
381 r = 0;
382 break;
383 default:
384 r = -EINVAL;
385 break;
386 }
387 return r;
388 }
389
390 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
391 {
392 int ret;
393
394 switch (attr->attr) {
395 case KVM_S390_VM_MEM_LIMIT_SIZE:
396 ret = 0;
397 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
398 kvm->arch.mem_limit);
399 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
400 ret = -EFAULT;
401 break;
402 default:
403 ret = -ENXIO;
404 break;
405 }
406 return ret;
407 }
408
409 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
410 {
411 int ret;
412 unsigned int idx;
413 switch (attr->attr) {
414 case KVM_S390_VM_MEM_ENABLE_CMMA:
415 /* enable CMMA only for z10 and later (EDAT_1) */
416 ret = -EINVAL;
417 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
418 break;
419
420 ret = -EBUSY;
421 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
422 mutex_lock(&kvm->lock);
423 if (atomic_read(&kvm->online_vcpus) == 0) {
424 kvm->arch.use_cmma = 1;
425 ret = 0;
426 }
427 mutex_unlock(&kvm->lock);
428 break;
429 case KVM_S390_VM_MEM_CLR_CMMA:
430 ret = -EINVAL;
431 if (!kvm->arch.use_cmma)
432 break;
433
434 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
435 mutex_lock(&kvm->lock);
436 idx = srcu_read_lock(&kvm->srcu);
437 s390_reset_cmma(kvm->arch.gmap->mm);
438 srcu_read_unlock(&kvm->srcu, idx);
439 mutex_unlock(&kvm->lock);
440 ret = 0;
441 break;
442 case KVM_S390_VM_MEM_LIMIT_SIZE: {
443 unsigned long new_limit;
444
445 if (kvm_is_ucontrol(kvm))
446 return -EINVAL;
447
448 if (get_user(new_limit, (u64 __user *)attr->addr))
449 return -EFAULT;
450
451 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
452 new_limit > kvm->arch.mem_limit)
453 return -E2BIG;
454
455 if (!new_limit)
456 return -EINVAL;
457
458 /* gmap_alloc takes last usable address */
459 if (new_limit != KVM_S390_NO_MEM_LIMIT)
460 new_limit -= 1;
461
462 ret = -EBUSY;
463 mutex_lock(&kvm->lock);
464 if (atomic_read(&kvm->online_vcpus) == 0) {
465 /* gmap_alloc will round the limit up */
466 struct gmap *new = gmap_alloc(current->mm, new_limit);
467
468 if (!new) {
469 ret = -ENOMEM;
470 } else {
471 gmap_free(kvm->arch.gmap);
472 new->private = kvm;
473 kvm->arch.gmap = new;
474 ret = 0;
475 }
476 }
477 mutex_unlock(&kvm->lock);
478 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
479 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
480 (void *) kvm->arch.gmap->asce);
481 break;
482 }
483 default:
484 ret = -ENXIO;
485 break;
486 }
487 return ret;
488 }
489
490 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
491
492 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
493 {
494 struct kvm_vcpu *vcpu;
495 int i;
496
497 if (!test_kvm_facility(kvm, 76))
498 return -EINVAL;
499
500 mutex_lock(&kvm->lock);
501 switch (attr->attr) {
502 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
503 get_random_bytes(
504 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
505 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
506 kvm->arch.crypto.aes_kw = 1;
507 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
508 break;
509 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
510 get_random_bytes(
511 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
512 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
513 kvm->arch.crypto.dea_kw = 1;
514 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
515 break;
516 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
517 kvm->arch.crypto.aes_kw = 0;
518 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
519 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
520 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
521 break;
522 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
523 kvm->arch.crypto.dea_kw = 0;
524 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
525 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
526 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
527 break;
528 default:
529 mutex_unlock(&kvm->lock);
530 return -ENXIO;
531 }
532
533 kvm_for_each_vcpu(i, vcpu, kvm) {
534 kvm_s390_vcpu_crypto_setup(vcpu);
535 exit_sie(vcpu);
536 }
537 mutex_unlock(&kvm->lock);
538 return 0;
539 }
540
541 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
542 {
543 u8 gtod_high;
544
545 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
546 sizeof(gtod_high)))
547 return -EFAULT;
548
549 if (gtod_high != 0)
550 return -EINVAL;
551 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
552
553 return 0;
554 }
555
556 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
557 {
558 u64 gtod;
559
560 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
561 return -EFAULT;
562
563 kvm_s390_set_tod_clock(kvm, gtod);
564 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
565 return 0;
566 }
567
568 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
569 {
570 int ret;
571
572 if (attr->flags)
573 return -EINVAL;
574
575 switch (attr->attr) {
576 case KVM_S390_VM_TOD_HIGH:
577 ret = kvm_s390_set_tod_high(kvm, attr);
578 break;
579 case KVM_S390_VM_TOD_LOW:
580 ret = kvm_s390_set_tod_low(kvm, attr);
581 break;
582 default:
583 ret = -ENXIO;
584 break;
585 }
586 return ret;
587 }
588
589 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
590 {
591 u8 gtod_high = 0;
592
593 if (copy_to_user((void __user *)attr->addr, &gtod_high,
594 sizeof(gtod_high)))
595 return -EFAULT;
596 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
597
598 return 0;
599 }
600
601 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
602 {
603 u64 gtod;
604
605 gtod = kvm_s390_get_tod_clock_fast(kvm);
606 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
607 return -EFAULT;
608 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
609
610 return 0;
611 }
612
613 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
614 {
615 int ret;
616
617 if (attr->flags)
618 return -EINVAL;
619
620 switch (attr->attr) {
621 case KVM_S390_VM_TOD_HIGH:
622 ret = kvm_s390_get_tod_high(kvm, attr);
623 break;
624 case KVM_S390_VM_TOD_LOW:
625 ret = kvm_s390_get_tod_low(kvm, attr);
626 break;
627 default:
628 ret = -ENXIO;
629 break;
630 }
631 return ret;
632 }
633
634 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
635 {
636 struct kvm_s390_vm_cpu_processor *proc;
637 int ret = 0;
638
639 mutex_lock(&kvm->lock);
640 if (atomic_read(&kvm->online_vcpus)) {
641 ret = -EBUSY;
642 goto out;
643 }
644 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
645 if (!proc) {
646 ret = -ENOMEM;
647 goto out;
648 }
649 if (!copy_from_user(proc, (void __user *)attr->addr,
650 sizeof(*proc))) {
651 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
652 sizeof(struct cpuid));
653 kvm->arch.model.ibc = proc->ibc;
654 memcpy(kvm->arch.model.fac->list, proc->fac_list,
655 S390_ARCH_FAC_LIST_SIZE_BYTE);
656 } else
657 ret = -EFAULT;
658 kfree(proc);
659 out:
660 mutex_unlock(&kvm->lock);
661 return ret;
662 }
663
664 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
665 {
666 int ret = -ENXIO;
667
668 switch (attr->attr) {
669 case KVM_S390_VM_CPU_PROCESSOR:
670 ret = kvm_s390_set_processor(kvm, attr);
671 break;
672 }
673 return ret;
674 }
675
676 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
677 {
678 struct kvm_s390_vm_cpu_processor *proc;
679 int ret = 0;
680
681 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
682 if (!proc) {
683 ret = -ENOMEM;
684 goto out;
685 }
686 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
687 proc->ibc = kvm->arch.model.ibc;
688 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
689 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
690 ret = -EFAULT;
691 kfree(proc);
692 out:
693 return ret;
694 }
695
696 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
697 {
698 struct kvm_s390_vm_cpu_machine *mach;
699 int ret = 0;
700
701 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
702 if (!mach) {
703 ret = -ENOMEM;
704 goto out;
705 }
706 get_cpu_id((struct cpuid *) &mach->cpuid);
707 mach->ibc = sclp.ibc;
708 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
709 S390_ARCH_FAC_LIST_SIZE_BYTE);
710 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
711 S390_ARCH_FAC_LIST_SIZE_BYTE);
712 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
713 ret = -EFAULT;
714 kfree(mach);
715 out:
716 return ret;
717 }
718
719 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
720 {
721 int ret = -ENXIO;
722
723 switch (attr->attr) {
724 case KVM_S390_VM_CPU_PROCESSOR:
725 ret = kvm_s390_get_processor(kvm, attr);
726 break;
727 case KVM_S390_VM_CPU_MACHINE:
728 ret = kvm_s390_get_machine(kvm, attr);
729 break;
730 }
731 return ret;
732 }
733
734 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
735 {
736 int ret;
737
738 switch (attr->group) {
739 case KVM_S390_VM_MEM_CTRL:
740 ret = kvm_s390_set_mem_control(kvm, attr);
741 break;
742 case KVM_S390_VM_TOD:
743 ret = kvm_s390_set_tod(kvm, attr);
744 break;
745 case KVM_S390_VM_CPU_MODEL:
746 ret = kvm_s390_set_cpu_model(kvm, attr);
747 break;
748 case KVM_S390_VM_CRYPTO:
749 ret = kvm_s390_vm_set_crypto(kvm, attr);
750 break;
751 default:
752 ret = -ENXIO;
753 break;
754 }
755
756 return ret;
757 }
758
759 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
760 {
761 int ret;
762
763 switch (attr->group) {
764 case KVM_S390_VM_MEM_CTRL:
765 ret = kvm_s390_get_mem_control(kvm, attr);
766 break;
767 case KVM_S390_VM_TOD:
768 ret = kvm_s390_get_tod(kvm, attr);
769 break;
770 case KVM_S390_VM_CPU_MODEL:
771 ret = kvm_s390_get_cpu_model(kvm, attr);
772 break;
773 default:
774 ret = -ENXIO;
775 break;
776 }
777
778 return ret;
779 }
780
781 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
782 {
783 int ret;
784
785 switch (attr->group) {
786 case KVM_S390_VM_MEM_CTRL:
787 switch (attr->attr) {
788 case KVM_S390_VM_MEM_ENABLE_CMMA:
789 case KVM_S390_VM_MEM_CLR_CMMA:
790 case KVM_S390_VM_MEM_LIMIT_SIZE:
791 ret = 0;
792 break;
793 default:
794 ret = -ENXIO;
795 break;
796 }
797 break;
798 case KVM_S390_VM_TOD:
799 switch (attr->attr) {
800 case KVM_S390_VM_TOD_LOW:
801 case KVM_S390_VM_TOD_HIGH:
802 ret = 0;
803 break;
804 default:
805 ret = -ENXIO;
806 break;
807 }
808 break;
809 case KVM_S390_VM_CPU_MODEL:
810 switch (attr->attr) {
811 case KVM_S390_VM_CPU_PROCESSOR:
812 case KVM_S390_VM_CPU_MACHINE:
813 ret = 0;
814 break;
815 default:
816 ret = -ENXIO;
817 break;
818 }
819 break;
820 case KVM_S390_VM_CRYPTO:
821 switch (attr->attr) {
822 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
823 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
824 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
825 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
826 ret = 0;
827 break;
828 default:
829 ret = -ENXIO;
830 break;
831 }
832 break;
833 default:
834 ret = -ENXIO;
835 break;
836 }
837
838 return ret;
839 }
840
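/*
 * Read the guest storage keys for args->count frames starting at
 * args->start_gfn and copy them to the user buffer at skeydata_addr.
 */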
841 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
842 {
843 uint8_t *keys;
844 uint64_t hva;
845 unsigned long curkey;
846 int i, r = 0;
847
848 if (args->flags != 0)
849 return -EINVAL;
850
851 /* Is this guest using storage keys? */
852 if (!mm_use_skey(current->mm))
853 return KVM_S390_GET_SKEYS_NONE;
854
855 /* Enforce sane limit on memory allocation */
856 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
857 return -EINVAL;
858
859 keys = kmalloc_array(args->count, sizeof(uint8_t),
860 GFP_KERNEL | __GFP_NOWARN);
861 if (!keys)
862 keys = vmalloc(sizeof(uint8_t) * args->count);
863 if (!keys)
864 return -ENOMEM;
865
866 for (i = 0; i < args->count; i++) {
867 hva = gfn_to_hva(kvm, args->start_gfn + i);
868 if (kvm_is_error_hva(hva)) {
869 r = -EFAULT;
870 goto out;
871 }
872
873 curkey = get_guest_storage_key(current->mm, hva);
874 if (IS_ERR_VALUE(curkey)) {
875 r = curkey;
876 goto out;
877 }
878 keys[i] = curkey;
879 }
880
881 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
882 sizeof(uint8_t) * args->count);
883 if (r)
884 r = -EFAULT;
885 out:
886 kvfree(keys);
887 return r;
888 }
889
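/*
 * Copy args->count storage keys from the user buffer at skeydata_addr and
 * apply them to the guest frames starting at args->start_gfn, enabling
 * storage key handling for the guest first.
 */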
890 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
891 {
892 uint8_t *keys;
893 uint64_t hva;
894 int i, r = 0;
895
896 if (args->flags != 0)
897 return -EINVAL;
898
899 /* Enforce sane limit on memory allocation */
900 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
901 return -EINVAL;
902
903 keys = kmalloc_array(args->count, sizeof(uint8_t),
904 GFP_KERNEL | __GFP_NOWARN);
905 if (!keys)
906 keys = vmalloc(sizeof(uint8_t) * args->count);
907 if (!keys)
908 return -ENOMEM;
909
910 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
911 sizeof(uint8_t) * args->count);
912 if (r) {
913 r = -EFAULT;
914 goto out;
915 }
916
917 /* Enable storage key handling for the guest */
918 r = s390_enable_skey();
919 if (r)
920 goto out;
921
922 for (i = 0; i < args->count; i++) {
923 hva = gfn_to_hva(kvm, args->start_gfn + i);
924 if (kvm_is_error_hva(hva)) {
925 r = -EFAULT;
926 goto out;
927 }
928
929 /* Lowest order bit is reserved */
930 if (keys[i] & 0x01) {
931 r = -EINVAL;
932 goto out;
933 }
934
935 r = set_guest_storage_key(current->mm, hva,
936 (unsigned long)keys[i], 0);
937 if (r)
938 goto out;
939 }
940 out:
941 kvfree(keys);
942 return r;
943 }
944
945 long kvm_arch_vm_ioctl(struct file *filp,
946 unsigned int ioctl, unsigned long arg)
947 {
948 struct kvm *kvm = filp->private_data;
949 void __user *argp = (void __user *)arg;
950 struct kvm_device_attr attr;
951 int r;
952
953 switch (ioctl) {
954 case KVM_S390_INTERRUPT: {
955 struct kvm_s390_interrupt s390int;
956
957 r = -EFAULT;
958 if (copy_from_user(&s390int, argp, sizeof(s390int)))
959 break;
960 r = kvm_s390_inject_vm(kvm, &s390int);
961 break;
962 }
963 case KVM_ENABLE_CAP: {
964 struct kvm_enable_cap cap;
965 r = -EFAULT;
966 if (copy_from_user(&cap, argp, sizeof(cap)))
967 break;
968 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
969 break;
970 }
971 case KVM_CREATE_IRQCHIP: {
972 struct kvm_irq_routing_entry routing;
973
974 r = -EINVAL;
975 if (kvm->arch.use_irqchip) {
976 /* Set up dummy routing. */
977 memset(&routing, 0, sizeof(routing));
978 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
979 }
980 break;
981 }
982 case KVM_SET_DEVICE_ATTR: {
983 r = -EFAULT;
984 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
985 break;
986 r = kvm_s390_vm_set_attr(kvm, &attr);
987 break;
988 }
989 case KVM_GET_DEVICE_ATTR: {
990 r = -EFAULT;
991 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
992 break;
993 r = kvm_s390_vm_get_attr(kvm, &attr);
994 break;
995 }
996 case KVM_HAS_DEVICE_ATTR: {
997 r = -EFAULT;
998 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
999 break;
1000 r = kvm_s390_vm_has_attr(kvm, &attr);
1001 break;
1002 }
1003 case KVM_S390_GET_SKEYS: {
1004 struct kvm_s390_skeys args;
1005
1006 r = -EFAULT;
1007 if (copy_from_user(&args, argp,
1008 sizeof(struct kvm_s390_skeys)))
1009 break;
1010 r = kvm_s390_get_skeys(kvm, &args);
1011 break;
1012 }
1013 case KVM_S390_SET_SKEYS: {
1014 struct kvm_s390_skeys args;
1015
1016 r = -EFAULT;
1017 if (copy_from_user(&args, argp,
1018 sizeof(struct kvm_s390_skeys)))
1019 break;
1020 r = kvm_s390_set_skeys(kvm, &args);
1021 break;
1022 }
1023 default:
1024 r = -ENOTTY;
1025 }
1026
1027 return r;
1028 }
1029
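/*
 * Query the AP (adjunct processor) configuration with the PQAP(QCI)
 * instruction. The result is stored in the 128-byte buffer at @config;
 * the condition code of the instruction is returned.
 */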
1030 static int kvm_s390_query_ap_config(u8 *config)
1031 {
1032 u32 fcn_code = 0x04000000UL;
1033 u32 cc = 0;
1034
1035 memset(config, 0, 128);
1036 asm volatile(
1037 "lgr 0,%1\n"
1038 "lgr 2,%2\n"
1039 ".long 0xb2af0000\n" /* PQAP(QCI) */
1040 "0: ipm %0\n"
1041 "srl %0,28\n"
1042 "1:\n"
1043 EX_TABLE(0b, 1b)
1044 : "+r" (cc)
1045 : "r" (fcn_code), "r" (config)
1046 : "cc", "0", "2", "memory"
1047 );
1048
1049 return cc;
1050 }
1051
1052 static int kvm_s390_apxa_installed(void)
1053 {
1054 u8 config[128];
1055 int cc;
1056
1057 if (test_facility(12)) {
1058 cc = kvm_s390_query_ap_config(config);
1059
1060 if (cc)
1061 pr_err("PQAP(QCI) failed with cc=%d", cc);
1062 else
1063 return config[0] & 0x40;
1064 }
1065
1066 return 0;
1067 }
1068
1069 static void kvm_s390_set_crycb_format(struct kvm *kvm)
1070 {
1071 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1072
1073 if (kvm_s390_apxa_installed())
1074 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1075 else
1076 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1077 }
1078
1079 static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
1080 {
1081 get_cpu_id(cpu_id);
1082 cpu_id->version = 0xff;
1083 }
1084
1085 static int kvm_s390_crypto_init(struct kvm *kvm)
1086 {
1087 if (!test_kvm_facility(kvm, 76))
1088 return 0;
1089
1090 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1091 GFP_KERNEL | GFP_DMA);
1092 if (!kvm->arch.crypto.crycb)
1093 return -ENOMEM;
1094
1095 kvm_s390_set_crycb_format(kvm);
1096
1097 /* Enable AES/DEA protected key functions by default */
1098 kvm->arch.crypto.aes_kw = 1;
1099 kvm->arch.crypto.dea_kw = 1;
1100 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1101 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1102 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1103 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1104
1105 return 0;
1106 }
1107
1108 static void sca_dispose(struct kvm *kvm)
1109 {
1110 if (kvm->arch.use_esca)
1111 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
1112 else
1113 free_page((unsigned long)(kvm->arch.sca));
1114 kvm->arch.sca = NULL;
1115 }
1116
1117 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1118 {
1119 int i, rc;
1120 char debug_name[16];
1121 static unsigned long sca_offset;
1122
1123 rc = -EINVAL;
1124 #ifdef CONFIG_KVM_S390_UCONTROL
1125 if (type & ~KVM_VM_S390_UCONTROL)
1126 goto out_err;
1127 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1128 goto out_err;
1129 #else
1130 if (type)
1131 goto out_err;
1132 #endif
1133
1134 rc = s390_enable_sie();
1135 if (rc)
1136 goto out_err;
1137
1138 rc = -ENOMEM;
1139
1140 kvm->arch.use_esca = 0; /* start with basic SCA */
1141 rwlock_init(&kvm->arch.sca_lock);
1142 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
1143 if (!kvm->arch.sca)
1144 goto out_err;
1145 spin_lock(&kvm_lock);
1146 sca_offset += 16;
1147 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1148 sca_offset = 0;
1149 kvm->arch.sca = (struct bsca_block *)
1150 ((char *) kvm->arch.sca + sca_offset);
1151 spin_unlock(&kvm_lock);
1152
1153 sprintf(debug_name, "kvm-%u", current->pid);
1154
1155 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1156 if (!kvm->arch.dbf)
1157 goto out_err;
1158
1159 /*
1160 * The architectural maximum amount of facilities is 16 kbit. To store
1161 * this amount, 2 kbyte of memory is required. Thus we need a full
1162 * page to hold the guest facility list (arch.model.fac->list) and the
1163 * facility mask (arch.model.fac->mask). Its address size has to be
1164 * 31 bits and word aligned.
1165 */
1166 kvm->arch.model.fac =
1167 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1168 if (!kvm->arch.model.fac)
1169 goto out_err;
1170
1171 /* Populate the facility mask initially. */
1172 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
1173 S390_ARCH_FAC_LIST_SIZE_BYTE);
1174 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1175 if (i < kvm_s390_fac_list_mask_size())
1176 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
1177 else
1178 kvm->arch.model.fac->mask[i] = 0UL;
1179 }
1180
1181 /* Populate the facility list initially. */
1182 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1183 S390_ARCH_FAC_LIST_SIZE_BYTE);
1184
1185 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
1186 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
1187
1188 if (kvm_s390_crypto_init(kvm) < 0)
1189 goto out_err;
1190
1191 spin_lock_init(&kvm->arch.float_int.lock);
1192 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1193 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
1194 init_waitqueue_head(&kvm->arch.ipte_wq);
1195 mutex_init(&kvm->arch.ipte_mutex);
1196
1197 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1198 VM_EVENT(kvm, 3, "vm created with type %lu", type);
1199
1200 if (type & KVM_VM_S390_UCONTROL) {
1201 kvm->arch.gmap = NULL;
1202 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1203 } else {
1204 if (sclp.hamax == U64_MAX)
1205 kvm->arch.mem_limit = TASK_MAX_SIZE;
1206 else
1207 kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
1208 sclp.hamax + 1);
1209 kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
1210 if (!kvm->arch.gmap)
1211 goto out_err;
1212 kvm->arch.gmap->private = kvm;
1213 kvm->arch.gmap->pfault_enabled = 0;
1214 }
1215
1216 kvm->arch.css_support = 0;
1217 kvm->arch.use_irqchip = 0;
1218 kvm->arch.epoch = 0;
1219
1220 spin_lock_init(&kvm->arch.start_stop_lock);
1221 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
1222
1223 return 0;
1224 out_err:
1225 kfree(kvm->arch.crypto.crycb);
1226 free_page((unsigned long)kvm->arch.model.fac);
1227 debug_unregister(kvm->arch.dbf);
1228 sca_dispose(kvm);
1229 KVM_EVENT(3, "creation of vm failed: %d", rc);
1230 return rc;
1231 }
1232
1233 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1234 {
1235 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1236 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
1237 kvm_s390_clear_local_irqs(vcpu);
1238 kvm_clear_async_pf_completion_queue(vcpu);
1239 if (!kvm_is_ucontrol(vcpu->kvm))
1240 sca_del_vcpu(vcpu);
1241
1242 if (kvm_is_ucontrol(vcpu->kvm))
1243 gmap_free(vcpu->arch.gmap);
1244
1245 if (vcpu->kvm->arch.use_cmma)
1246 kvm_s390_vcpu_unsetup_cmma(vcpu);
1247 free_page((unsigned long)(vcpu->arch.sie_block));
1248
1249 kvm_vcpu_uninit(vcpu);
1250 kmem_cache_free(kvm_vcpu_cache, vcpu);
1251 }
1252
1253 static void kvm_free_vcpus(struct kvm *kvm)
1254 {
1255 unsigned int i;
1256 struct kvm_vcpu *vcpu;
1257
1258 kvm_for_each_vcpu(i, vcpu, kvm)
1259 kvm_arch_vcpu_destroy(vcpu);
1260
1261 mutex_lock(&kvm->lock);
1262 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1263 kvm->vcpus[i] = NULL;
1264
1265 atomic_set(&kvm->online_vcpus, 0);
1266 mutex_unlock(&kvm->lock);
1267 }
1268
1269 void kvm_arch_destroy_vm(struct kvm *kvm)
1270 {
1271 kvm_free_vcpus(kvm);
1272 free_page((unsigned long)kvm->arch.model.fac);
1273 sca_dispose(kvm);
1274 debug_unregister(kvm->arch.dbf);
1275 kfree(kvm->arch.crypto.crycb);
1276 if (!kvm_is_ucontrol(kvm))
1277 gmap_free(kvm->arch.gmap);
1278 kvm_s390_destroy_adapters(kvm);
1279 kvm_s390_clear_float_irqs(kvm);
1280 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1281 }
1282
1283 /* Section: vcpu related */
1284 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1285 {
1286 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1287 if (!vcpu->arch.gmap)
1288 return -ENOMEM;
1289 vcpu->arch.gmap->private = vcpu->kvm;
1290
1291 return 0;
1292 }
1293
1294 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1295 {
1296 read_lock(&vcpu->kvm->arch.sca_lock);
1297 if (vcpu->kvm->arch.use_esca) {
1298 struct esca_block *sca = vcpu->kvm->arch.sca;
1299
1300 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1301 sca->cpu[vcpu->vcpu_id].sda = 0;
1302 } else {
1303 struct bsca_block *sca = vcpu->kvm->arch.sca;
1304
1305 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1306 sca->cpu[vcpu->vcpu_id].sda = 0;
1307 }
1308 read_unlock(&vcpu->kvm->arch.sca_lock);
1309 }
1310
1311 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1312 {
1313 read_lock(&vcpu->kvm->arch.sca_lock);
1314 if (vcpu->kvm->arch.use_esca) {
1315 struct esca_block *sca = vcpu->kvm->arch.sca;
1316
1317 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1318 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1319 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
1320 vcpu->arch.sie_block->ecb2 |= 0x04U;
1321 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1322 } else {
1323 struct bsca_block *sca = vcpu->kvm->arch.sca;
1324
1325 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1326 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1327 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1328 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1329 }
1330 read_unlock(&vcpu->kvm->arch.sca_lock);
1331 }
1332
1333 /* Basic SCA to Extended SCA data copy routines */
1334 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1335 {
1336 d->sda = s->sda;
1337 d->sigp_ctrl.c = s->sigp_ctrl.c;
1338 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1339 }
1340
1341 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1342 {
1343 int i;
1344
1345 d->ipte_control = s->ipte_control;
1346 d->mcn[0] = s->mcn;
1347 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1348 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1349 }
1350
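/*
 * Replace the basic SCA by an extended SCA so that more than
 * KVM_S390_BSCA_CPU_SLOTS VCPUs can be created. All VCPUs are blocked
 * while the SCA origin in their SIE control blocks is updated.
 */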
1351 static int sca_switch_to_extended(struct kvm *kvm)
1352 {
1353 struct bsca_block *old_sca = kvm->arch.sca;
1354 struct esca_block *new_sca;
1355 struct kvm_vcpu *vcpu;
1356 unsigned int vcpu_idx;
1357 u32 scaol, scaoh;
1358
1359 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1360 if (!new_sca)
1361 return -ENOMEM;
1362
1363 scaoh = (u32)((u64)(new_sca) >> 32);
1364 scaol = (u32)(u64)(new_sca) & ~0x3fU;
1365
1366 kvm_s390_vcpu_block_all(kvm);
1367 write_lock(&kvm->arch.sca_lock);
1368
1369 sca_copy_b_to_e(new_sca, old_sca);
1370
1371 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1372 vcpu->arch.sie_block->scaoh = scaoh;
1373 vcpu->arch.sie_block->scaol = scaol;
1374 vcpu->arch.sie_block->ecb2 |= 0x04U;
1375 }
1376 kvm->arch.sca = new_sca;
1377 kvm->arch.use_esca = 1;
1378
1379 write_unlock(&kvm->arch.sca_lock);
1380 kvm_s390_vcpu_unblock_all(kvm);
1381
1382 free_page((unsigned long)old_sca);
1383
1384 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1385 old_sca, kvm->arch.sca);
1386 return 0;
1387 }
1388
1389 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1390 {
1391 int rc;
1392
1393 if (id < KVM_S390_BSCA_CPU_SLOTS)
1394 return true;
1395 if (!sclp.has_esca)
1396 return false;
1397
1398 mutex_lock(&kvm->lock);
1399 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1400 mutex_unlock(&kvm->lock);
1401
1402 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1403 }
1404
1405 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1406 {
1407 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1408 kvm_clear_async_pf_completion_queue(vcpu);
1409 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1410 KVM_SYNC_GPRS |
1411 KVM_SYNC_ACRS |
1412 KVM_SYNC_CRS |
1413 KVM_SYNC_ARCH0 |
1414 KVM_SYNC_PFAULT;
1415 if (test_kvm_facility(vcpu->kvm, 64))
1416 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1417 if (test_kvm_facility(vcpu->kvm, 129))
1418 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1419
1420 if (kvm_is_ucontrol(vcpu->kvm))
1421 return __kvm_ucontrol_vcpu_init(vcpu);
1422
1423 return 0;
1424 }
1425
1426 /*
1427 * Backs up the current FP/VX register save area on a particular
1428 * destination. Used to switch between different register save
1429 * areas.
1430 */
1431 static inline void save_fpu_to(struct fpu *dst)
1432 {
1433 dst->fpc = current->thread.fpu.fpc;
1434 dst->regs = current->thread.fpu.regs;
1435 }
1436
1437 /*
1438 * Switches the FP/VX register save area from which to lazy
1439 * restore register contents.
1440 */
1441 static inline void load_fpu_from(struct fpu *from)
1442 {
1443 current->thread.fpu.fpc = from->fpc;
1444 current->thread.fpu.regs = from->regs;
1445 }
1446
1447 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1448 {
1449 /* Save host register state */
1450 save_fpu_regs();
1451 save_fpu_to(&vcpu->arch.host_fpregs);
1452
1453 if (test_kvm_facility(vcpu->kvm, 129)) {
1454 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
1455 /*
1456 * Use the register save area in the SIE-control block
1457 * for register restore and save in kvm_arch_vcpu_put()
1458 */
1459 current->thread.fpu.vxrs =
1460 (__vector128 *)&vcpu->run->s.regs.vrs;
1461 } else
1462 load_fpu_from(&vcpu->arch.guest_fpregs);
1463
1464 if (test_fp_ctl(current->thread.fpu.fpc))
1465 /* User space provided an invalid FPC, let's clear it */
1466 current->thread.fpu.fpc = 0;
1467
1468 save_access_regs(vcpu->arch.host_acrs);
1469 restore_access_regs(vcpu->run->s.regs.acrs);
1470 gmap_enable(vcpu->arch.gmap);
1471 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1472 }
1473
1474 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1475 {
1476 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1477 gmap_disable(vcpu->arch.gmap);
1478
1479 save_fpu_regs();
1480
1481 if (test_kvm_facility(vcpu->kvm, 129))
1482 /*
1483 * kvm_arch_vcpu_load() set up the register save area to
1484 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
1485 * are already saved. Only the floating-point control must be
1486 * copied.
1487 */
1488 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
1489 else
1490 save_fpu_to(&vcpu->arch.guest_fpregs);
1491 load_fpu_from(&vcpu->arch.host_fpregs);
1492
1493 save_access_regs(vcpu->run->s.regs.acrs);
1494 restore_access_regs(vcpu->arch.host_acrs);
1495 }
1496
1497 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1498 {
1499 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1500 vcpu->arch.sie_block->gpsw.mask = 0UL;
1501 vcpu->arch.sie_block->gpsw.addr = 0UL;
1502 kvm_s390_set_prefix(vcpu, 0);
1503 vcpu->arch.sie_block->cputm = 0UL;
1504 vcpu->arch.sie_block->ckc = 0UL;
1505 vcpu->arch.sie_block->todpr = 0;
1506 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1507 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1508 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1509 vcpu->arch.guest_fpregs.fpc = 0;
1510 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1511 vcpu->arch.sie_block->gbea = 1;
1512 vcpu->arch.sie_block->pp = 0;
1513 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1514 kvm_clear_async_pf_completion_queue(vcpu);
1515 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1516 kvm_s390_vcpu_stop(vcpu);
1517 kvm_s390_clear_local_irqs(vcpu);
1518 }
1519
1520 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1521 {
1522 mutex_lock(&vcpu->kvm->lock);
1523 preempt_disable();
1524 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1525 preempt_enable();
1526 mutex_unlock(&vcpu->kvm->lock);
1527 if (!kvm_is_ucontrol(vcpu->kvm)) {
1528 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1529 sca_add_vcpu(vcpu);
1530 }
1531
1532 }
1533
1534 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1535 {
1536 if (!test_kvm_facility(vcpu->kvm, 76))
1537 return;
1538
1539 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1540
1541 if (vcpu->kvm->arch.crypto.aes_kw)
1542 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1543 if (vcpu->kvm->arch.crypto.dea_kw)
1544 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1545
1546 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1547 }
1548
1549 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1550 {
1551 free_page(vcpu->arch.sie_block->cbrlo);
1552 vcpu->arch.sie_block->cbrlo = 0;
1553 }
1554
1555 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1556 {
1557 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1558 if (!vcpu->arch.sie_block->cbrlo)
1559 return -ENOMEM;
1560
1561 vcpu->arch.sie_block->ecb2 |= 0x80;
1562 vcpu->arch.sie_block->ecb2 &= ~0x08;
1563 return 0;
1564 }
1565
1566 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1567 {
1568 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1569
1570 vcpu->arch.cpu_id = model->cpu_id;
1571 vcpu->arch.sie_block->ibc = model->ibc;
1572 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1573 }
1574
1575 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1576 {
1577 int rc = 0;
1578
1579 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1580 CPUSTAT_SM |
1581 CPUSTAT_STOPPED);
1582
1583 if (test_kvm_facility(vcpu->kvm, 78))
1584 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1585 else if (test_kvm_facility(vcpu->kvm, 8))
1586 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1587
1588 kvm_s390_vcpu_setup_model(vcpu);
1589
1590 vcpu->arch.sie_block->ecb = 6;
1591 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
1592 vcpu->arch.sie_block->ecb |= 0x10;
1593
1594 vcpu->arch.sie_block->ecb2 = 8;
1595 vcpu->arch.sie_block->eca = 0xC1002000U;
1596 if (sclp.has_siif)
1597 vcpu->arch.sie_block->eca |= 1;
1598 if (sclp.has_sigpif)
1599 vcpu->arch.sie_block->eca |= 0x10000000U;
1600 if (test_kvm_facility(vcpu->kvm, 64))
1601 vcpu->arch.sie_block->ecb3 |= 0x01;
1602 if (test_kvm_facility(vcpu->kvm, 129)) {
1603 vcpu->arch.sie_block->eca |= 0x00020000;
1604 vcpu->arch.sie_block->ecd |= 0x20000000;
1605 }
1606 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
1607 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
1608
1609 if (vcpu->kvm->arch.use_cmma) {
1610 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1611 if (rc)
1612 return rc;
1613 }
1614 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1615 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
1616
1617 kvm_s390_vcpu_crypto_setup(vcpu);
1618
1619 return rc;
1620 }
1621
1622 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1623 unsigned int id)
1624 {
1625 struct kvm_vcpu *vcpu;
1626 struct sie_page *sie_page;
1627 int rc = -EINVAL;
1628
1629 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
1630 goto out;
1631
1632 rc = -ENOMEM;
1633
1634 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1635 if (!vcpu)
1636 goto out;
1637
1638 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1639 if (!sie_page)
1640 goto out_free_cpu;
1641
1642 vcpu->arch.sie_block = &sie_page->sie_block;
1643 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1644
1645 vcpu->arch.sie_block->icpua = id;
1646 spin_lock_init(&vcpu->arch.local_int.lock);
1647 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1648 vcpu->arch.local_int.wq = &vcpu->wq;
1649 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1650
1651 /*
1652 * Allocate a save area for floating-point registers. If the vector
1653 * extension is available, register contents are saved in the SIE
1654 * control block. The allocated save area is still required in
1655 * particular places, for example, in kvm_s390_vcpu_store_status().
1656 */
1657 vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
1658 GFP_KERNEL);
1659 if (!vcpu->arch.guest_fpregs.fprs)
1660 goto out_free_sie_block;
1661
1662 rc = kvm_vcpu_init(vcpu, kvm, id);
1663 if (rc)
1664 goto out_free_sie_block;
1665 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
1666 vcpu->arch.sie_block);
1667 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1668
1669 return vcpu;
1670 out_free_sie_block:
1671 free_page((unsigned long)(vcpu->arch.sie_block));
1672 out_free_cpu:
1673 kmem_cache_free(kvm_vcpu_cache, vcpu);
1674 out:
1675 return ERR_PTR(rc);
1676 }
1677
1678 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1679 {
1680 return kvm_s390_vcpu_has_irq(vcpu, 0);
1681 }
1682
1683 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1684 {
1685 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1686 exit_sie(vcpu);
1687 }
1688
1689 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1690 {
1691 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1692 }
1693
1694 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1695 {
1696 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1697 exit_sie(vcpu);
1698 }
1699
1700 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1701 {
1702 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1703 }
1704
1705 /*
1706 * Kick a guest cpu out of SIE and wait until SIE is not running.
1707 * If the CPU is not running (e.g. waiting as idle) the function will
1708 * return immediately. */
1709 void exit_sie(struct kvm_vcpu *vcpu)
1710 {
1711 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1712 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1713 cpu_relax();
1714 }
1715
1716 /* Kick a guest cpu out of SIE to process a request synchronously */
1717 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1718 {
1719 kvm_make_request(req, vcpu);
1720 kvm_s390_vcpu_request(vcpu);
1721 }
1722
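/*
 * gmap IPTE notifier callback: if the invalidated address hits a VCPU's
 * prefix pages, request KVM_REQ_MMU_RELOAD so the notifier is re-armed by
 * kvm_s390_handle_requests().
 */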
1723 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1724 {
1725 int i;
1726 struct kvm *kvm = gmap->private;
1727 struct kvm_vcpu *vcpu;
1728
1729 kvm_for_each_vcpu(i, vcpu, kvm) {
1730 /* match against both prefix pages */
1731 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1732 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1733 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1734 }
1735 }
1736 }
1737
1738 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1739 {
1740 /* kvm common code refers to this, but never calls it */
1741 BUG();
1742 return 0;
1743 }
1744
1745 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1746 struct kvm_one_reg *reg)
1747 {
1748 int r = -EINVAL;
1749
1750 switch (reg->id) {
1751 case KVM_REG_S390_TODPR:
1752 r = put_user(vcpu->arch.sie_block->todpr,
1753 (u32 __user *)reg->addr);
1754 break;
1755 case KVM_REG_S390_EPOCHDIFF:
1756 r = put_user(vcpu->arch.sie_block->epoch,
1757 (u64 __user *)reg->addr);
1758 break;
1759 case KVM_REG_S390_CPU_TIMER:
1760 r = put_user(vcpu->arch.sie_block->cputm,
1761 (u64 __user *)reg->addr);
1762 break;
1763 case KVM_REG_S390_CLOCK_COMP:
1764 r = put_user(vcpu->arch.sie_block->ckc,
1765 (u64 __user *)reg->addr);
1766 break;
1767 case KVM_REG_S390_PFTOKEN:
1768 r = put_user(vcpu->arch.pfault_token,
1769 (u64 __user *)reg->addr);
1770 break;
1771 case KVM_REG_S390_PFCOMPARE:
1772 r = put_user(vcpu->arch.pfault_compare,
1773 (u64 __user *)reg->addr);
1774 break;
1775 case KVM_REG_S390_PFSELECT:
1776 r = put_user(vcpu->arch.pfault_select,
1777 (u64 __user *)reg->addr);
1778 break;
1779 case KVM_REG_S390_PP:
1780 r = put_user(vcpu->arch.sie_block->pp,
1781 (u64 __user *)reg->addr);
1782 break;
1783 case KVM_REG_S390_GBEA:
1784 r = put_user(vcpu->arch.sie_block->gbea,
1785 (u64 __user *)reg->addr);
1786 break;
1787 default:
1788 break;
1789 }
1790
1791 return r;
1792 }
1793
1794 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1795 struct kvm_one_reg *reg)
1796 {
1797 int r = -EINVAL;
1798
1799 switch (reg->id) {
1800 case KVM_REG_S390_TODPR:
1801 r = get_user(vcpu->arch.sie_block->todpr,
1802 (u32 __user *)reg->addr);
1803 break;
1804 case KVM_REG_S390_EPOCHDIFF:
1805 r = get_user(vcpu->arch.sie_block->epoch,
1806 (u64 __user *)reg->addr);
1807 break;
1808 case KVM_REG_S390_CPU_TIMER:
1809 r = get_user(vcpu->arch.sie_block->cputm,
1810 (u64 __user *)reg->addr);
1811 break;
1812 case KVM_REG_S390_CLOCK_COMP:
1813 r = get_user(vcpu->arch.sie_block->ckc,
1814 (u64 __user *)reg->addr);
1815 break;
1816 case KVM_REG_S390_PFTOKEN:
1817 r = get_user(vcpu->arch.pfault_token,
1818 (u64 __user *)reg->addr);
1819 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1820 kvm_clear_async_pf_completion_queue(vcpu);
1821 break;
1822 case KVM_REG_S390_PFCOMPARE:
1823 r = get_user(vcpu->arch.pfault_compare,
1824 (u64 __user *)reg->addr);
1825 break;
1826 case KVM_REG_S390_PFSELECT:
1827 r = get_user(vcpu->arch.pfault_select,
1828 (u64 __user *)reg->addr);
1829 break;
1830 case KVM_REG_S390_PP:
1831 r = get_user(vcpu->arch.sie_block->pp,
1832 (u64 __user *)reg->addr);
1833 break;
1834 case KVM_REG_S390_GBEA:
1835 r = get_user(vcpu->arch.sie_block->gbea,
1836 (u64 __user *)reg->addr);
1837 break;
1838 default:
1839 break;
1840 }
1841
1842 return r;
1843 }
1844
1845 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1846 {
1847 kvm_s390_vcpu_initial_reset(vcpu);
1848 return 0;
1849 }
1850
1851 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1852 {
1853 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1854 return 0;
1855 }
1856
1857 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1858 {
1859 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1860 return 0;
1861 }
1862
1863 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1864 struct kvm_sregs *sregs)
1865 {
1866 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1867 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1868 restore_access_regs(vcpu->run->s.regs.acrs);
1869 return 0;
1870 }
1871
1872 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1873 struct kvm_sregs *sregs)
1874 {
1875 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1876 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1877 return 0;
1878 }
1879
1880 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1881 {
1882 if (test_fp_ctl(fpu->fpc))
1883 return -EINVAL;
1884 memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
1885 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1886 save_fpu_regs();
1887 load_fpu_from(&vcpu->arch.guest_fpregs);
1888 return 0;
1889 }
1890
1891 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1892 {
1893 memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1894 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1895 return 0;
1896 }
1897
1898 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1899 {
1900 int rc = 0;
1901
1902 if (!is_vcpu_stopped(vcpu))
1903 rc = -EBUSY;
1904 else {
1905 vcpu->run->psw_mask = psw.mask;
1906 vcpu->run->psw_addr = psw.addr;
1907 }
1908 return rc;
1909 }
1910
1911 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1912 struct kvm_translation *tr)
1913 {
1914 return -EINVAL; /* not implemented yet */
1915 }
1916
1917 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1918 KVM_GUESTDBG_USE_HW_BP | \
1919 KVM_GUESTDBG_ENABLE)
1920
1921 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1922 struct kvm_guest_debug *dbg)
1923 {
1924 int rc = 0;
1925
1926 vcpu->guest_debug = 0;
1927 kvm_s390_clear_bp_data(vcpu);
1928
1929 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1930 return -EINVAL;
1931
1932 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1933 vcpu->guest_debug = dbg->control;
1934 /* enforce guest PER */
1935 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1936
1937 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1938 rc = kvm_s390_import_bp_data(vcpu, dbg);
1939 } else {
1940 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1941 vcpu->arch.guestdbg.last_bp = 0;
1942 }
1943
1944 if (rc) {
1945 vcpu->guest_debug = 0;
1946 kvm_s390_clear_bp_data(vcpu);
1947 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1948 }
1949
1950 return rc;
1951 }
1952
1953 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1954 struct kvm_mp_state *mp_state)
1955 {
1956 /* CHECK_STOP and LOAD are not supported yet */
1957 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1958 KVM_MP_STATE_OPERATING;
1959 }
1960
1961 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1962 struct kvm_mp_state *mp_state)
1963 {
1964 int rc = 0;
1965
1966 /* user space knows about this interface - let it control the state */
1967 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1968
1969 switch (mp_state->mp_state) {
1970 case KVM_MP_STATE_STOPPED:
1971 kvm_s390_vcpu_stop(vcpu);
1972 break;
1973 case KVM_MP_STATE_OPERATING:
1974 kvm_s390_vcpu_start(vcpu);
1975 break;
1976 case KVM_MP_STATE_LOAD:
1977 case KVM_MP_STATE_CHECK_STOP:
1978 /* fall through - CHECK_STOP and LOAD are not supported yet */
1979 default:
1980 rc = -ENXIO;
1981 }
1982
1983 return rc;
1984 }
1985
1986 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1987 {
1988 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1989 }
1990
1991 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1992 {
1993 retry:
1994 kvm_s390_vcpu_request_handled(vcpu);
1995 if (!vcpu->requests)
1996 return 0;
1997 /*
1998 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1999 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2000 * This ensures that the ipte instruction for this request has
2001 * already finished. We might race against a second unmapper that
2002 * wants to set the blocking bit. Let's just retry the request loop.
2003 */
2004 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2005 int rc;
2006 rc = gmap_ipte_notify(vcpu->arch.gmap,
2007 kvm_s390_get_prefix(vcpu),
2008 PAGE_SIZE * 2);
2009 if (rc)
2010 return rc;
2011 goto retry;
2012 }
2013
2014 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2015 vcpu->arch.sie_block->ihcpu = 0xffff;
2016 goto retry;
2017 }
2018
2019 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2020 if (!ibs_enabled(vcpu)) {
2021 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2022 atomic_or(CPUSTAT_IBS,
2023 &vcpu->arch.sie_block->cpuflags);
2024 }
2025 goto retry;
2026 }
2027
2028 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2029 if (ibs_enabled(vcpu)) {
2030 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2031 atomic_andnot(CPUSTAT_IBS,
2032 &vcpu->arch.sie_block->cpuflags);
2033 }
2034 goto retry;
2035 }
2036
2037 /* nothing to do, just clear the request */
2038 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2039
2040 return 0;
2041 }
2042
2043 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2044 {
2045 struct kvm_vcpu *vcpu;
2046 int i;
2047
2048 mutex_lock(&kvm->lock);
2049 preempt_disable();
2050 kvm->arch.epoch = tod - get_tod_clock();
2051 kvm_s390_vcpu_block_all(kvm);
2052 kvm_for_each_vcpu(i, vcpu, kvm)
2053 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2054 kvm_s390_vcpu_unblock_all(kvm);
2055 preempt_enable();
2056 mutex_unlock(&kvm->lock);
2057 }
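
/*
 * Illustrative sketch only, not called anywhere: the relationship that the
 * epoch computed above establishes. SIE adds the per-vcpu epoch to the host
 * TOD clock, so a guest STORE CLOCK observes the TOD value requested by
 * userspace. The function name is made up for the example.
 */
static inline u64 example_guest_tod(u64 host_tod, u64 epoch)
{
	/* epoch = requested_guest_tod - host_tod_at_set_time (see above) */
	return host_tod + epoch;
}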
2058
2059 /**
2060 * kvm_arch_fault_in_page - fault-in guest page if necessary
2061 * @vcpu: The corresponding virtual cpu
2062 * @gpa: Guest physical address
2063 * @writable: Whether the page should be writable or not
2064 *
2065 * Make sure that a guest page has been faulted-in on the host.
2066 *
2067 * Return: Zero on success, negative error code otherwise.
2068 */
2069 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2070 {
2071 return gmap_fault(vcpu->arch.gmap, gpa,
2072 writable ? FAULT_FLAG_WRITE : 0);
2073 }
2074
2075 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2076 unsigned long token)
2077 {
2078 struct kvm_s390_interrupt inti;
2079 struct kvm_s390_irq irq;
2080
2081 if (start_token) {
2082 irq.u.ext.ext_params2 = token;
2083 irq.type = KVM_S390_INT_PFAULT_INIT;
2084 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2085 } else {
2086 inti.type = KVM_S390_INT_PFAULT_DONE;
2087 inti.parm64 = token;
2088 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2089 }
2090 }
2091
2092 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2093 struct kvm_async_pf *work)
2094 {
2095 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2096 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2097 }
2098
2099 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2100 struct kvm_async_pf *work)
2101 {
2102 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2103 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2104 }
2105
2106 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2107 struct kvm_async_pf *work)
2108 {
2109 /* s390 will always inject the page directly */
2110 }
2111
2112 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2113 {
2114 /*
2115 * s390 will always inject the page directly,
2116 * but we still want check_async_completion to clean up
2117 */
2118 return true;
2119 }
2120
2121 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2122 {
2123 hva_t hva;
2124 struct kvm_arch_async_pf arch;
2125 int rc;
2126
2127 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2128 return 0;
2129 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2130 vcpu->arch.pfault_compare)
2131 return 0;
2132 if (psw_extint_disabled(vcpu))
2133 return 0;
2134 if (kvm_s390_vcpu_has_irq(vcpu, 0))
2135 return 0;
2136 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2137 return 0;
2138 if (!vcpu->arch.gmap->pfault_enabled)
2139 return 0;
2140
2141 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2142 hva += current->thread.gmap_addr & ~PAGE_MASK;
2143 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2144 return 0;
2145
2146 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2147 return rc;
2148 }
2149
2150 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2151 {
2152 int rc, cpuflags;
2153
2154 /*
2155 * On s390, notifications for arriving pages will be delivered directly
2156 * to the guest, but the housekeeping for completed pfaults is
2157 * handled outside the worker.
2158 */
2159 kvm_check_async_pf_completion(vcpu);
2160
2161 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2162 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2163
2164 if (need_resched())
2165 schedule();
2166
2167 if (test_cpu_flag(CIF_MCCK_PENDING))
2168 s390_handle_mcck();
2169
2170 if (!kvm_is_ucontrol(vcpu->kvm)) {
2171 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2172 if (rc)
2173 return rc;
2174 }
2175
2176 rc = kvm_s390_handle_requests(vcpu);
2177 if (rc)
2178 return rc;
2179
2180 if (guestdbg_enabled(vcpu)) {
2181 kvm_s390_backup_guest_per_regs(vcpu);
2182 kvm_s390_patch_guest_per_regs(vcpu);
2183 }
2184
2185 vcpu->arch.sie_block->icptcode = 0;
2186 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2187 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2188 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2189
2190 return 0;
2191 }
2192
2193 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2194 {
2195 psw_t *psw = &vcpu->arch.sie_block->gpsw;
2196 u8 opcode;
2197 int rc;
2198
2199 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2200 trace_kvm_s390_sie_fault(vcpu);
2201
2202 /*
2203 * We want to inject an addressing exception, which is defined as a
2204 * suppressing or terminating exception. However, since we came here
2205 * by a DAT access exception, the PSW still points to the faulting
2206 * instruction since DAT exceptions are nullifying. So we've got
2207 * to look up the current opcode to get the length of the instruction
2208 * to be able to forward the PSW.
2209 */
2210 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
2211 if (rc)
2212 return kvm_s390_inject_prog_cond(vcpu, rc);
2213 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2214
2215 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2216 }
2217
2218 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2219 {
2220 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2221 vcpu->arch.sie_block->icptcode);
2222 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2223
2224 if (guestdbg_enabled(vcpu))
2225 kvm_s390_restore_guest_per_regs(vcpu);
2226
2227 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2228 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
2229
2230 if (vcpu->arch.sie_block->icptcode > 0) {
2231 int rc = kvm_handle_sie_intercept(vcpu);
2232
2233 if (rc != -EOPNOTSUPP)
2234 return rc;
2235 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2236 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2237 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2238 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2239 return -EREMOTE;
2240 } else if (exit_reason != -EFAULT) {
2241 vcpu->stat.exit_null++;
2242 return 0;
2243 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2244 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2245 vcpu->run->s390_ucontrol.trans_exc_code =
2246 current->thread.gmap_addr;
2247 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2248 return -EREMOTE;
2249 } else if (current->thread.gmap_pfault) {
2250 trace_kvm_s390_major_guest_pfault(vcpu);
2251 current->thread.gmap_pfault = 0;
2252 if (kvm_arch_setup_async_pf(vcpu))
2253 return 0;
2254 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2255 }
2256 return vcpu_post_run_fault_in_sie(vcpu);
2257 }
2258
2259 static int __vcpu_run(struct kvm_vcpu *vcpu)
2260 {
2261 int rc, exit_reason;
2262
2263 /*
2264 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2265 * ning the guest), so that memslots (and other stuff) are protected
2266 */
2267 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2268
2269 do {
2270 rc = vcpu_pre_run(vcpu);
2271 if (rc)
2272 break;
2273
2274 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2275 /*
2276 * As PF_VCPU will be used in the fault handler, there must be no
2277 * uaccess between guest_enter and guest_exit.
2278 */
2279 local_irq_disable();
2280 __kvm_guest_enter();
2281 local_irq_enable();
2282 exit_reason = sie64a(vcpu->arch.sie_block,
2283 vcpu->run->s.regs.gprs);
2284 local_irq_disable();
2285 __kvm_guest_exit();
2286 local_irq_enable();
2287 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2288
2289 rc = vcpu_post_run(vcpu, exit_reason);
2290 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2291
2292 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2293 return rc;
2294 }
2295
2296 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2297 {
2298 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2299 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2300 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2301 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2302 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2303 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2304 /* some control register changes require a tlb flush */
2305 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2306 }
2307 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2308 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2309 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2310 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2311 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2312 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2313 }
2314 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2315 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2316 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2317 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2318 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2319 kvm_clear_async_pf_completion_queue(vcpu);
2320 }
2321 kvm_run->kvm_dirty_regs = 0;
2322 }
2323
2324 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2325 {
2326 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2327 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2328 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2329 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2330 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2331 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2332 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2333 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2334 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2335 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2336 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2337 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2338 }
2339
2340 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2341 {
2342 int rc;
2343 sigset_t sigsaved;
2344
2345 if (guestdbg_exit_pending(vcpu)) {
2346 kvm_s390_prepare_debug_exit(vcpu);
2347 return 0;
2348 }
2349
2350 if (vcpu->sigset_active)
2351 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2352
2353 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2354 kvm_s390_vcpu_start(vcpu);
2355 } else if (is_vcpu_stopped(vcpu)) {
2356 pr_err_ratelimited("can't run stopped vcpu %d\n",
2357 vcpu->vcpu_id);
2358 return -EINVAL;
2359 }
2360
2361 sync_regs(vcpu, kvm_run);
2362
2363 might_fault();
2364 rc = __vcpu_run(vcpu);
2365
2366 if (signal_pending(current) && !rc) {
2367 kvm_run->exit_reason = KVM_EXIT_INTR;
2368 rc = -EINTR;
2369 }
2370
2371 if (guestdbg_exit_pending(vcpu) && !rc) {
2372 kvm_s390_prepare_debug_exit(vcpu);
2373 rc = 0;
2374 }
2375
2376 if (rc == -EREMOTE) {
2377 /* userspace support is needed, kvm_run has been prepared */
2378 rc = 0;
2379 }
2380
2381 store_regs(vcpu, kvm_run);
2382
2383 if (vcpu->sigset_active)
2384 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2385
2386 vcpu->stat.exit_userspace++;
2387 return rc;
2388 }
2389
2390 /*
2391 * store status at address
2392 * we have two special cases:
2393 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2394 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2395 */
2396 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2397 {
2398 unsigned char archmode = 1;
2399 unsigned int px;
2400 u64 clkcomp;
2401 int rc;
2402
2403 px = kvm_s390_get_prefix(vcpu);
2404 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2405 if (write_guest_abs(vcpu, 163, &archmode, 1))
2406 return -EFAULT;
2407 gpa = 0;
2408 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2409 if (write_guest_real(vcpu, 163, &archmode, 1))
2410 return -EFAULT;
2411 gpa = px;
2412 } else
2413 gpa -= __LC_FPREGS_SAVE_AREA;
2414 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2415 vcpu->arch.guest_fpregs.fprs, 128);
2416 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2417 vcpu->run->s.regs.gprs, 128);
2418 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2419 &vcpu->arch.sie_block->gpsw, 16);
2420 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2421 &px, 4);
2422 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
2423 &vcpu->arch.guest_fpregs.fpc, 4);
2424 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2425 &vcpu->arch.sie_block->todpr, 4);
2426 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
2427 &vcpu->arch.sie_block->cputm, 8);
2428 clkcomp = vcpu->arch.sie_block->ckc >> 8;
2429 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2430 &clkcomp, 8);
2431 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2432 &vcpu->run->s.regs.acrs, 64);
2433 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2434 &vcpu->arch.sie_block->gcr, 128);
2435 return rc ? -EFAULT : 0;
2436 }
2437
2438 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2439 {
2440 /*
2441 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2442 * copying in vcpu load/put. Let's update our copies before we save
2443 * them into the save area.
2444 */
2445 save_fpu_regs();
2446 if (test_kvm_facility(vcpu->kvm, 129)) {
2447 /*
2448 * If the vector extension is available, the vector registers
2449 * which overlap with the floating-point registers are saved in
2450 * the SIE-control block. Hence, extract the floating-point
2451 * registers and the FPC value and store them in the
2452 * guest_fpregs structure.
2453 */
2454 vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
2455 convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
2456 current->thread.fpu.vxrs);
2457 } else
2458 save_fpu_to(&vcpu->arch.guest_fpregs);
2459 save_access_regs(vcpu->run->s.regs.acrs);
2460
2461 return kvm_s390_store_status_unloaded(vcpu, addr);
2462 }
2463
2464 /*
2465 * store additional status at address
2466 */
2467 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2468 unsigned long gpa)
2469 {
2470 /* Only bits 0-53 are used for address formation */
2471 if (!(gpa & ~0x3ff))
2472 return 0;
2473
2474 return write_guest_abs(vcpu, gpa & ~0x3ff,
2475 (void *)&vcpu->run->s.regs.vrs, 512);
2476 }
2477
2478 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2479 {
2480 if (!test_kvm_facility(vcpu->kvm, 129))
2481 return 0;
2482
2483 /*
2484 * The guest VXRS are in the host VXRS due to the lazy
2485 * copying in vcpu load/put. We can simply call save_fpu_regs()
2486 * to save the current register state because we are in the
2487 * middle of a load/put cycle.
2488 *
2489 * Let's update our copies before we save it into the save area.
2490 */
2491 save_fpu_regs();
2492
2493 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2494 }
2495
2496 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2497 {
2498 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2499 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2500 }
2501
2502 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2503 {
2504 unsigned int i;
2505 struct kvm_vcpu *vcpu;
2506
2507 kvm_for_each_vcpu(i, vcpu, kvm) {
2508 __disable_ibs_on_vcpu(vcpu);
2509 }
2510 }
2511
2512 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2513 {
2514 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2515 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2516 }
2517
2518 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2519 {
2520 int i, online_vcpus, started_vcpus = 0;
2521
2522 if (!is_vcpu_stopped(vcpu))
2523 return;
2524
2525 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2526 /* Only one cpu at a time may enter/leave the STOPPED state. */
2527 spin_lock(&vcpu->kvm->arch.start_stop_lock);
2528 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2529
2530 for (i = 0; i < online_vcpus; i++) {
2531 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2532 started_vcpus++;
2533 }
2534
2535 if (started_vcpus == 0) {
2536 /* we're the only active VCPU -> speed it up */
2537 __enable_ibs_on_vcpu(vcpu);
2538 } else if (started_vcpus == 1) {
2539 /*
2540 * As we are starting a second VCPU, we have to disable
2541 * the IBS facility on all VCPUs to remove potentially
2542 * outstanding ENABLE requests.
2543 */
2544 __disable_ibs_on_all_vcpus(vcpu->kvm);
2545 }
2546
2547 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2548 /*
2549 * Another VCPU might have used IBS while we were offline.
2550 * Let's play safe and flush the VCPU at startup.
2551 */
2552 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2553 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2554 return;
2555 }
2556
2557 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2558 {
2559 int i, online_vcpus, started_vcpus = 0;
2560 struct kvm_vcpu *started_vcpu = NULL;
2561
2562 if (is_vcpu_stopped(vcpu))
2563 return;
2564
2565 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2566 /* Only one cpu at a time may enter/leave the STOPPED state. */
2567 spin_lock(&vcpu->kvm->arch.start_stop_lock);
2568 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2569
2570 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2571 kvm_s390_clear_stop_irq(vcpu);
2572
2573 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2574 __disable_ibs_on_vcpu(vcpu);
2575
2576 for (i = 0; i < online_vcpus; i++) {
2577 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2578 started_vcpus++;
2579 started_vcpu = vcpu->kvm->vcpus[i];
2580 }
2581 }
2582
2583 if (started_vcpus == 1) {
2584 /*
2585 * As we only have one VCPU left, we want to enable the
2586 * IBS facility for that VCPU to speed it up.
2587 */
2588 __enable_ibs_on_vcpu(started_vcpu);
2589 }
2590
2591 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2592 return;
2593 }
2594
2595 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2596 struct kvm_enable_cap *cap)
2597 {
2598 int r;
2599
2600 if (cap->flags)
2601 return -EINVAL;
2602
2603 switch (cap->cap) {
2604 case KVM_CAP_S390_CSS_SUPPORT:
2605 if (!vcpu->kvm->arch.css_support) {
2606 vcpu->kvm->arch.css_support = 1;
2607 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2608 trace_kvm_s390_enable_css(vcpu->kvm);
2609 }
2610 r = 0;
2611 break;
2612 default:
2613 r = -EINVAL;
2614 break;
2615 }
2616 return r;
2617 }
2618
2619 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2620 struct kvm_s390_mem_op *mop)
2621 {
2622 void __user *uaddr = (void __user *)mop->buf;
2623 void *tmpbuf = NULL;
2624 int r, srcu_idx;
2625 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2626 | KVM_S390_MEMOP_F_CHECK_ONLY;
2627
2628 if (mop->flags & ~supported_flags)
2629 return -EINVAL;
2630
2631 if (mop->size > MEM_OP_MAX_SIZE)
2632 return -E2BIG;
2633
2634 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2635 tmpbuf = vmalloc(mop->size);
2636 if (!tmpbuf)
2637 return -ENOMEM;
2638 }
2639
2640 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2641
2642 switch (mop->op) {
2643 case KVM_S390_MEMOP_LOGICAL_READ:
2644 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2645 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2646 break;
2647 }
2648 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2649 if (r == 0) {
2650 if (copy_to_user(uaddr, tmpbuf, mop->size))
2651 r = -EFAULT;
2652 }
2653 break;
2654 case KVM_S390_MEMOP_LOGICAL_WRITE:
2655 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2656 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2657 break;
2658 }
2659 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2660 r = -EFAULT;
2661 break;
2662 }
2663 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2664 break;
2665 default:
2666 r = -EINVAL;
2667 }
2668
2669 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2670
2671 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2672 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2673
2674 vfree(tmpbuf);
2675 return r;
2676 }
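
/*
 * Illustrative sketch (userspace, not part of this file): reading guest
 * memory through the KVM_S390_MEM_OP ioctl served by the handler above.
 * "vcpu_fd", the guest address and the buffer are assumptions for the
 * example; headers as in the first sketch.
 */
static long example_read_guest(int vcpu_fd, void *buf, unsigned long gaddr,
			       unsigned int len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size = len,		/* must not exceed MEM_OP_MAX_SIZE */
		.op = KVM_S390_MEMOP_LOGICAL_READ,
		.buf = (unsigned long)buf,
		.ar = 0,		/* access register used for the logical address */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}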
2677
2678 long kvm_arch_vcpu_ioctl(struct file *filp,
2679 unsigned int ioctl, unsigned long arg)
2680 {
2681 struct kvm_vcpu *vcpu = filp->private_data;
2682 void __user *argp = (void __user *)arg;
2683 int idx;
2684 long r;
2685
2686 switch (ioctl) {
2687 case KVM_S390_IRQ: {
2688 struct kvm_s390_irq s390irq;
2689
2690 r = -EFAULT;
2691 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2692 break;
2693 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2694 break;
2695 }
2696 case KVM_S390_INTERRUPT: {
2697 struct kvm_s390_interrupt s390int;
2698 struct kvm_s390_irq s390irq;
2699
2700 r = -EFAULT;
2701 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2702 break;
2703 if (s390int_to_s390irq(&s390int, &s390irq))
2704 return -EINVAL;
2705 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2706 break;
2707 }
2708 case KVM_S390_STORE_STATUS:
2709 idx = srcu_read_lock(&vcpu->kvm->srcu);
2710 r = kvm_s390_vcpu_store_status(vcpu, arg);
2711 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2712 break;
2713 case KVM_S390_SET_INITIAL_PSW: {
2714 psw_t psw;
2715
2716 r = -EFAULT;
2717 if (copy_from_user(&psw, argp, sizeof(psw)))
2718 break;
2719 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2720 break;
2721 }
2722 case KVM_S390_INITIAL_RESET:
2723 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2724 break;
2725 case KVM_SET_ONE_REG:
2726 case KVM_GET_ONE_REG: {
2727 struct kvm_one_reg reg;
2728 r = -EFAULT;
2729 if (copy_from_user(&reg, argp, sizeof(reg)))
2730 break;
2731 if (ioctl == KVM_SET_ONE_REG)
2732 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2733 else
2734 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2735 break;
2736 }
2737 #ifdef CONFIG_KVM_S390_UCONTROL
2738 case KVM_S390_UCAS_MAP: {
2739 struct kvm_s390_ucas_mapping ucasmap;
2740
2741 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2742 r = -EFAULT;
2743 break;
2744 }
2745
2746 if (!kvm_is_ucontrol(vcpu->kvm)) {
2747 r = -EINVAL;
2748 break;
2749 }
2750
2751 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2752 ucasmap.vcpu_addr, ucasmap.length);
2753 break;
2754 }
2755 case KVM_S390_UCAS_UNMAP: {
2756 struct kvm_s390_ucas_mapping ucasmap;
2757
2758 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2759 r = -EFAULT;
2760 break;
2761 }
2762
2763 if (!kvm_is_ucontrol(vcpu->kvm)) {
2764 r = -EINVAL;
2765 break;
2766 }
2767
2768 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2769 ucasmap.length);
2770 break;
2771 }
2772 #endif
2773 case KVM_S390_VCPU_FAULT: {
2774 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2775 break;
2776 }
2777 case KVM_ENABLE_CAP:
2778 {
2779 struct kvm_enable_cap cap;
2780 r = -EFAULT;
2781 if (copy_from_user(&cap, argp, sizeof(cap)))
2782 break;
2783 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2784 break;
2785 }
2786 case KVM_S390_MEM_OP: {
2787 struct kvm_s390_mem_op mem_op;
2788
2789 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2790 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2791 else
2792 r = -EFAULT;
2793 break;
2794 }
2795 case KVM_S390_SET_IRQ_STATE: {
2796 struct kvm_s390_irq_state irq_state;
2797
2798 r = -EFAULT;
2799 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2800 break;
2801 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2802 irq_state.len == 0 ||
2803 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2804 r = -EINVAL;
2805 break;
2806 }
2807 r = kvm_s390_set_irq_state(vcpu,
2808 (void __user *) irq_state.buf,
2809 irq_state.len);
2810 break;
2811 }
2812 case KVM_S390_GET_IRQ_STATE: {
2813 struct kvm_s390_irq_state irq_state;
2814
2815 r = -EFAULT;
2816 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2817 break;
2818 if (irq_state.len == 0) {
2819 r = -EINVAL;
2820 break;
2821 }
2822 r = kvm_s390_get_irq_state(vcpu,
2823 (__u8 __user *) irq_state.buf,
2824 irq_state.len);
2825 break;
2826 }
2827 default:
2828 r = -ENOTTY;
2829 }
2830 return r;
2831 }
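
/*
 * Illustrative sketch (userspace, not part of this file): saving the pending
 * local interrupts of a vcpu with KVM_S390_GET_IRQ_STATE, e.g. for migration.
 * The buffer size is an assumption chosen generously (compare
 * VCPU_IRQS_MAX_BUF); "vcpu_fd" is an assumed vcpu file descriptor and the
 * headers are as in the first sketch.
 */
static int example_save_irq_state(int vcpu_fd)
{
	static struct kvm_s390_irq irqs[512];	/* example size only */
	struct kvm_s390_irq_state irq_state = {
		.buf = (unsigned long)irqs,
		.len = sizeof(irqs),
	};

	/* returns the number of bytes stored, or a negative error code */
	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
}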
2832
2833 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2834 {
2835 #ifdef CONFIG_KVM_S390_UCONTROL
2836 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2837 && (kvm_is_ucontrol(vcpu->kvm))) {
2838 vmf->page = virt_to_page(vcpu->arch.sie_block);
2839 get_page(vmf->page);
2840 return 0;
2841 }
2842 #endif
2843 return VM_FAULT_SIGBUS;
2844 }
2845
2846 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2847 unsigned long npages)
2848 {
2849 return 0;
2850 }
2851
2852 /* Section: memory related */
2853 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2854 struct kvm_memory_slot *memslot,
2855 const struct kvm_userspace_memory_region *mem,
2856 enum kvm_mr_change change)
2857 {
2858 /* A few sanity checks. Memory slots have to start and end at a
2859 segment boundary (1MB). The memory in userland may be fragmented
2860 into various different vmas. It is okay to mmap() and munmap()
2861 memory in this slot at any time after doing this call. */
2862
2863 if (mem->userspace_addr & 0xffffful)
2864 return -EINVAL;
2865
2866 if (mem->memory_size & 0xffffful)
2867 return -EINVAL;
2868
2869 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2870 return -EINVAL;
2871
2872 return 0;
2873 }
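
/*
 * Illustrative sketch (userspace, not part of this file): registering a
 * memory slot that satisfies the 1MB alignment checks above. "vm_fd", the
 * slot number and the sizes are assumptions; size is expected to be a
 * multiple of 1MB. Headers as in the first sketch, plus <sys/mman.h>.
 */
static int example_add_memslot(int vm_fd, unsigned long size)
{
	const unsigned long seg = 1UL << 20;	/* segment size, 1MB */
	struct kvm_userspace_memory_region region;
	unsigned long base;
	void *mem;

	/* over-allocate so the host address can be rounded up to 1MB */
	mem = mmap(NULL, size + seg, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (mem == MAP_FAILED)
		return -1;
	base = ((unsigned long)mem + seg - 1) & ~(seg - 1);

	region.slot = 0;			/* example slot number */
	region.flags = 0;
	region.guest_phys_addr = 0;
	region.memory_size = size;		/* multiple of 1MB */
	region.userspace_addr = base;		/* 1MB aligned */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}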
2874
2875 void kvm_arch_commit_memory_region(struct kvm *kvm,
2876 const struct kvm_userspace_memory_region *mem,
2877 const struct kvm_memory_slot *old,
2878 const struct kvm_memory_slot *new,
2879 enum kvm_mr_change change)
2880 {
2881 int rc;
2882
2883 /* If the basics of the memslot do not change, we do not want
2884 * to update the gmap. Every update causes several unnecessary
2885 * segment translation exceptions. This is usually handled just
2886 * fine by the normal fault handler + gmap, but it will also
2887 * cause faults on the prefix page of running guest CPUs.
2888 */
2889 if (old->userspace_addr == mem->userspace_addr &&
2890 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2891 old->npages * PAGE_SIZE == mem->memory_size)
2892 return;
2893
2894 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2895 mem->guest_phys_addr, mem->memory_size);
2896 if (rc)
2897 pr_warn("failed to commit memory region\n");
2898 return;
2899 }
2900
2901 static int __init kvm_s390_init(void)
2902 {
2903 if (!sclp.has_sief2) {
2904 pr_info("SIE not available\n");
2905 return -ENODEV;
2906 }
2907
2908 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2909 }
2910
2911 static void __exit kvm_s390_exit(void)
2912 {
2913 kvm_exit();
2914 }
2915
2916 module_init(kvm_s390_init);
2917 module_exit(kvm_s390_exit);
2918
2919 /*
2920 * Enable autoloading of the kvm module.
2921 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2922 * since x86 takes a different approach.
2923 */
2924 #include <linux/miscdevice.h>
2925 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2926 MODULE_ALIAS("devname:kvm");