KVM: s390: read the correct opcode on SIE faults
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
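
/*
 * Reading aid for the mask above: STFLE numbers facility bits from the
 * left, i.e. facility 0 is the most significant bit of the first
 * doubleword. Each u64 therefore covers facilities 64*n .. 64*n+63, and a
 * host facility is offered to a guest only if its bit survives the AND
 * with this mask in kvm_arch_init_vm().
 */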

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
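
/*
 * The arithmetic above relies on the SIE invariant
 * "guest TOD = host TOD + epoch": when the host TOD clock is stepped by
 * *delta (e.g. after an STP/ETR clock synchronization), subtracting the
 * same delta from every epoch keeps each guest's view of time unchanged.
 */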

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
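
/*
 * Dirty logging on s390 is driven by the gmap (guest address space) tables:
 * gmap_test_and_clear_dirty() tests and resets the per-page dirty state for
 * one guest page, and every page found dirty is transcribed into the
 * generic KVM dirty bitmap via mark_page_dirty(). Note that last_gfn is one
 * past the final frame of the slot, hence the exclusive loop bound above.
 */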

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac->mask, 64);
			set_kvm_facility(kvm->arch.model.fac->list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
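
/*
 * For reference: user space reaches kvm_vm_ioctl_enable_cap() through the
 * generic KVM_ENABLE_CAP ioctl on the VM file descriptor. A minimal sketch
 * (illustrative only, assuming an already opened vm_fd):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Capabilities that touch the facility lists (vector registers, runtime
 * instrumentation) must be enabled before the first VCPU is created;
 * afterwards the code above returns -EBUSY.
 */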

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
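
/*
 * Both setters above funnel into kvm_s390_set_tod_clock(), which stores the
 * difference between the requested guest TOD and the current host TOD in
 * kvm->arch.epoch (and in every VCPU's SIE block). The hardware adds that
 * signed epoch whenever the guest reads its TOD clock, so a single stored
 * offset is enough: guest TOD = host TOD + epoch.
 */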

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
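
/*
 * Background for the two ioctls above: a storage key is one byte per 4K
 * frame, consisting of the four access-control bits, the fetch-protection
 * bit and the hardware-maintained reference and change bits; the low-order
 * bit is unassigned, which is why kvm_s390_set_skeys() rejects keys with
 * bit 0 (mask 0x01) set. Both KVM_S390_GET_SKEYS and KVM_S390_SET_SKEYS
 * move at most KVM_S390_SKEYS_MAX keys per call as a flat byte array at
 * skeydata_addr.
 */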

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
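
/*
 * A note on the PQAP inline assembly above: the function code is
 * architecturally taken from general register 0, and for the QCI
 * subfunction general register 2 must hold the address of the 128-byte
 * response block, which is exactly what the two lgr instructions set up.
 * Should the instruction still program-check (e.g. no AP instructions
 * installed), the EX_TABLE fixup resumes at label 1 with cc still 0 and a
 * zeroed config block, so callers simply see "no APXA".
 */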

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
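	/*
	 * Note: sca_offset cycles through the page in 16-byte steps, so
	 * consecutive VMs place their basic SCA at different offsets within
	 * the page allocated above; presumably this spreads the
	 * hardware-updated SCA fields of many VMs across different cache
	 * lines.
	 */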

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
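
/*
 * The BSCA -> ESCA switch above has to look atomic to the guest: all VCPUs
 * are blocked and kicked out of SIE, the entries are copied under the
 * sca_lock write lock, each SIE control block is repointed (scaoh/scaol)
 * and gets its ESCA mode bit (0x04 in ecb2) set, and only then may the
 * VCPUs re-enter SIE. This is what allows a VM to grow beyond
 * KVM_S390_BSCA_CPU_SLOTS VCPUs on machines with the ESCA facility.
 */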

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
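
/*
 * exit_sie() works by raising CPUSTAT_STOP_INT in the control block, which
 * forces the CPU out of interpretive execution; the busy loop on the
 * PROG_IN_SIE bit in prog0c then waits until the target CPU has really
 * left SIE, so callers may modify the control block afterwards without
 * racing against the hardware.
 */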
1678
1679 /* Kick a guest cpu out of SIE to process a request synchronously */
1680 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1681 {
1682 kvm_make_request(req, vcpu);
1683 kvm_s390_vcpu_request(vcpu);
1684 }
1685
1686 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1687 {
1688 int i;
1689 struct kvm *kvm = gmap->private;
1690 struct kvm_vcpu *vcpu;
1691
1692 kvm_for_each_vcpu(i, vcpu, kvm) {
1693 /* match against both prefix pages */
1694 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1695 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1696 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1697 }
1698 }
1699 }
1700
1701 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1702 {
1703 /* kvm common code refers to this, but never calls it */
1704 BUG();
1705 return 0;
1706 }
1707
1708 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1709 struct kvm_one_reg *reg)
1710 {
1711 int r = -EINVAL;
1712
1713 switch (reg->id) {
1714 case KVM_REG_S390_TODPR:
1715 r = put_user(vcpu->arch.sie_block->todpr,
1716 (u32 __user *)reg->addr);
1717 break;
1718 case KVM_REG_S390_EPOCHDIFF:
1719 r = put_user(vcpu->arch.sie_block->epoch,
1720 (u64 __user *)reg->addr);
1721 break;
1722 case KVM_REG_S390_CPU_TIMER:
1723 r = put_user(vcpu->arch.sie_block->cputm,
1724 (u64 __user *)reg->addr);
1725 break;
1726 case KVM_REG_S390_CLOCK_COMP:
1727 r = put_user(vcpu->arch.sie_block->ckc,
1728 (u64 __user *)reg->addr);
1729 break;
1730 case KVM_REG_S390_PFTOKEN:
1731 r = put_user(vcpu->arch.pfault_token,
1732 (u64 __user *)reg->addr);
1733 break;
1734 case KVM_REG_S390_PFCOMPARE:
1735 r = put_user(vcpu->arch.pfault_compare,
1736 (u64 __user *)reg->addr);
1737 break;
1738 case KVM_REG_S390_PFSELECT:
1739 r = put_user(vcpu->arch.pfault_select,
1740 (u64 __user *)reg->addr);
1741 break;
1742 case KVM_REG_S390_PP:
1743 r = put_user(vcpu->arch.sie_block->pp,
1744 (u64 __user *)reg->addr);
1745 break;
1746 case KVM_REG_S390_GBEA:
1747 r = put_user(vcpu->arch.sie_block->gbea,
1748 (u64 __user *)reg->addr);
1749 break;
1750 default:
1751 break;
1752 }
1753
1754 return r;
1755 }
1756
1757 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1758 struct kvm_one_reg *reg)
1759 {
1760 int r = -EINVAL;
1761
1762 switch (reg->id) {
1763 case KVM_REG_S390_TODPR:
1764 r = get_user(vcpu->arch.sie_block->todpr,
1765 (u32 __user *)reg->addr);
1766 break;
1767 case KVM_REG_S390_EPOCHDIFF:
1768 r = get_user(vcpu->arch.sie_block->epoch,
1769 (u64 __user *)reg->addr);
1770 break;
1771 case KVM_REG_S390_CPU_TIMER:
1772 r = get_user(vcpu->arch.sie_block->cputm,
1773 (u64 __user *)reg->addr);
1774 break;
1775 case KVM_REG_S390_CLOCK_COMP:
1776 r = get_user(vcpu->arch.sie_block->ckc,
1777 (u64 __user *)reg->addr);
1778 break;
1779 case KVM_REG_S390_PFTOKEN:
1780 r = get_user(vcpu->arch.pfault_token,
1781 (u64 __user *)reg->addr);
1782 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1783 kvm_clear_async_pf_completion_queue(vcpu);
1784 break;
1785 case KVM_REG_S390_PFCOMPARE:
1786 r = get_user(vcpu->arch.pfault_compare,
1787 (u64 __user *)reg->addr);
1788 break;
1789 case KVM_REG_S390_PFSELECT:
1790 r = get_user(vcpu->arch.pfault_select,
1791 (u64 __user *)reg->addr);
1792 break;
1793 case KVM_REG_S390_PP:
1794 r = get_user(vcpu->arch.sie_block->pp,
1795 (u64 __user *)reg->addr);
1796 break;
1797 case KVM_REG_S390_GBEA:
1798 r = get_user(vcpu->arch.sie_block->gbea,
1799 (u64 __user *)reg->addr);
1800 break;
1801 default:
1802 break;
1803 }
1804
1805 return r;
1806 }
1807
1808 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1809 {
1810 kvm_s390_vcpu_initial_reset(vcpu);
1811 return 0;
1812 }
1813
1814 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1815 {
1816 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1817 return 0;
1818 }
1819
1820 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1821 {
1822 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1823 return 0;
1824 }
1825
1826 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1827 struct kvm_sregs *sregs)
1828 {
1829 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1830 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1831 restore_access_regs(vcpu->run->s.regs.acrs);
1832 return 0;
1833 }
1834
1835 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1836 struct kvm_sregs *sregs)
1837 {
1838 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1839 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1840 return 0;
1841 }
1842
1843 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1844 {
1845 /* make sure the new values will be lazily loaded */
1846 save_fpu_regs();
1847 if (test_fp_ctl(fpu->fpc))
1848 return -EINVAL;
1849 current->thread.fpu.fpc = fpu->fpc;
1850 if (MACHINE_HAS_VX)
1851 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
1852 else
1853 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
1854 return 0;
1855 }
1856
1857 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1858 {
1859 /* make sure we have the latest values */
1860 save_fpu_regs();
1861 if (MACHINE_HAS_VX)
1862 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
1863 else
1864 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
1865 fpu->fpc = current->thread.fpu.fpc;
1866 return 0;
1867 }
1868
1869 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1870 {
1871 int rc = 0;
1872
1873 if (!is_vcpu_stopped(vcpu))
1874 rc = -EBUSY;
1875 else {
1876 vcpu->run->psw_mask = psw.mask;
1877 vcpu->run->psw_addr = psw.addr;
1878 }
1879 return rc;
1880 }
1881
1882 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1883 struct kvm_translation *tr)
1884 {
1885 return -EINVAL; /* not implemented yet */
1886 }
1887
1888 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1889 KVM_GUESTDBG_USE_HW_BP | \
1890 KVM_GUESTDBG_ENABLE)
1891
1892 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1893 struct kvm_guest_debug *dbg)
1894 {
1895 int rc = 0;
1896
1897 vcpu->guest_debug = 0;
1898 kvm_s390_clear_bp_data(vcpu);
1899
1900 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1901 return -EINVAL;
1902
1903 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1904 vcpu->guest_debug = dbg->control;
1905 /* enforce guest PER */
1906 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1907
1908 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1909 rc = kvm_s390_import_bp_data(vcpu, dbg);
1910 } else {
1911 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1912 vcpu->arch.guestdbg.last_bp = 0;
1913 }
1914
1915 if (rc) {
1916 vcpu->guest_debug = 0;
1917 kvm_s390_clear_bp_data(vcpu);
1918 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1919 }
1920
1921 return rc;
1922 }
1923
1924 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1925 struct kvm_mp_state *mp_state)
1926 {
1927 /* CHECK_STOP and LOAD are not supported yet */
1928 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1929 KVM_MP_STATE_OPERATING;
1930 }
1931
1932 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1933 struct kvm_mp_state *mp_state)
1934 {
1935 int rc = 0;
1936
1937 /* user space knows about this interface - let it control the state */
1938 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1939
1940 switch (mp_state->mp_state) {
1941 case KVM_MP_STATE_STOPPED:
1942 kvm_s390_vcpu_stop(vcpu);
1943 break;
1944 case KVM_MP_STATE_OPERATING:
1945 kvm_s390_vcpu_start(vcpu);
1946 break;
1947 case KVM_MP_STATE_LOAD:
1948 case KVM_MP_STATE_CHECK_STOP:
1949 /* fall through - CHECK_STOP and LOAD are not supported yet */
1950 default:
1951 rc = -ENXIO;
1952 }
1953
1954 return rc;
1955 }
1956
1957 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1958 {
1959 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1960 }
1961
1962 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1963 {
1964 retry:
1965 kvm_s390_vcpu_request_handled(vcpu);
1966 if (!vcpu->requests)
1967 return 0;
1968 /*
1969 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1970 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1971 * This ensures that the ipte instruction for this request has
1972 * already finished. We might race against a second unmapper that
1973 * wants to set the blocking bit. Lets just retry the request loop.
1974 */
1975 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1976 int rc;
1977 rc = gmap_ipte_notify(vcpu->arch.gmap,
1978 kvm_s390_get_prefix(vcpu),
1979 PAGE_SIZE * 2);
1980 if (rc)
1981 return rc;
1982 goto retry;
1983 }
1984
1985 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1986 vcpu->arch.sie_block->ihcpu = 0xffff;
1987 goto retry;
1988 }
1989
1990 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1991 if (!ibs_enabled(vcpu)) {
1992 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1993 atomic_or(CPUSTAT_IBS,
1994 &vcpu->arch.sie_block->cpuflags);
1995 }
1996 goto retry;
1997 }
1998
1999 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2000 if (ibs_enabled(vcpu)) {
2001 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2002 atomic_andnot(CPUSTAT_IBS,
2003 &vcpu->arch.sie_block->cpuflags);
2004 }
2005 goto retry;
2006 }
2007
2008 /* nothing to do, just clear the request */
2009 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2010
2011 return 0;
2012 }
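
/*
 * For reference (a sketch of the generic helper, which lives in
 * include/linux/kvm_host.h, not in this file): kvm_check_request() is
 * roughly a test-and-clear on the vcpu request bitmap,
 *
 *	static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 *	{
 *		if (test_bit(req, &vcpu->requests)) {
 *			clear_bit(req, &vcpu->requests);
 *			return true;
 *		}
 *		return false;
 *	}
 *
 * which is why each handled request above can simply "goto retry" to
 * rescan the remaining bits.
 */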
2013
2014 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2015 {
2016 struct kvm_vcpu *vcpu;
2017 int i;
2018
2019 mutex_lock(&kvm->lock);
2020 preempt_disable();
2021 kvm->arch.epoch = tod - get_tod_clock();
2022 kvm_s390_vcpu_block_all(kvm);
2023 kvm_for_each_vcpu(i, vcpu, kvm)
2024 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2025 kvm_s390_vcpu_unblock_all(kvm);
2026 preempt_enable();
2027 mutex_unlock(&kvm->lock);
2028 }
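
/*
 * Worked example of the epoch arithmetic above (documentation only):
 * SIE presents the guest TOD clock as host TOD + epoch. With H0 being
 * the host clock when kvm_s390_set_tod_clock() runs,
 *
 *	epoch = tod - H0
 *
 * so a guest STCK at a later host time H observes
 *
 *	H + epoch = tod + (H - H0)
 *
 * i.e. the requested value, advancing in lockstep with the host clock.
 */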
2029
2030 /**
2031 * kvm_arch_fault_in_page - fault-in guest page if necessary
2032 * @vcpu: The corresponding virtual cpu
2033 * @gpa: Guest physical address
2034 * @writable: Whether the page should be writable or not
2035 *
2036 * Make sure that a guest page has been faulted-in on the host.
2037 *
2038 * Return: Zero on success, negative error code otherwise.
2039 */
2040 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2041 {
2042 return gmap_fault(vcpu->arch.gmap, gpa,
2043 writable ? FAULT_FLAG_WRITE : 0);
2044 }
2045
2046 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2047 unsigned long token)
2048 {
2049 struct kvm_s390_interrupt inti;
2050 struct kvm_s390_irq irq;
2051
2052 if (start_token) {
2053 irq.u.ext.ext_params2 = token;
2054 irq.type = KVM_S390_INT_PFAULT_INIT;
2055 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2056 } else {
2057 inti.type = KVM_S390_INT_PFAULT_DONE;
2058 inti.parm64 = token;
2059 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2060 }
2061 }
2062
2063 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2064 struct kvm_async_pf *work)
2065 {
2066 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2067 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2068 }
2069
2070 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2071 struct kvm_async_pf *work)
2072 {
2073 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2074 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2075 }
2076
2077 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2078 struct kvm_async_pf *work)
2079 {
2080 /* s390 will always inject the page directly */
2081 }
2082
2083 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2084 {
2085 /*
2086 * s390 will always inject the page directly,
2087 * but we still want kvm_check_async_pf_completion to clean up
2088 */
2089 return true;
2090 }
2091
2092 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2093 {
2094 hva_t hva;
2095 struct kvm_arch_async_pf arch;
2096 int rc;
2097
2098 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2099 return 0;
2100 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2101 vcpu->arch.pfault_compare)
2102 return 0;
2103 if (psw_extint_disabled(vcpu))
2104 return 0;
2105 if (kvm_s390_vcpu_has_irq(vcpu, 0))
2106 return 0;
2107 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2108 return 0;
2109 if (!vcpu->arch.gmap->pfault_enabled)
2110 return 0;
2111
2112 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2113 hva += current->thread.gmap_addr & ~PAGE_MASK;
2114 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2115 return 0;
2116
2117 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2118 return rc;
2119 }
2120
2121 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2122 {
2123 int rc, cpuflags;
2124
2125 /*
2126 * On s390 notifications for arriving pages will be delivered directly
2127 * to the guest, but the housekeeping for completed pfaults is
2128 * handled outside the worker.
2129 */
2130 kvm_check_async_pf_completion(vcpu);
2131
2132 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2133 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2134
2135 if (need_resched())
2136 schedule();
2137
2138 if (test_cpu_flag(CIF_MCCK_PENDING))
2139 s390_handle_mcck();
2140
2141 if (!kvm_is_ucontrol(vcpu->kvm)) {
2142 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2143 if (rc)
2144 return rc;
2145 }
2146
2147 rc = kvm_s390_handle_requests(vcpu);
2148 if (rc)
2149 return rc;
2150
2151 if (guestdbg_enabled(vcpu)) {
2152 kvm_s390_backup_guest_per_regs(vcpu);
2153 kvm_s390_patch_guest_per_regs(vcpu);
2154 }
2155
2156 vcpu->arch.sie_block->icptcode = 0;
2157 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2158 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2159 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2160
2161 return 0;
2162 }
2163
2164 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2165 {
2166 u8 opcode;
2167 int rc;
2168
2169 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2170 trace_kvm_s390_sie_fault(vcpu);
2171
2172 /*
2173 * We want to inject an addressing exception, which is defined as a
2174 * suppressing or terminating exception. However, since we came here
2175 * by a DAT access exception, the PSW still points to the faulting
2176 * instruction since DAT exceptions are nullifying. So we've got
2177 * to look up the current opcode to get the length of the instruction
2178 * to be able to forward the PSW.
2179 */
2180 rc = read_guest_instr(vcpu, &opcode, 1);
2181 if (rc)
2182 return kvm_s390_inject_prog_cond(vcpu, rc);
2183 kvm_s390_forward_psw(vcpu, insn_length(opcode));
2184
2185 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2186 }
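
/*
 * For reference (a sketch of the helper used above; the in-tree version
 * lives in asm/dis.h, not in this file): on s390 the two most
 * significant bits of the first opcode byte encode the instruction
 * length code, 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes, so
 * reading a single byte is enough to forward the PSW:
 *
 *	static inline int insn_length(unsigned char code)
 *	{
 *		return ((((int) code + 64) >> 7) + 1) << 1;
 *	}
 */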
2187
2188 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2189 {
2190 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2191 vcpu->arch.sie_block->icptcode);
2192 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2193
2194 if (guestdbg_enabled(vcpu))
2195 kvm_s390_restore_guest_per_regs(vcpu);
2196
2197 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2198 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
2199
2200 if (vcpu->arch.sie_block->icptcode > 0) {
2201 int rc = kvm_handle_sie_intercept(vcpu);
2202
2203 if (rc != -EOPNOTSUPP)
2204 return rc;
2205 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2206 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2207 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2208 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2209 return -EREMOTE;
2210 } else if (exit_reason != -EFAULT) {
2211 vcpu->stat.exit_null++;
2212 return 0;
2213 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2214 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2215 vcpu->run->s390_ucontrol.trans_exc_code =
2216 current->thread.gmap_addr;
2217 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2218 return -EREMOTE;
2219 } else if (current->thread.gmap_pfault) {
2220 trace_kvm_s390_major_guest_pfault(vcpu);
2221 current->thread.gmap_pfault = 0;
2222 if (kvm_arch_setup_async_pf(vcpu))
2223 return 0;
2224 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2225 }
2226 return vcpu_post_run_fault_in_sie(vcpu);
2227 }
2228
2229 static int __vcpu_run(struct kvm_vcpu *vcpu)
2230 {
2231 int rc, exit_reason;
2232
2233 /*
2234 * We try to hold kvm->srcu during most of vcpu_run (except when
2235 * running the guest), so that memslots (and other shared data) are protected
2236 */
2237 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2238
2239 do {
2240 rc = vcpu_pre_run(vcpu);
2241 if (rc)
2242 break;
2243
2244 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2245 /*
2246 * As PF_VCPU will be used in the fault handler, there must be
2247 * no uaccess between guest_enter and guest_exit.
2248 */
2249 local_irq_disable();
2250 __kvm_guest_enter();
2251 local_irq_enable();
2252 exit_reason = sie64a(vcpu->arch.sie_block,
2253 vcpu->run->s.regs.gprs);
2254 local_irq_disable();
2255 __kvm_guest_exit();
2256 local_irq_enable();
2257 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2258
2259 rc = vcpu_post_run(vcpu, exit_reason);
2260 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2261
2262 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2263 return rc;
2264 }
2265
2266 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2267 {
2268 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2269 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2270 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2271 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2272 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2273 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2274 /* some control register changes require a tlb flush */
2275 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2276 }
2277 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2278 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2279 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2280 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2281 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2282 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2283 }
2284 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2285 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2286 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2287 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2288 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2289 kvm_clear_async_pf_completion_queue(vcpu);
2290 }
2291 kvm_run->kvm_dirty_regs = 0;
2292 }
2293
2294 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2295 {
2296 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2297 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2298 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2299 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2300 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2301 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2302 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2303 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2304 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2305 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2306 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2307 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2308 }
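
/*
 * Illustrative userspace sketch (documentation only): the register
 * blocks moved by sync_regs()/store_regs() live in the shared kvm_run
 * page, so userspace can update them without extra ioctls by setting
 * the matching dirty bit before KVM_RUN. Assuming a mapped run page
 * and a hypothetical new_prefix value:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * sync_regs() consumes the bits on entry and clears kvm_dirty_regs.
 */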
2309
2310 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2311 {
2312 int rc;
2313 sigset_t sigsaved;
2314
2315 if (guestdbg_exit_pending(vcpu)) {
2316 kvm_s390_prepare_debug_exit(vcpu);
2317 return 0;
2318 }
2319
2320 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2321 kvm_s390_vcpu_start(vcpu);
2322 } else if (is_vcpu_stopped(vcpu)) {
2323 pr_err_ratelimited("can't run stopped vcpu %d\n",
2324 vcpu->vcpu_id);
2325 return -EINVAL;
2326 }
2327
2328 if (vcpu->sigset_active)
2329 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2330
2331 sync_regs(vcpu, kvm_run);
2332
2333 might_fault();
2334 rc = __vcpu_run(vcpu);
2335
2336 if (signal_pending(current) && !rc) {
2337 kvm_run->exit_reason = KVM_EXIT_INTR;
2338 rc = -EINTR;
2339 }
2340
2341 if (guestdbg_exit_pending(vcpu) && !rc) {
2342 kvm_s390_prepare_debug_exit(vcpu);
2343 rc = 0;
2344 }
2345
2346 if (rc == -EREMOTE) {
2347 /* userspace support is needed, kvm_run has been prepared */
2348 rc = 0;
2349 }
2350
2351 store_regs(vcpu, kvm_run);
2352
2353 if (vcpu->sigset_active)
2354 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2355
2356 vcpu->stat.exit_userspace++;
2357 return rc;
2358 }
2359
2360 /*
2361 * store status at address
2362 * we have two special cases:
2363 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2364 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2365 */
2366 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2367 {
2368 unsigned char archmode = 1;
2369 freg_t fprs[NUM_FPRS];
2370 unsigned int px;
2371 u64 clkcomp;
2372 int rc;
2373
2374 px = kvm_s390_get_prefix(vcpu);
2375 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2376 if (write_guest_abs(vcpu, 163, &archmode, 1))
2377 return -EFAULT;
2378 gpa = 0;
2379 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2380 if (write_guest_real(vcpu, 163, &archmode, 1))
2381 return -EFAULT;
2382 gpa = px;
2383 } else
2384 gpa -= __LC_FPREGS_SAVE_AREA;
2385
2386 /* manually convert vector registers if necessary */
2387 if (MACHINE_HAS_VX) {
2388 convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
2389 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2390 fprs, 128);
2391 } else {
2392 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2393 vcpu->run->s.regs.fprs, 128);
2394 }
2395 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2396 vcpu->run->s.regs.gprs, 128);
2397 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2398 &vcpu->arch.sie_block->gpsw, 16);
2399 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2400 &px, 4);
2401 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
2402 &vcpu->run->s.regs.fpc, 4);
2403 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2404 &vcpu->arch.sie_block->todpr, 4);
2405 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
2406 &vcpu->arch.sie_block->cputm, 8);
2407 clkcomp = vcpu->arch.sie_block->ckc >> 8;
2408 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2409 &clkcomp, 8);
2410 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2411 &vcpu->run->s.regs.acrs, 64);
2412 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2413 &vcpu->arch.sie_block->gcr, 128);
2414 return rc ? -EFAULT : 0;
2415 }
2416
2417 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2418 {
2419 /*
2420 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2421 * copying in vcpu load/put. Let's update our copies before we save
2422 * them into the save area.
2423 */
2424 save_fpu_regs();
2425 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2426 save_access_regs(vcpu->run->s.regs.acrs);
2427
2428 return kvm_s390_store_status_unloaded(vcpu, addr);
2429 }
2430
2431 /*
2432 * store additional status at address
2433 */
2434 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2435 unsigned long gpa)
2436 {
2437 /* Only bits 0-53 are used for address formation */
2438 if (!(gpa & ~0x3ff))
2439 return 0;
2440
2441 return write_guest_abs(vcpu, gpa & ~0x3ff,
2442 (void *)&vcpu->run->s.regs.vrs, 512);
2443 }
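
/*
 * Worked example for the address handling above (documentation only):
 * ignoring bits 54-63 makes the target 1K-aligned, so e.g. a gpa of
 * 0x12345 stores the 512-byte vector save area at
 * 0x12345 & ~0x3ff == 0x12000, while a gpa whose bits 0-53 are all
 * zero is treated as "no address" and the store is skipped.
 */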
2444
2445 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2446 {
2447 if (!test_kvm_facility(vcpu->kvm, 129))
2448 return 0;
2449
2450 /*
2451 * The guest VXRS are in the host VXRS due to the lazy
2452 * copying in vcpu load/put. We can simply call save_fpu_regs()
2453 * to save the current register state because we are in the
2454 * middle of a load/put cycle.
2455 *
2456 * Let's update our copies before we save them into the save area.
2457 */
2458 save_fpu_regs();
2459
2460 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2461 }
2462
2463 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2464 {
2465 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2466 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2467 }
2468
2469 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2470 {
2471 unsigned int i;
2472 struct kvm_vcpu *vcpu;
2473
2474 kvm_for_each_vcpu(i, vcpu, kvm) {
2475 __disable_ibs_on_vcpu(vcpu);
2476 }
2477 }
2478
2479 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2480 {
2481 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2482 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2483 }
2484
2485 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2486 {
2487 int i, online_vcpus, started_vcpus = 0;
2488
2489 if (!is_vcpu_stopped(vcpu))
2490 return;
2491
2492 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2493 /* Only one cpu at a time may enter/leave the STOPPED state. */
2494 spin_lock(&vcpu->kvm->arch.start_stop_lock);
2495 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2496
2497 for (i = 0; i < online_vcpus; i++) {
2498 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2499 started_vcpus++;
2500 }
2501
2502 if (started_vcpus == 0) {
2503 /* we're the only active VCPU -> speed it up */
2504 __enable_ibs_on_vcpu(vcpu);
2505 } else if (started_vcpus == 1) {
2506 /*
2507 * As we are starting a second VCPU, we have to disable
2508 * the IBS facility on all VCPUs to remove potentially
2509 * outstanding ENABLE requests.
2510 */
2511 __disable_ibs_on_all_vcpus(vcpu->kvm);
2512 }
2513
2514 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2515 /*
2516 * Another VCPU might have used IBS while we were offline.
2517 * Let's play safe and flush the VCPU at startup.
2518 */
2519 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2520 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2521 return;
2522 }
2523
2524 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2525 {
2526 int i, online_vcpus, started_vcpus = 0;
2527 struct kvm_vcpu *started_vcpu = NULL;
2528
2529 if (is_vcpu_stopped(vcpu))
2530 return;
2531
2532 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2533 /* Only one cpu at a time may enter/leave the STOPPED state. */
2534 spin_lock(&vcpu->kvm->arch.start_stop_lock);
2535 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2536
2537 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2538 kvm_s390_clear_stop_irq(vcpu);
2539
2540 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2541 __disable_ibs_on_vcpu(vcpu);
2542
2543 for (i = 0; i < online_vcpus; i++) {
2544 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2545 started_vcpus++;
2546 started_vcpu = vcpu->kvm->vcpus[i];
2547 }
2548 }
2549
2550 if (started_vcpus == 1) {
2551 /*
2552 * As we only have one VCPU left, we want to enable the
2553 * IBS facility for that VCPU to speed it up.
2554 */
2555 __enable_ibs_on_vcpu(started_vcpu);
2556 }
2557
2558 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2559 return;
2560 }
2561
2562 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2563 struct kvm_enable_cap *cap)
2564 {
2565 int r;
2566
2567 if (cap->flags)
2568 return -EINVAL;
2569
2570 switch (cap->cap) {
2571 case KVM_CAP_S390_CSS_SUPPORT:
2572 if (!vcpu->kvm->arch.css_support) {
2573 vcpu->kvm->arch.css_support = 1;
2574 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2575 trace_kvm_s390_enable_css(vcpu->kvm);
2576 }
2577 r = 0;
2578 break;
2579 default:
2580 r = -EINVAL;
2581 break;
2582 }
2583 return r;
2584 }
2585
2586 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2587 struct kvm_s390_mem_op *mop)
2588 {
2589 void __user *uaddr = (void __user *)mop->buf;
2590 void *tmpbuf = NULL;
2591 int r, srcu_idx;
2592 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2593 | KVM_S390_MEMOP_F_CHECK_ONLY;
2594
2595 if (mop->flags & ~supported_flags)
2596 return -EINVAL;
2597
2598 if (mop->size > MEM_OP_MAX_SIZE)
2599 return -E2BIG;
2600
2601 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2602 tmpbuf = vmalloc(mop->size);
2603 if (!tmpbuf)
2604 return -ENOMEM;
2605 }
2606
2607 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2608
2609 switch (mop->op) {
2610 case KVM_S390_MEMOP_LOGICAL_READ:
2611 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2612 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2613 mop->size, GACC_FETCH);
2614 break;
2615 }
2616 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2617 if (r == 0) {
2618 if (copy_to_user(uaddr, tmpbuf, mop->size))
2619 r = -EFAULT;
2620 }
2621 break;
2622 case KVM_S390_MEMOP_LOGICAL_WRITE:
2623 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2624 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2625 mop->size, GACC_STORE);
2626 break;
2627 }
2628 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2629 r = -EFAULT;
2630 break;
2631 }
2632 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2633 break;
2634 default:
2635 r = -EINVAL;
2636 }
2637
2638 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2639
2640 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2641 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2642
2643 vfree(tmpbuf);
2644 return r;
2645 }
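
/*
 * Illustrative userspace sketch (documentation only): reading 512 bytes
 * from guest logical address 0x1000 through the KVM_S390_MEM_OP vcpu
 * ioctl handled above, assuming a hypothetical vcpu_fd:
 *
 *	char data[512];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(data),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (unsigned long)data,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		perror("KVM_S390_MEM_OP");
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY set, only the access check runs and
 * no data is copied. The guest access functions report guest exceptions
 * as positive return values, which KVM_S390_MEMOP_F_INJECT_EXCEPTION
 * turns into an injected program interruption, as implemented above.
 */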
2646
2647 long kvm_arch_vcpu_ioctl(struct file *filp,
2648 unsigned int ioctl, unsigned long arg)
2649 {
2650 struct kvm_vcpu *vcpu = filp->private_data;
2651 void __user *argp = (void __user *)arg;
2652 int idx;
2653 long r;
2654
2655 switch (ioctl) {
2656 case KVM_S390_IRQ: {
2657 struct kvm_s390_irq s390irq;
2658
2659 r = -EFAULT;
2660 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2661 break;
2662 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2663 break;
2664 }
2665 case KVM_S390_INTERRUPT: {
2666 struct kvm_s390_interrupt s390int;
2667 struct kvm_s390_irq s390irq;
2668
2669 r = -EFAULT;
2670 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2671 break;
2672 if (s390int_to_s390irq(&s390int, &s390irq))
2673 return -EINVAL;
2674 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2675 break;
2676 }
2677 case KVM_S390_STORE_STATUS:
2678 idx = srcu_read_lock(&vcpu->kvm->srcu);
2679 r = kvm_s390_vcpu_store_status(vcpu, arg);
2680 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2681 break;
2682 case KVM_S390_SET_INITIAL_PSW: {
2683 psw_t psw;
2684
2685 r = -EFAULT;
2686 if (copy_from_user(&psw, argp, sizeof(psw)))
2687 break;
2688 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2689 break;
2690 }
2691 case KVM_S390_INITIAL_RESET:
2692 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2693 break;
2694 case KVM_SET_ONE_REG:
2695 case KVM_GET_ONE_REG: {
2696 struct kvm_one_reg reg;
2697 r = -EFAULT;
2698 if (copy_from_user(&reg, argp, sizeof(reg)))
2699 break;
2700 if (ioctl == KVM_SET_ONE_REG)
2701 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2702 else
2703 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2704 break;
2705 }
2706 #ifdef CONFIG_KVM_S390_UCONTROL
2707 case KVM_S390_UCAS_MAP: {
2708 struct kvm_s390_ucas_mapping ucasmap;
2709
2710 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2711 r = -EFAULT;
2712 break;
2713 }
2714
2715 if (!kvm_is_ucontrol(vcpu->kvm)) {
2716 r = -EINVAL;
2717 break;
2718 }
2719
2720 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2721 ucasmap.vcpu_addr, ucasmap.length);
2722 break;
2723 }
2724 case KVM_S390_UCAS_UNMAP: {
2725 struct kvm_s390_ucas_mapping ucasmap;
2726
2727 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2728 r = -EFAULT;
2729 break;
2730 }
2731
2732 if (!kvm_is_ucontrol(vcpu->kvm)) {
2733 r = -EINVAL;
2734 break;
2735 }
2736
2737 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2738 ucasmap.length);
2739 break;
2740 }
2741 #endif
2742 case KVM_S390_VCPU_FAULT: {
2743 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2744 break;
2745 }
2746 case KVM_ENABLE_CAP:
2747 {
2748 struct kvm_enable_cap cap;
2749 r = -EFAULT;
2750 if (copy_from_user(&cap, argp, sizeof(cap)))
2751 break;
2752 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2753 break;
2754 }
2755 case KVM_S390_MEM_OP: {
2756 struct kvm_s390_mem_op mem_op;
2757
2758 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2759 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2760 else
2761 r = -EFAULT;
2762 break;
2763 }
2764 case KVM_S390_SET_IRQ_STATE: {
2765 struct kvm_s390_irq_state irq_state;
2766
2767 r = -EFAULT;
2768 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2769 break;
2770 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2771 irq_state.len == 0 ||
2772 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2773 r = -EINVAL;
2774 break;
2775 }
2776 r = kvm_s390_set_irq_state(vcpu,
2777 (void __user *) irq_state.buf,
2778 irq_state.len);
2779 break;
2780 }
2781 case KVM_S390_GET_IRQ_STATE: {
2782 struct kvm_s390_irq_state irq_state;
2783
2784 r = -EFAULT;
2785 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2786 break;
2787 if (irq_state.len == 0) {
2788 r = -EINVAL;
2789 break;
2790 }
2791 r = kvm_s390_get_irq_state(vcpu,
2792 (__u8 __user *) irq_state.buf,
2793 irq_state.len);
2794 break;
2795 }
2796 default:
2797 r = -ENOTTY;
2798 }
2799 return r;
2800 }
2801
2802 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2803 {
2804 #ifdef CONFIG_KVM_S390_UCONTROL
2805 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2806 && (kvm_is_ucontrol(vcpu->kvm))) {
2807 vmf->page = virt_to_page(vcpu->arch.sie_block);
2808 get_page(vmf->page);
2809 return 0;
2810 }
2811 #endif
2812 return VM_FAULT_SIGBUS;
2813 }
2814
2815 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2816 unsigned long npages)
2817 {
2818 return 0;
2819 }
2820
2821 /* Section: memory related */
2822 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2823 struct kvm_memory_slot *memslot,
2824 const struct kvm_userspace_memory_region *mem,
2825 enum kvm_mr_change change)
2826 {
2827 /* A few sanity checks. Memory slots have to start and end at a
2828 segment boundary (1 MB). The memory in userland may be fragmented
2829 across multiple vmas, and it is fine to mmap() and munmap() parts
2830 of this slot at any time after this call */
2831
2832 if (mem->userspace_addr & 0xffffful)
2833 return -EINVAL;
2834
2835 if (mem->memory_size & 0xffffful)
2836 return -EINVAL;
2837
2838 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2839 return -EINVAL;
2840
2841 return 0;
2842 }
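
/*
 * Worked example for the checks above (documentation only): 0xfffff
 * covers the low 20 bits, i.e. 1 MB, so a slot with userspace_addr
 * 0x80100000 and memory_size 0x00200000 passes, while moving either
 * value off a 1 MB multiple makes the respective check fail with
 * -EINVAL.
 */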
2843
2844 void kvm_arch_commit_memory_region(struct kvm *kvm,
2845 const struct kvm_userspace_memory_region *mem,
2846 const struct kvm_memory_slot *old,
2847 const struct kvm_memory_slot *new,
2848 enum kvm_mr_change change)
2849 {
2850 int rc;
2851
2852 /* If the basics of the memslot do not change, we do not want
2853 * to update the gmap. Every update causes several unnecessary
2854 * segment translation exceptions. This is usually handled just
2855 * fine by the normal fault handler + gmap, but it will also
2856 * cause faults on the prefix page of running guest CPUs.
2857 */
2858 if (old->userspace_addr == mem->userspace_addr &&
2859 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2860 old->npages * PAGE_SIZE == mem->memory_size)
2861 return;
2862
2863 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2864 mem->guest_phys_addr, mem->memory_size);
2865 if (rc)
2866 pr_warn("failed to commit memory region\n");
2867 return;
2868 }
2869
2870 static int __init kvm_s390_init(void)
2871 {
2872 if (!sclp.has_sief2) {
2873 pr_info("SIE not available\n");
2874 return -ENODEV;
2875 }
2876
2877 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2878 }
2879
2880 static void __exit kvm_s390_exit(void)
2881 {
2882 kvm_exit();
2883 }
2884
2885 module_init(kvm_s390_init);
2886 module_exit(kvm_s390_exit);
2887
2888 /*
2889 * Enable autoloading of the kvm module.
2890 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2891 * since x86 takes a different approach.
2892 */
2893 #include <linux/miscdevice.h>
2894 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2895 MODULE_ALIAS("devname:kvm");