KVM: s390: add architecture compliant guest access functions
arch/s390/kvm/kvm-s390.c

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

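/*
 * Propagate the dirty bits that the gmap has collected for this
 * memslot into KVM's dirty bitmap, one guest frame at a time.
 */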
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

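/*
 * KVM_S390_VM_MEM_CTRL attribute group: enable CMMA for a VM (only
 * possible while no vcpus exist yet) or clear the collected CMMA page
 * states by resetting the pgstes of the whole address space.
 */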
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
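	/*
	 * sca_offset is static, so consecutively created VMs get SCAs
	 * staggered through the page in steps of 16 bytes (wrapping at
	 * 0x7f0), presumably to avoid cache aliasing between the SCAs
	 * of different VMs.
	 */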
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

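/*
 * Tear down one vcpu: detach its SIE block from the SCA, free the
 * per-vcpu gmap (ucontrol VMs only) and the CMMA log, then release
 * the SIE block itself.
 */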
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

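/*
 * Host floating point control, FP registers and access registers are
 * saved on vcpu load and the guest's copies switched in lazily; vcpu
 * put reverses this. kvm_s390_vcpu_store_status() relies on it.
 */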
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

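/*
 * First-time setup of the SIE control block: cpu state flags,
 * execution controls (ecb/ecb2/eca), the facility list presented to
 * the guest and the interception controls for storage key
 * instructions. The 0x10 bit in ecb (transactional execution) is only
 * offered when host facilities 50 and 73 are present.
 */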
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

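/*
 * ipte notifier callback: a mapping backing a guest prefix area was
 * invalidated. Request a prefix remap on every affected vcpu and kick
 * it out of SIE until the request has been handled.
 */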
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

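/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG handlers for the s390 specific
 * registers not covered by regs/sregs: TOD programmable register,
 * epoch difference, cpu timer, clock comparator, pfault handshake
 * parameters, program parameter and breaking-event address.
 */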
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

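/*
 * CMMA is only usable when running in an LPAR on hardware with EDAT1
 * and when userspace has opted in via KVM_S390_VM_MEM_ENABLE_CMMA.
 */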
bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

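/*
 * Resolve the fault that kicked us out of SIE synchronously by
 * faulting in the page at the failing gmap address.
 */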
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

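/*
 * Only arm an async page fault if the pfault handshake with the guest
 * is set up (valid token), the guest PSW matches the compare/select
 * masks, external interrupts are enabled, no interrupt is already
 * pending and the gmap allows pfault; otherwise the caller falls back
 * to resolving the fault synchronously.
 */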
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

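/*
 * A non-negative exit_reason is a regular SIE intercept that still
 * needs handling; a negative value indicates the host hit an
 * exception (e.g. a page fault) while the guest was running.
 */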
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");