KVM: Fix mmu_reload() clash with nested vmx event injection
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 77 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
b0c632db
HC
78 { NULL }
79};
80
ef50f7ac 81static unsigned long long *facilities;
b0c632db
HC
82
83/* Section: not file related */
/* Hardware enable/disable hooks: SIE needs no per-CPU setup on s390. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 CPU is virtualization capable, nothing to switch on */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing was enabled, so nothing to undo */
}
93
b0c632db
HC
/* Global hardware setup/teardown hooks: all no-ops on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* Compatibility check hook: every s390 processor can run SIE guests. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Module-level init/exit hooks required by common KVM code. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
115
116/* Section: device related */
117long kvm_arch_dev_ioctl(struct file *filp,
118 unsigned int ioctl, unsigned long arg)
119{
120 if (ioctl == KVM_S390_ENABLE_SIE)
121 return s390_enable_sie();
122 return -EINVAL;
123}
124
125int kvm_dev_ioctl_check_extension(long ext)
126{
d7b0b5eb
CO
127 int r;
128
2bd0ac4e 129 switch (ext) {
d7b0b5eb 130 case KVM_CAP_S390_PSW:
b6cf8788 131 case KVM_CAP_S390_GMAP:
52e16b18 132 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
133#ifdef CONFIG_KVM_S390_UCONTROL
134 case KVM_CAP_S390_UCONTROL:
135#endif
60b413c9 136 case KVM_CAP_SYNC_REGS:
d7b0b5eb
CO
137 r = 1;
138 break;
e726b1bd
CB
139 case KVM_CAP_NR_VCPUS:
140 case KVM_CAP_MAX_VCPUS:
141 r = KVM_MAX_VCPUS;
142 break;
2bd0ac4e 143 default:
d7b0b5eb 144 r = 0;
2bd0ac4e 145 }
d7b0b5eb 146 return r;
b0c632db
HC
147}
148
149/* Section: vm related */
150/*
151 * Get (and clear) the dirty memory log for a memory slot.
152 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 at this point; report success
 * with an empty log so userspace keeps working.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
158
159long kvm_arch_vm_ioctl(struct file *filp,
160 unsigned int ioctl, unsigned long arg)
161{
162 struct kvm *kvm = filp->private_data;
163 void __user *argp = (void __user *)arg;
164 int r;
165
166 switch (ioctl) {
ba5c1e9b
CO
167 case KVM_S390_INTERRUPT: {
168 struct kvm_s390_interrupt s390int;
169
170 r = -EFAULT;
171 if (copy_from_user(&s390int, argp, sizeof(s390int)))
172 break;
173 r = kvm_s390_inject_vm(kvm, &s390int);
174 break;
175 }
b0c632db 176 default:
367e1319 177 r = -ENOTTY;
b0c632db
HC
178 }
179
180 return r;
181}
182
e08b9637 183int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 184{
b0c632db
HC
185 int rc;
186 char debug_name[16];
187
e08b9637
CO
188 rc = -EINVAL;
189#ifdef CONFIG_KVM_S390_UCONTROL
190 if (type & ~KVM_VM_S390_UCONTROL)
191 goto out_err;
192 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
193 goto out_err;
194#else
195 if (type)
196 goto out_err;
197#endif
198
b0c632db
HC
199 rc = s390_enable_sie();
200 if (rc)
d89f5eff 201 goto out_err;
b0c632db 202
b290411a
CO
203 rc = -ENOMEM;
204
b0c632db
HC
205 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
206 if (!kvm->arch.sca)
d89f5eff 207 goto out_err;
b0c632db
HC
208
209 sprintf(debug_name, "kvm-%u", current->pid);
210
211 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
212 if (!kvm->arch.dbf)
213 goto out_nodbf;
214
ba5c1e9b
CO
215 spin_lock_init(&kvm->arch.float_int.lock);
216 INIT_LIST_HEAD(&kvm->arch.float_int.list);
217
b0c632db
HC
218 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
219 VM_EVENT(kvm, 3, "%s", "vm created");
220
e08b9637
CO
221 if (type & KVM_VM_S390_UCONTROL) {
222 kvm->arch.gmap = NULL;
223 } else {
224 kvm->arch.gmap = gmap_alloc(current->mm);
225 if (!kvm->arch.gmap)
226 goto out_nogmap;
227 }
d89f5eff 228 return 0;
598841ca
CO
229out_nogmap:
230 debug_unregister(kvm->arch.dbf);
b0c632db
HC
231out_nodbf:
232 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
233out_err:
234 return rc;
b0c632db
HC
235}
236
d329c035
CB
237void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
238{
239 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
58f9460b
CO
240 if (!kvm_is_ucontrol(vcpu->kvm)) {
241 clear_bit(63 - vcpu->vcpu_id,
242 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
243 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
244 (__u64) vcpu->arch.sie_block)
245 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
246 }
abf4a71e 247 smp_mb();
27e0393f
CO
248
249 if (kvm_is_ucontrol(vcpu->kvm))
250 gmap_free(vcpu->arch.gmap);
251
d329c035 252 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 253 kvm_vcpu_uninit(vcpu);
d329c035
CB
254 kfree(vcpu);
255}
256
257static void kvm_free_vcpus(struct kvm *kvm)
258{
259 unsigned int i;
988a2cae 260 struct kvm_vcpu *vcpu;
d329c035 261
988a2cae
GN
262 kvm_for_each_vcpu(i, vcpu, kvm)
263 kvm_arch_vcpu_destroy(vcpu);
264
265 mutex_lock(&kvm->lock);
266 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
267 kvm->vcpus[i] = NULL;
268
269 atomic_set(&kvm->online_vcpus, 0);
270 mutex_unlock(&kvm->lock);
d329c035
CB
271}
272
ad8ba2cd
SY
/* Hook called before VM destruction; nothing to synchronize on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
276
b0c632db
HC
277void kvm_arch_destroy_vm(struct kvm *kvm)
278{
d329c035 279 kvm_free_vcpus(kvm);
b0c632db 280 free_page((unsigned long)(kvm->arch.sca));
d329c035 281 debug_unregister(kvm->arch.dbf);
27e0393f
CO
282 if (!kvm_is_ucontrol(kvm))
283 gmap_free(kvm->arch.gmap);
b0c632db
HC
284}
285
286/* Section: vcpu related */
287int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
288{
27e0393f
CO
289 if (kvm_is_ucontrol(vcpu->kvm)) {
290 vcpu->arch.gmap = gmap_alloc(current->mm);
291 if (!vcpu->arch.gmap)
292 return -ENOMEM;
293 return 0;
294 }
295
598841ca 296 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
59674c1a
CB
297 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
298 KVM_SYNC_GPRS |
9eed0735
CB
299 KVM_SYNC_ACRS |
300 KVM_SYNC_CRS;
b0c632db
HC
301 return 0;
302}
303
/* Counterpart of kvm_arch_vcpu_init; all cleanup happens elsewhere. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
308
309void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
310{
311 save_fp_regs(&vcpu->arch.host_fpregs);
312 save_access_regs(vcpu->arch.host_acrs);
313 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
314 restore_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 315 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 316 gmap_enable(vcpu->arch.gmap);
9e6dabef 317 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
318}
319
320void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
321{
9e6dabef 322 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 323 gmap_disable(vcpu->arch.gmap);
b0c632db 324 save_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 325 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
326 restore_fp_regs(&vcpu->arch.host_fpregs);
327 restore_access_regs(vcpu->arch.host_acrs);
328}
329
330static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
331{
332 /* this equals initial cpu reset in pop, but we don't switch to ESA */
333 vcpu->arch.sie_block->gpsw.mask = 0UL;
334 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 335 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
336 vcpu->arch.sie_block->cputm = 0UL;
337 vcpu->arch.sie_block->ckc = 0UL;
338 vcpu->arch.sie_block->todpr = 0;
339 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
340 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
341 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
342 vcpu->arch.guest_fpregs.fpc = 0;
343 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
344 vcpu->arch.sie_block->gbea = 1;
345}
346
347int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
348{
9e6dabef
CH
349 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
350 CPUSTAT_SM |
351 CPUSTAT_STOPPED);
fc34531d 352 vcpu->arch.sie_block->ecb = 6;
b0c632db 353 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 354 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
355 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
356 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
357 (unsigned long) vcpu);
358 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 359 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 360 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
361 return 0;
362}
363
364struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
365 unsigned int id)
366{
4d47555a
CO
367 struct kvm_vcpu *vcpu;
368 int rc = -EINVAL;
369
370 if (id >= KVM_MAX_VCPUS)
371 goto out;
372
373 rc = -ENOMEM;
b0c632db 374
4d47555a 375 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 376 if (!vcpu)
4d47555a 377 goto out;
b0c632db 378
180c12fb
CB
379 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
380 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
381
382 if (!vcpu->arch.sie_block)
383 goto out_free_cpu;
384
385 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
386 if (!kvm_is_ucontrol(kvm)) {
387 if (!kvm->arch.sca) {
388 WARN_ON_ONCE(1);
389 goto out_free_cpu;
390 }
391 if (!kvm->arch.sca->cpu[id].sda)
392 kvm->arch.sca->cpu[id].sda =
393 (__u64) vcpu->arch.sie_block;
394 vcpu->arch.sie_block->scaoh =
395 (__u32)(((__u64)kvm->arch.sca) >> 32);
396 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
397 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
398 }
b0c632db 399
ba5c1e9b
CO
400 spin_lock_init(&vcpu->arch.local_int.lock);
401 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
402 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 403 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
404 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
405 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 406 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 407 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 408
b0c632db
HC
409 rc = kvm_vcpu_init(vcpu, kvm, id);
410 if (rc)
7b06bf2f 411 goto out_free_sie_block;
b0c632db
HC
412 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
413 vcpu->arch.sie_block);
414
b0c632db 415 return vcpu;
7b06bf2f
WY
416out_free_sie_block:
417 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
418out_free_cpu:
419 kfree(vcpu);
4d47555a 420out:
b0c632db
HC
421 return ERR_PTR(rc);
422}
423
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it on s390 */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it on s390 */
	BUG();
	return 0;
}
437
438
b0c632db
HC
439static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
440{
b0c632db 441 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
442 return 0;
443}
444
445int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
446{
5a32c1af 447 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
448 return 0;
449}
450
451int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
452{
5a32c1af 453 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
454 return 0;
455}
456
457int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
458 struct kvm_sregs *sregs)
459{
59674c1a 460 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 461 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 462 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
463 return 0;
464}
465
466int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
467 struct kvm_sregs *sregs)
468{
59674c1a 469 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 470 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
471 return 0;
472}
473
474int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
475{
b0c632db 476 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
85175587 477 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
7eef87dc 478 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
479 return 0;
480}
481
482int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
483{
b0c632db
HC
484 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
485 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
486 return 0;
487}
488
489static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
490{
491 int rc = 0;
492
9e6dabef 493 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 494 rc = -EBUSY;
d7b0b5eb
CO
495 else {
496 vcpu->run->psw_mask = psw.mask;
497 vcpu->run->psw_addr = psw.addr;
498 }
b0c632db
HC
499 return rc;
500}
501
502int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
503 struct kvm_translation *tr)
504{
505 return -EINVAL; /* not implemented yet */
506}
507
d0bfb940
JK
508int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
509 struct kvm_guest_debug *dbg)
b0c632db
HC
510{
511 return -EINVAL; /* not implemented yet */
512}
513
62d9f0db
MT
514int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
515 struct kvm_mp_state *mp_state)
516{
517 return -EINVAL; /* not implemented yet */
518}
519
520int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
521 struct kvm_mp_state *mp_state)
522{
523 return -EINVAL; /* not implemented yet */
524}
525
e168bf8d 526static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 527{
e168bf8d
CO
528 int rc;
529
5a32c1af 530 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
531
532 if (need_resched())
533 schedule();
534
71cde587
CB
535 if (test_thread_flag(TIF_MCCK_PENDING))
536 s390_handle_mcck();
537
d6b6d166
CO
538 if (!kvm_is_ucontrol(vcpu->kvm))
539 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 540
b0c632db
HC
541 vcpu->arch.sie_block->icptcode = 0;
542 local_irq_disable();
543 kvm_guest_enter();
544 local_irq_enable();
545 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
546 atomic_read(&vcpu->arch.sie_block->cpuflags));
5a32c1af 547 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
e168bf8d
CO
548 if (rc) {
549 if (kvm_is_ucontrol(vcpu->kvm)) {
550 rc = SIE_INTERCEPT_UCONTROL;
551 } else {
552 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
553 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
554 rc = 0;
555 }
1f0d0f09 556 }
b0c632db
HC
557 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
558 vcpu->arch.sie_block->icptcode);
559 local_irq_disable();
560 kvm_guest_exit();
561 local_irq_enable();
562
5a32c1af 563 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 564 return rc;
b0c632db
HC
565}
566
567int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
568{
8f2abe6a 569 int rc;
b0c632db
HC
570 sigset_t sigsaved;
571
9ace903d 572rerun_vcpu:
b0c632db
HC
573 if (vcpu->sigset_active)
574 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
575
9e6dabef 576 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 577
ba5c1e9b
CO
578 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
579
8f2abe6a
CB
580 switch (kvm_run->exit_reason) {
581 case KVM_EXIT_S390_SIEIC:
8f2abe6a 582 case KVM_EXIT_UNKNOWN:
9ace903d 583 case KVM_EXIT_INTR:
8f2abe6a 584 case KVM_EXIT_S390_RESET:
e168bf8d 585 case KVM_EXIT_S390_UCONTROL:
8f2abe6a
CB
586 break;
587 default:
588 BUG();
589 }
590
d7b0b5eb
CO
591 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
592 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
60b413c9
CB
593 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
594 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
595 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
596 }
9eed0735
CB
597 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
598 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
599 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
600 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
601 }
d7b0b5eb 602
dab4079d 603 might_fault();
8f2abe6a
CB
604
605 do {
e168bf8d
CO
606 rc = __vcpu_run(vcpu);
607 if (rc)
608 break;
c0d744a9
CO
609 if (kvm_is_ucontrol(vcpu->kvm))
610 rc = -EOPNOTSUPP;
611 else
612 rc = kvm_handle_sie_intercept(vcpu);
8f2abe6a
CB
613 } while (!signal_pending(current) && !rc);
614
9ace903d
CE
615 if (rc == SIE_INTERCEPT_RERUNVCPU)
616 goto rerun_vcpu;
617
b1d16c49
CE
618 if (signal_pending(current) && !rc) {
619 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 620 rc = -EINTR;
b1d16c49 621 }
8f2abe6a 622
e168bf8d
CO
623#ifdef CONFIG_KVM_S390_UCONTROL
624 if (rc == SIE_INTERCEPT_UCONTROL) {
625 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
626 kvm_run->s390_ucontrol.trans_exc_code =
627 current->thread.gmap_addr;
628 kvm_run->s390_ucontrol.pgm_code = 0x10;
629 rc = 0;
630 }
631#endif
632
b8e660b8 633 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
634 /* intercept cannot be handled in-kernel, prepare kvm-run */
635 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
636 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
637 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
638 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
639 rc = 0;
640 }
641
642 if (rc == -EREMOTE) {
643 /* intercept was handled, but userspace support is needed
644 * kvm_run has been prepared by the handler */
645 rc = 0;
646 }
b0c632db 647
d7b0b5eb
CO
648 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
649 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
60b413c9 650 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
9eed0735 651 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
d7b0b5eb 652
b0c632db
HC
653 if (vcpu->sigset_active)
654 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
655
b0c632db 656 vcpu->stat.exit_userspace++;
7e8e6ab4 657 return rc;
b0c632db
HC
658}
659
092670cd 660static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
661 unsigned long n, int prefix)
662{
663 if (prefix)
664 return copy_to_guest(vcpu, guestdest, from, n);
665 else
666 return copy_to_guest_absolute(vcpu, guestdest, from, n);
667}
668
669/*
670 * store status at address
671 * we use have two special cases:
672 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
673 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
674 */
971eb77f 675int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 676{
092670cd 677 unsigned char archmode = 1;
b0c632db
HC
678 int prefix;
679
680 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
681 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
682 return -EFAULT;
683 addr = SAVE_AREA_BASE;
684 prefix = 0;
685 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
686 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
687 return -EFAULT;
688 addr = SAVE_AREA_BASE;
689 prefix = 1;
690 } else
691 prefix = 0;
692
f64ca217 693 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
694 vcpu->arch.guest_fpregs.fprs, 128, prefix))
695 return -EFAULT;
696
f64ca217 697 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
5a32c1af 698 vcpu->run->s.regs.gprs, 128, prefix))
b0c632db
HC
699 return -EFAULT;
700
f64ca217 701 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
702 &vcpu->arch.sie_block->gpsw, 16, prefix))
703 return -EFAULT;
704
f64ca217 705 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
706 &vcpu->arch.sie_block->prefix, 4, prefix))
707 return -EFAULT;
708
709 if (__guestcopy(vcpu,
f64ca217 710 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
711 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
712 return -EFAULT;
713
f64ca217 714 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
715 &vcpu->arch.sie_block->todpr, 4, prefix))
716 return -EFAULT;
717
f64ca217 718 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
719 &vcpu->arch.sie_block->cputm, 8, prefix))
720 return -EFAULT;
721
f64ca217 722 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
723 &vcpu->arch.sie_block->ckc, 8, prefix))
724 return -EFAULT;
725
f64ca217 726 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
59674c1a 727 &vcpu->run->s.regs.acrs, 64, prefix))
b0c632db
HC
728 return -EFAULT;
729
730 if (__guestcopy(vcpu,
f64ca217 731 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
732 &vcpu->arch.sie_block->gcr, 128, prefix))
733 return -EFAULT;
734 return 0;
735}
736
b0c632db
HC
737long kvm_arch_vcpu_ioctl(struct file *filp,
738 unsigned int ioctl, unsigned long arg)
739{
740 struct kvm_vcpu *vcpu = filp->private_data;
741 void __user *argp = (void __user *)arg;
bc923cc9 742 long r;
b0c632db 743
93736624
AK
744 switch (ioctl) {
745 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
746 struct kvm_s390_interrupt s390int;
747
93736624 748 r = -EFAULT;
ba5c1e9b 749 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
750 break;
751 r = kvm_s390_inject_vcpu(vcpu, &s390int);
752 break;
ba5c1e9b 753 }
b0c632db 754 case KVM_S390_STORE_STATUS:
bc923cc9
AK
755 r = kvm_s390_vcpu_store_status(vcpu, arg);
756 break;
b0c632db
HC
757 case KVM_S390_SET_INITIAL_PSW: {
758 psw_t psw;
759
bc923cc9 760 r = -EFAULT;
b0c632db 761 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
762 break;
763 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
764 break;
b0c632db
HC
765 }
766 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
767 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
768 break;
27e0393f
CO
769#ifdef CONFIG_KVM_S390_UCONTROL
770 case KVM_S390_UCAS_MAP: {
771 struct kvm_s390_ucas_mapping ucasmap;
772
773 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
774 r = -EFAULT;
775 break;
776 }
777
778 if (!kvm_is_ucontrol(vcpu->kvm)) {
779 r = -EINVAL;
780 break;
781 }
782
783 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
784 ucasmap.vcpu_addr, ucasmap.length);
785 break;
786 }
787 case KVM_S390_UCAS_UNMAP: {
788 struct kvm_s390_ucas_mapping ucasmap;
789
790 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
791 r = -EFAULT;
792 break;
793 }
794
795 if (!kvm_is_ucontrol(vcpu->kvm)) {
796 r = -EINVAL;
797 break;
798 }
799
800 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
801 ucasmap.length);
802 break;
803 }
804#endif
ccc7910f
CO
805 case KVM_S390_VCPU_FAULT: {
806 r = gmap_fault(arg, vcpu->arch.gmap);
807 if (!IS_ERR_VALUE(r))
808 r = 0;
809 break;
810 }
b0c632db 811 default:
3e6afcf1 812 r = -ENOTTY;
b0c632db 813 }
bc923cc9 814 return r;
b0c632db
HC
815}
816
5b1c1493
CO
817int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
818{
819#ifdef CONFIG_KVM_S390_UCONTROL
820 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
821 && (kvm_is_ucontrol(vcpu->kvm))) {
822 vmf->page = virt_to_page(vcpu->arch.sie_block);
823 get_page(vmf->page);
824 return 0;
825 }
826#endif
827 return VM_FAULT_SIGBUS;
828}
829
db3fe4eb
TY
830void kvm_arch_free_memslot(struct kvm_memory_slot *free,
831 struct kvm_memory_slot *dont)
832{
833}
834
835int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
836{
837 return 0;
838}
839
b0c632db 840/* Section: memory related */
f7784b8e
MT
841int kvm_arch_prepare_memory_region(struct kvm *kvm,
842 struct kvm_memory_slot *memslot,
843 struct kvm_memory_slot old,
844 struct kvm_userspace_memory_region *mem,
845 int user_alloc)
b0c632db
HC
846{
847 /* A few sanity checks. We can have exactly one memory slot which has
848 to start at guest virtual zero and which has to be located at a
849 page boundary in userland and which has to end at a page boundary.
850 The memory in userland is ok to be fragmented into various different
851 vmas. It is okay to mmap() and munmap() stuff in this slot after
852 doing this call at any time */
853
628eb9b8 854 if (mem->slot)
b0c632db
HC
855 return -EINVAL;
856
857 if (mem->guest_phys_addr)
858 return -EINVAL;
859
598841ca 860 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
861 return -EINVAL;
862
598841ca 863 if (mem->memory_size & 0xffffful)
b0c632db
HC
864 return -EINVAL;
865
2668dab7
CO
866 if (!user_alloc)
867 return -EINVAL;
868
f7784b8e
MT
869 return 0;
870}
871
872void kvm_arch_commit_memory_region(struct kvm *kvm,
873 struct kvm_userspace_memory_region *mem,
874 struct kvm_memory_slot old,
875 int user_alloc)
876{
f7850c92 877 int rc;
f7784b8e 878
598841ca
CO
879
880 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
881 mem->guest_phys_addr, mem->memory_size);
882 if (rc)
f7850c92 883 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 884 return;
b0c632db
HC
885}
886
34d4cb8f
MT
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
890
b0c632db
HC
891static int __init kvm_s390_init(void)
892{
ef50f7ac 893 int ret;
0ee75bea 894 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
895 if (ret)
896 return ret;
897
898 /*
899 * guests can ask for up to 255+1 double words, we need a full page
25985edc 900 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
901 * only set facilities that are known to work in KVM.
902 */
c2f0e8c8 903 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
904 if (!facilities) {
905 kvm_exit();
906 return -ENOMEM;
907 }
14375bc4 908 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 909 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 910 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 911 return 0;
b0c632db
HC
912}
913
914static void __exit kvm_s390_exit(void)
915{
ef50f7ac 916 free_page((unsigned long) facilities);
b0c632db
HC
917 kvm_exit();
918}
919
920module_init(kvm_s390_init);
921module_exit(kvm_s390_exit);
This page took 0.338866 seconds and 5 git commands to generate.