KVM: s390: fix assumption for KVM_MAX_VCPUS
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/* Offset/format pair for a per-vcpu counter inside struct kvm_vcpu::stat. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported via debugfs; KVM common code walks this
 * NULL-terminated table and creates one file per entry.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
79
/*
 * Page holding the facility list (STFLE bits) advertised to guests;
 * allocated and masked in kvm_s390_init(), freed in kvm_s390_exit().
 */
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Nothing to undo — see kvm_arch_hardware_enable(). */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No per-host setup needed on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs in a system are compatible; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Module-wide init hook from kvm common code; no arch work required. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	/* Dirty logging is not implemented on s390; report success/empty. */
	return 0;
}
149
/*
 * VM-scope ioctl handler.  Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is handled here; anything else is -ENOTTY so
 * callers can distinguish "unknown ioctl" from a real failure.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
173
/*
 * Create the architecture-specific part of a VM: validate the requested
 * VM type, enable SIE for this process, and allocate the SCA page, the
 * debug feature area, floating-interrupt state and (for non-ucontrol
 * VMs) the guest address-space gmap.  Errors unwind via goto cleanup.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* Only the ucontrol flag is valid, and it requires CAP_SYS_ADMIN. */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	/* System control area: one zeroed page shared by all vcpus. */
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs get a per-vcpu gmap in kvm_arch_vcpu_init(). */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
227
d329c035
CB
/*
 * Tear down one vcpu: detach it from the SCA (non-ucontrol VMs only),
 * free its per-vcpu gmap (ucontrol VMs only), then release the SIE
 * control block page and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* mcn is a 64-bit mask of valid SCA entries, MSB = cpu 0. */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* Only clear the sda slot if it still points at our block. */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
247
/*
 * Destroy every vcpu of a VM and reset the online count.  The vcpus[]
 * slots are cleared under kvm->lock so concurrent lookups see a
 * consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
263
ad8ba2cd
SY
/* No asynchronous arch events to flush before VM destruction on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
267
b0c632db
HC
/*
 * Free all architecture-specific VM state: vcpus first, then the SCA
 * page, the debug feature, and (for non-ucontrol VMs) the gmap.
 * Mirrors the allocations done in kvm_arch_init_vm().
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
276
/* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol VMs get their own private gmap per vcpu,
 * while regular VMs share the VM-wide gmap created in
 * kvm_arch_init_vm().
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}
290
/* Counterpart of kvm_arch_vcpu_init(); cleanup happens in _destroy(). */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
295
/*
 * Called when the vcpu is scheduled onto a host cpu: save the host FP
 * and access registers, install the guest ones, switch to the guest
 * address space and mark the vcpu running in the SIE control block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* Mask out invalid FPC bits before loading guest FP state. */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
306
/*
 * Called when the vcpu is scheduled away: inverse of
 * kvm_arch_vcpu_load() — clear RUNNING, leave the guest address space
 * and swap guest registers out for the saved host ones.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
316
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	/* Invalidate the interception history cpu field. */
	vcpu->arch.sie_block->ihcpu  = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* Architected reset values for CR0 and CR14 (per POP) */
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
334
/*
 * One-time setup of the SIE control block for a freshly created vcpu:
 * start in stopped z/Architecture mode, wire up the clock-comparator
 * wakeup machinery and point the block at the KVM facility list.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	/* NOTE(review): ecb/eca are SIE execution-control bit masks; the
	 * exact bit meanings come from the SIE spec — confirm before edit. */
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* Tell the guest it runs under a hypervisor. */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
351
352struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
353 unsigned int id)
354{
4d47555a
CO
355 struct kvm_vcpu *vcpu;
356 int rc = -EINVAL;
357
358 if (id >= KVM_MAX_VCPUS)
359 goto out;
360
361 rc = -ENOMEM;
b0c632db 362
4d47555a 363 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 364 if (!vcpu)
4d47555a 365 goto out;
b0c632db 366
180c12fb
CB
367 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
368 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
369
370 if (!vcpu->arch.sie_block)
371 goto out_free_cpu;
372
373 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
374 if (!kvm_is_ucontrol(kvm)) {
375 if (!kvm->arch.sca) {
376 WARN_ON_ONCE(1);
377 goto out_free_cpu;
378 }
379 if (!kvm->arch.sca->cpu[id].sda)
380 kvm->arch.sca->cpu[id].sda =
381 (__u64) vcpu->arch.sie_block;
382 vcpu->arch.sie_block->scaoh =
383 (__u32)(((__u64)kvm->arch.sca) >> 32);
384 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
385 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
386 }
b0c632db 387
ba5c1e9b
CO
388 spin_lock_init(&vcpu->arch.local_int.lock);
389 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
390 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 391 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
392 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
393 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 394 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 395 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 396
b0c632db
HC
397 rc = kvm_vcpu_init(vcpu, kvm, id);
398 if (rc)
7b06bf2f 399 goto out_free_sie_block;
b0c632db
HC
400 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
401 vcpu->arch.sie_block);
402
b0c632db 403 return vcpu;
7b06bf2f
WY
404out_free_sie_block:
405 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
406out_free_cpu:
407 kfree(vcpu);
4d47555a 408out:
b0c632db
HC
409 return ERR_PTR(rc);
410}
411
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
418
/* ioctl wrapper around the initial-CPU-reset helper; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
424
/* Copy userspace-supplied general purpose registers into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy the vcpu's general purpose registers out to userspace format. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

/*
 * Set access and control registers.  The access registers are loaded
 * into hardware immediately so the new values are live.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

/* Read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

/*
 * Set floating point registers and FPC; loaded into hardware
 * immediately so the new values are live.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

/* Read back floating point registers and FPC. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
468
/*
 * Install an initial PSW for the vcpu.  Only allowed while the vcpu is
 * stopped; the PSW is staged in kvm_run and picked up by
 * kvm_arch_vcpu_ioctl_run() before entering the guest.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
481
/* The following vcpu ioctls are not (yet) supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
505
/*
 * Enter the guest once via SIE and return the result: 0 on a normal
 * intercept (icptcode filled in), SIE_INTERCEPT_UCONTROL when a
 * ucontrol VM faults in SIE, otherwise 0 after injecting an addressing
 * exception for a regular VM.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 live in the SIE block; sync gprs 14/15 in. */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	/* Handle a pending machine check before entering the guest. */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter() must run with interrupts off. */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			/* ucontrol: let userspace resolve the fault. */
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* Sync gprs 14/15 back out of the SIE block. */
	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
546
/*
 * Main KVM_RUN loop for one vcpu: install userspace's PSW, repeatedly
 * enter the guest via __vcpu_run() and handle intercepts in-kernel
 * until a signal arrives or an exit must be forwarded to userspace,
 * then translate the exit into the kvm_run structure.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* Only exit reasons we know how to resume from are acceptable. */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* PSW is passed in through kvm_run on every entry. */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* ucontrol VMs forward every intercept to userspace. */
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		/* Report the faulting guest address so userspace can map it. */
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* Hand the current PSW back to userspace. */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
628
092670cd 629static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
630 unsigned long n, int prefix)
631{
632 if (prefix)
633 return copy_to_guest(vcpu, guestdest, from, n);
634 else
635 return copy_to_guest_absolute(vcpu, guestdest, from, n);
636}
637
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (FP regs, GP regs, PSW, prefix,
 * FPC, TOD programmable reg, CPU timer, clock comparator, access and
 * control registers) into guest memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* Byte 163 is the architected-mode flag in absolute 0. */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
705
b0c632db
HC
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store-status,
 * initial PSW/reset, and (under CONFIG_KVM_S390_UCONTROL) user
 * controlled address-space mapping and fault resolution.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mappings only make sense for ucontrol VMs. */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Resolve a guest fault at address @arg in the gmap. */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
	return r;
}
785
5b1c1493
CO
/*
 * mmap fault handler for the vcpu fd: for ucontrol VMs, page offset
 * KVM_S390_SIE_PAGE_OFFSET exposes the SIE control block to userspace.
 * Everything else faults with SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
798
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	/* Only slot 0 is supported. */
	if (mem->slot)
		return -EINVAL;

	/* The slot must map guest physical address zero. */
	if (mem->guest_phys_addr)
		return -EINVAL;

	/* Start and size must be 1 MB (segment) aligned. */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	/* Only userspace-allocated memory is supported. */
	if (!user_alloc)
		return -EINVAL;

	return 0;
}
830
/*
 * Second stage of memslot setup: map the userspace range into the
 * guest address space via the gmap.  Failure is only logged — the
 * callback cannot return an error.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
845
34d4cb8f
MT
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
849
b0c632db
HC
/*
 * Module init: register with KVM common code, then build the facility
 * list page advertised to guests (host STFLE bits masked down to what
 * KVM actually supports).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* Copy the first 16 bytes (128 facility bits) from the host list. */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
872
/* Module exit: free the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
This page took 0.439457 seconds and 5 git commands to generate.