KVM: x86: Lock arch specific vcpu ioctls centrally
arch/s390/kvm/kvm-s390.c
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
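
/*
 * A minimal userspace sketch of how the device ioctl above is reached,
 * kept in #if 0 since it is illustrative only: KVM_S390_ENABLE_SIE is
 * the ioctl handled by kvm_arch_dev_ioctl(); the open() boilerplate and
 * error handling around it are assumptions.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_sie(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);	/* system-wide KVM device */

	if (kvm_fd < 0)
		return -1;
	/* dispatched to kvm_arch_dev_ioctl() -> s390_enable_sie() */
	return ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
}
#endif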

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
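
/*
 * A minimal userspace sketch of the VM ioctl above: inject a floating
 * service-signal interrupt. The struct and ioctl come from
 * <linux/kvm.h>; vm_fd is assumed to be a KVM_CREATE_VM file
 * descriptor, and the sccb address is caller-supplied.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_service_signal(int vm_fd, __u32 sccb_addr)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_SERVICE,	/* routed to kvm_s390_inject_vm() */
		.parm = sccb_addr,
	};

	return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
}
#endif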

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
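
/*
 * The load/put pair above switches the floating-point and access
 * registers eagerly at every vcpu load/put boundary: host state is
 * parked in vcpu->arch while the guest copies are live, and restored
 * on the way out; no lazy switching is attempted here.
 */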

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
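
/*
 * The non-zero values above follow the architected initial CPU reset:
 * ihcpu = 0xffff marks the interception host cpu as invalid, gcr[0]
 * and gcr[14] hold the documented reset values of control registers 0
 * and 14 (see the z/Architecture Principles of Operation), and
 * gbea = 1 is the reset value of the breaking-event-address register.
 */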

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
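
/*
 * Note the bracketing in __vcpu_run(): guest registers 14 and 15 live
 * in the SIE block (gg14/gg15), so 16 bytes are copied in each
 * direction around sie64a(), while the remaining GPRs travel via
 * guest_gprs. A fault raised by the SIE instruction itself is folded
 * back into the guest as an addressing exception instead of taking
 * down the host thread.
 */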

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
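
/*
 * A minimal userspace run loop against the handler above (a sketch:
 * vcpu_fd and the mmap'ed kvm_run region are assumed to have been set
 * up via KVM_CREATE_VCPU and KVM_GET_VCPU_MMAP_SIZE as usual).
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;	/* -EINTR and friends end up here */
		if (run->exit_reason == KVM_EXIT_S390_SIEIC) {
			/* handle run->s390_sieic.icptcode / ipa / ipb */
			break;
		}
	}
}
#endif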

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}
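
/*
 * Sketch of the KVM_S390_SET_INITIAL_PSW path from userspace; the PSW
 * contents are illustrative assumptions (a 64-bit EA/BA PSW pointing
 * at guest address 0x10000), only the struct and ioctl are given.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_initial_psw(int vcpu_fd)
{
	struct kvm_s390_psw psw = {
		.mask = 0x0000000180000000UL,	/* EA+BA: 64-bit addressing */
		.addr = 0x10000,		/* guest start address (example) */
	};

	return ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
}
#endif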

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest virtual zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into various different vmas.
	   It is okay to mmap() and munmap() stuff in this slot after doing
	   this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
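
/*
 * The checks above pin down the only layout this code accepts: a
 * single slot 0, starting at guest physical 0, page aligned at both
 * ends. A matching userspace sketch (the page-aligned buffer in mem
 * is assumed to be allocated by the caller):
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int register_guest_memory(int vm_fd, void *mem, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,		/* mem->slot must be 0 */
		.guest_phys_addr = 0,	/* must start at guest address 0 */
		.memory_size = size,	/* page aligned, like mem itself */
		.userspace_addr = (__u64) (unsigned long) mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
#endif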

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}
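
/*
 * stfle() above fills in the facility list the hardware reports; the
 * mask that follows deliberately advertises only facilities known to
 * work under KVM, so a guest never sees a capability the hypervisor
 * cannot back.
 */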

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);