KVM: Remove memory alias support
arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

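/* per-vcpu statistics exported via debugfs, one counter per exit/intercept */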
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
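/*
 * KVM_S390_ENABLE_SIE is the only device ioctl: it calls s390_enable_sie()
 * to convert the calling process' address space into a form that the SIE
 * instruction can use.
 */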
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

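/*
 * Create the vm: enable SIE for the current process, then allocate the
 * kvm structure, the system control area (SCA) that the hardware reads,
 * and an s390 debug feature area for tracing.
 */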
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

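/*
 * Free a vcpu: clear its entry in the SCA so the hardware no longer sees
 * this cpu, then release the SIE control block and the vcpu itself.
 */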
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
            (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

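/*
 * The values below follow the initial cpu reset definition in the
 * Principles of Operation: PSW, prefix and timers are cleared, and
 * cr0/cr14 are preset with their interrupt subclass mask defaults.
 */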
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

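/*
 * One-time setup of the SIE control block: the vcpu starts out in
 * z/Architecture mode, the ecb/eca fields enable the interpretation
 * facilities we rely on, and fac points at the facility list page
 * prepared in kvm_s390_init().
 */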
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

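/*
 * One pass through SIE: guest r14/r15 are kept in the SIE block itself
 * (gg14), so they are copied in before and back out after sie64a().
 * A nonzero return from sie64a() means the SIE instruction faulted;
 * this is reflected to the guest as an addressing exception.
 */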
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

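/*
 * The KVM_RUN loop: re-enter SIE until kvm_handle_sie_intercept()
 * returns nonzero or a signal arrives.  SIE_INTERCEPT_RERUNVCPU
 * restarts the whole function, e.g. after a memory reload request.
 */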
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
558
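/*
 * Copy data into guest memory; with prefix != 0 the destination is a
 * guest real address (subject to prefixing), otherwise it is treated
 * as a guest absolute address.
 */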
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot. It must
           start at guest physical zero, begin and end on a page boundary in
           userland, and be user-allocated. The userland memory may be
           fragmented into several vmas; it is fine to mmap() and munmap()
           ranges within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

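/*
 * After a slot change every vcpu must refresh the guest memory setup in
 * its SIE control block.  Vcpus that already have a reload request
 * pending are skipped; the others are kicked out of SIE with a stop
 * request that triggers the reload on the way back in.
 */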
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum number of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        stfle(facilities, 1);
        facilities[0] &= 0xff00fff3f0700000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);