/*
 * arch/s390/kvm/kvm-s390.c
 * NOTE(review): this file was recovered from a gitweb blame view; stray
 * commit hashes and contributor initials in the text are scrape artifacts.
 */
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
7697e71f 68 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
69 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
70 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
71 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
72 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
73 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 74 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 75 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
76 { NULL }
77};
78
/*
 * Facility bit list handed to guests; one zeroed DMA page allocated in
 * kvm_s390_init() and released in kvm_s390_exit().
 */
static unsigned long long *facilities;

/* Section: not file related */

/* No hardware enablement needed. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Nothing to undo — see kvm_arch_hardware_enable(). */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No per-host setup required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs that run this kernel can run SIE; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
113
114/* Section: device related */
115long kvm_arch_dev_ioctl(struct file *filp,
116 unsigned int ioctl, unsigned long arg)
117{
118 if (ioctl == KVM_S390_ENABLE_SIE)
119 return s390_enable_sie();
120 return -EINVAL;
121}
122
123int kvm_dev_ioctl_check_extension(long ext)
124{
d7b0b5eb
CO
125 int r;
126
2bd0ac4e 127 switch (ext) {
d7b0b5eb 128 case KVM_CAP_S390_PSW:
b6cf8788 129 case KVM_CAP_S390_GMAP:
d7b0b5eb
CO
130 r = 1;
131 break;
2bd0ac4e 132 default:
d7b0b5eb 133 r = 0;
2bd0ac4e 134 }
d7b0b5eb 135 return r;
b0c632db
HC
136}
137
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty page logging is not implemented on s390; report success with an
 * empty log so generic userspace keeps working.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
147
148long kvm_arch_vm_ioctl(struct file *filp,
149 unsigned int ioctl, unsigned long arg)
150{
151 struct kvm *kvm = filp->private_data;
152 void __user *argp = (void __user *)arg;
153 int r;
154
155 switch (ioctl) {
ba5c1e9b
CO
156 case KVM_S390_INTERRUPT: {
157 struct kvm_s390_interrupt s390int;
158
159 r = -EFAULT;
160 if (copy_from_user(&s390int, argp, sizeof(s390int)))
161 break;
162 r = kvm_s390_inject_vm(kvm, &s390int);
163 break;
164 }
b0c632db 165 default:
367e1319 166 r = -ENOTTY;
b0c632db
HC
167 }
168
169 return r;
170}
171
d89f5eff 172int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 173{
b0c632db
HC
174 int rc;
175 char debug_name[16];
176
177 rc = s390_enable_sie();
178 if (rc)
d89f5eff 179 goto out_err;
b0c632db 180
b290411a
CO
181 rc = -ENOMEM;
182
b0c632db
HC
183 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
184 if (!kvm->arch.sca)
d89f5eff 185 goto out_err;
b0c632db
HC
186
187 sprintf(debug_name, "kvm-%u", current->pid);
188
189 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
190 if (!kvm->arch.dbf)
191 goto out_nodbf;
192
ba5c1e9b
CO
193 spin_lock_init(&kvm->arch.float_int.lock);
194 INIT_LIST_HEAD(&kvm->arch.float_int.list);
195
b0c632db
HC
196 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
197 VM_EVENT(kvm, 3, "%s", "vm created");
198
598841ca
CO
199 kvm->arch.gmap = gmap_alloc(current->mm);
200 if (!kvm->arch.gmap)
201 goto out_nogmap;
202
d89f5eff 203 return 0;
598841ca
CO
204out_nogmap:
205 debug_unregister(kvm->arch.dbf);
b0c632db
HC
206out_nodbf:
207 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
208out_err:
209 return rc;
b0c632db
HC
210}
211
d329c035
CB
212void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
213{
214 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 215 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
216 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
217 (__u64) vcpu->arch.sie_block)
218 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
219 smp_mb();
d329c035 220 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 221 kvm_vcpu_uninit(vcpu);
d329c035
CB
222 kfree(vcpu);
223}
224
225static void kvm_free_vcpus(struct kvm *kvm)
226{
227 unsigned int i;
988a2cae 228 struct kvm_vcpu *vcpu;
d329c035 229
988a2cae
GN
230 kvm_for_each_vcpu(i, vcpu, kvm)
231 kvm_arch_vcpu_destroy(vcpu);
232
233 mutex_lock(&kvm->lock);
234 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
235 kvm->vcpus[i] = NULL;
236
237 atomic_set(&kvm->online_vcpus, 0);
238 mutex_unlock(&kvm->lock);
d329c035
CB
239}
240
ad8ba2cd
SY
241void kvm_arch_sync_events(struct kvm *kvm)
242{
243}
244
b0c632db
HC
245void kvm_arch_destroy_vm(struct kvm *kvm)
246{
d329c035 247 kvm_free_vcpus(kvm);
b0c632db 248 free_page((unsigned long)(kvm->arch.sca));
d329c035 249 debug_unregister(kvm->arch.dbf);
598841ca 250 gmap_free(kvm->arch.gmap);
b0c632db
HC
251}
252
253/* Section: vcpu related */
254int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
255{
598841ca 256 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
257 return 0;
258}
259
260void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
261{
6692cef3 262 /* Nothing todo */
b0c632db
HC
263}
264
265void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
266{
267 save_fp_regs(&vcpu->arch.host_fpregs);
268 save_access_regs(vcpu->arch.host_acrs);
269 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
270 restore_fp_regs(&vcpu->arch.guest_fpregs);
271 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 272 gmap_enable(vcpu->arch.gmap);
b0c632db
HC
273}
274
275void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
276{
480e5926 277 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
278 save_fp_regs(&vcpu->arch.guest_fpregs);
279 save_access_regs(vcpu->arch.guest_acrs);
280 restore_fp_regs(&vcpu->arch.host_fpregs);
281 restore_access_regs(vcpu->arch.host_acrs);
282}
283
284static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
285{
286 /* this equals initial cpu reset in pop, but we don't switch to ESA */
287 vcpu->arch.sie_block->gpsw.mask = 0UL;
288 vcpu->arch.sie_block->gpsw.addr = 0UL;
289 vcpu->arch.sie_block->prefix = 0UL;
290 vcpu->arch.sie_block->ihcpu = 0xffff;
291 vcpu->arch.sie_block->cputm = 0UL;
292 vcpu->arch.sie_block->ckc = 0UL;
293 vcpu->arch.sie_block->todpr = 0;
294 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
295 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
296 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
297 vcpu->arch.guest_fpregs.fpc = 0;
298 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
299 vcpu->arch.sie_block->gbea = 1;
300}
301
302int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
303{
598841ca 304 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
fc34531d 305 vcpu->arch.sie_block->ecb = 6;
b0c632db 306 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 307 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
308 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
309 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
310 (unsigned long) vcpu);
311 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 312 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 313 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
314 return 0;
315}
316
317struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
318 unsigned int id)
319{
4d47555a
CO
320 struct kvm_vcpu *vcpu;
321 int rc = -EINVAL;
322
323 if (id >= KVM_MAX_VCPUS)
324 goto out;
325
326 rc = -ENOMEM;
b0c632db 327
4d47555a 328 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 329 if (!vcpu)
4d47555a 330 goto out;
b0c632db 331
180c12fb
CB
332 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
333 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
334
335 if (!vcpu->arch.sie_block)
336 goto out_free_cpu;
337
338 vcpu->arch.sie_block->icpua = id;
339 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
340 if (!kvm->arch.sca->cpu[id].sda)
341 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
342 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
343 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 344 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 345
ba5c1e9b
CO
346 spin_lock_init(&vcpu->arch.local_int.lock);
347 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
348 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 349 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
350 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
351 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 352 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 353 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 354
b0c632db
HC
355 rc = kvm_vcpu_init(vcpu, kvm, id);
356 if (rc)
7b06bf2f 357 goto out_free_sie_block;
b0c632db
HC
358 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
359 vcpu->arch.sie_block);
360
b0c632db 361 return vcpu;
7b06bf2f
WY
362out_free_sie_block:
363 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
364out_free_cpu:
365 kfree(vcpu);
4d47555a 366out:
b0c632db
HC
367 return ERR_PTR(rc);
368}
369
b0c632db
HC
370int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
371{
372 /* kvm common code refers to this, but never calls it */
373 BUG();
374 return 0;
375}
376
377static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
378{
b0c632db 379 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
380 return 0;
381}
382
383int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
384{
b0c632db 385 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
386 return 0;
387}
388
389int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
390{
b0c632db 391 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
392 return 0;
393}
394
395int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
396 struct kvm_sregs *sregs)
397{
b0c632db
HC
398 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
399 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 400 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
401 return 0;
402}
403
404int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
405 struct kvm_sregs *sregs)
406{
b0c632db
HC
407 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
408 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
409 return 0;
410}
411
412int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
413{
b0c632db
HC
414 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
415 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 416 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
417 return 0;
418}
419
420int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
421{
b0c632db
HC
422 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
423 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
424 return 0;
425}
426
427static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
428{
429 int rc = 0;
430
b0c632db
HC
431 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
432 rc = -EBUSY;
d7b0b5eb
CO
433 else {
434 vcpu->run->psw_mask = psw.mask;
435 vcpu->run->psw_addr = psw.addr;
436 }
b0c632db
HC
437 return rc;
438}
439
440int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
441 struct kvm_translation *tr)
442{
443 return -EINVAL; /* not implemented yet */
444}
445
d0bfb940
JK
446int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
447 struct kvm_guest_debug *dbg)
b0c632db
HC
448{
449 return -EINVAL; /* not implemented yet */
450}
451
62d9f0db
MT
452int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
453 struct kvm_mp_state *mp_state)
454{
455 return -EINVAL; /* not implemented yet */
456}
457
458int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
459 struct kvm_mp_state *mp_state)
460{
461 return -EINVAL; /* not implemented yet */
462}
463
b0c632db
HC
464static void __vcpu_run(struct kvm_vcpu *vcpu)
465{
466 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
467
468 if (need_resched())
469 schedule();
470
71cde587
CB
471 if (test_thread_flag(TIF_MCCK_PENDING))
472 s390_handle_mcck();
473
0ff31867
CO
474 kvm_s390_deliver_pending_interrupts(vcpu);
475
b0c632db
HC
476 vcpu->arch.sie_block->icptcode = 0;
477 local_irq_disable();
478 kvm_guest_enter();
479 local_irq_enable();
480 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
481 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
482 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
483 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
484 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
485 }
b0c632db
HC
486 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
487 vcpu->arch.sie_block->icptcode);
488 local_irq_disable();
489 kvm_guest_exit();
490 local_irq_enable();
491
492 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
493}
494
495int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
496{
8f2abe6a 497 int rc;
b0c632db
HC
498 sigset_t sigsaved;
499
9ace903d 500rerun_vcpu:
b0c632db
HC
501 if (vcpu->sigset_active)
502 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
503
504 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
505
ba5c1e9b
CO
506 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
507
8f2abe6a
CB
508 switch (kvm_run->exit_reason) {
509 case KVM_EXIT_S390_SIEIC:
8f2abe6a 510 case KVM_EXIT_UNKNOWN:
9ace903d 511 case KVM_EXIT_INTR:
8f2abe6a
CB
512 case KVM_EXIT_S390_RESET:
513 break;
514 default:
515 BUG();
516 }
517
d7b0b5eb
CO
518 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
519 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
520
dab4079d 521 might_fault();
8f2abe6a
CB
522
523 do {
524 __vcpu_run(vcpu);
8f2abe6a
CB
525 rc = kvm_handle_sie_intercept(vcpu);
526 } while (!signal_pending(current) && !rc);
527
9ace903d
CE
528 if (rc == SIE_INTERCEPT_RERUNVCPU)
529 goto rerun_vcpu;
530
b1d16c49
CE
531 if (signal_pending(current) && !rc) {
532 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 533 rc = -EINTR;
b1d16c49 534 }
8f2abe6a 535
b8e660b8 536 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
537 /* intercept cannot be handled in-kernel, prepare kvm-run */
538 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
539 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
540 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
541 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
542 rc = 0;
543 }
544
545 if (rc == -EREMOTE) {
546 /* intercept was handled, but userspace support is needed
547 * kvm_run has been prepared by the handler */
548 rc = 0;
549 }
b0c632db 550
d7b0b5eb
CO
551 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
552 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
553
b0c632db
HC
554 if (vcpu->sigset_active)
555 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
556
b0c632db 557 vcpu->stat.exit_userspace++;
7e8e6ab4 558 return rc;
b0c632db
HC
559}
560
092670cd 561static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
562 unsigned long n, int prefix)
563{
564 if (prefix)
565 return copy_to_guest(vcpu, guestdest, from, n);
566 else
567 return copy_to_guest_absolute(vcpu, guestdest, from, n);
568}
569
570/*
571 * store status at address
572 * we use have two special cases:
573 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
574 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
575 */
971eb77f 576int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 577{
092670cd 578 unsigned char archmode = 1;
b0c632db
HC
579 int prefix;
580
581 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
582 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
583 return -EFAULT;
584 addr = SAVE_AREA_BASE;
585 prefix = 0;
586 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
587 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
588 return -EFAULT;
589 addr = SAVE_AREA_BASE;
590 prefix = 1;
591 } else
592 prefix = 0;
593
f64ca217 594 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
595 vcpu->arch.guest_fpregs.fprs, 128, prefix))
596 return -EFAULT;
597
f64ca217 598 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
599 vcpu->arch.guest_gprs, 128, prefix))
600 return -EFAULT;
601
f64ca217 602 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
603 &vcpu->arch.sie_block->gpsw, 16, prefix))
604 return -EFAULT;
605
f64ca217 606 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
607 &vcpu->arch.sie_block->prefix, 4, prefix))
608 return -EFAULT;
609
610 if (__guestcopy(vcpu,
f64ca217 611 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
612 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
613 return -EFAULT;
614
f64ca217 615 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
616 &vcpu->arch.sie_block->todpr, 4, prefix))
617 return -EFAULT;
618
f64ca217 619 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
620 &vcpu->arch.sie_block->cputm, 8, prefix))
621 return -EFAULT;
622
f64ca217 623 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
624 &vcpu->arch.sie_block->ckc, 8, prefix))
625 return -EFAULT;
626
f64ca217 627 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
628 &vcpu->arch.guest_acrs, 64, prefix))
629 return -EFAULT;
630
631 if (__guestcopy(vcpu,
f64ca217 632 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
633 &vcpu->arch.sie_block->gcr, 128, prefix))
634 return -EFAULT;
635 return 0;
636}
637
b0c632db
HC
638long kvm_arch_vcpu_ioctl(struct file *filp,
639 unsigned int ioctl, unsigned long arg)
640{
641 struct kvm_vcpu *vcpu = filp->private_data;
642 void __user *argp = (void __user *)arg;
bc923cc9 643 long r;
b0c632db 644
93736624
AK
645 switch (ioctl) {
646 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
647 struct kvm_s390_interrupt s390int;
648
93736624 649 r = -EFAULT;
ba5c1e9b 650 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
651 break;
652 r = kvm_s390_inject_vcpu(vcpu, &s390int);
653 break;
ba5c1e9b 654 }
b0c632db 655 case KVM_S390_STORE_STATUS:
bc923cc9
AK
656 r = kvm_s390_vcpu_store_status(vcpu, arg);
657 break;
b0c632db
HC
658 case KVM_S390_SET_INITIAL_PSW: {
659 psw_t psw;
660
bc923cc9 661 r = -EFAULT;
b0c632db 662 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
663 break;
664 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
665 break;
b0c632db
HC
666 }
667 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
668 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
669 break;
b0c632db 670 default:
bc923cc9 671 r = -EINVAL;
b0c632db 672 }
bc923cc9 673 return r;
b0c632db
HC
674}
675
676/* Section: memory related */
f7784b8e
MT
677int kvm_arch_prepare_memory_region(struct kvm *kvm,
678 struct kvm_memory_slot *memslot,
679 struct kvm_memory_slot old,
680 struct kvm_userspace_memory_region *mem,
681 int user_alloc)
b0c632db
HC
682{
683 /* A few sanity checks. We can have exactly one memory slot which has
684 to start at guest virtual zero and which has to be located at a
685 page boundary in userland and which has to end at a page boundary.
686 The memory in userland is ok to be fragmented into various different
687 vmas. It is okay to mmap() and munmap() stuff in this slot after
688 doing this call at any time */
689
628eb9b8 690 if (mem->slot)
b0c632db
HC
691 return -EINVAL;
692
693 if (mem->guest_phys_addr)
694 return -EINVAL;
695
598841ca 696 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
697 return -EINVAL;
698
598841ca 699 if (mem->memory_size & 0xffffful)
b0c632db
HC
700 return -EINVAL;
701
2668dab7
CO
702 if (!user_alloc)
703 return -EINVAL;
704
f7784b8e
MT
705 return 0;
706}
707
708void kvm_arch_commit_memory_region(struct kvm *kvm,
709 struct kvm_userspace_memory_region *mem,
710 struct kvm_memory_slot old,
711 int user_alloc)
712{
f7850c92 713 int rc;
f7784b8e 714
598841ca
CO
715
716 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
717 mem->guest_phys_addr, mem->memory_size);
718 if (rc)
f7850c92 719 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 720 return;
b0c632db
HC
721}
722
34d4cb8f
MT
723void kvm_arch_flush_shadow(struct kvm *kvm)
724{
725}
726
b0c632db
HC
727static int __init kvm_s390_init(void)
728{
ef50f7ac 729 int ret;
0ee75bea 730 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
731 if (ret)
732 return ret;
733
734 /*
735 * guests can ask for up to 255+1 double words, we need a full page
25985edc 736 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
737 * only set facilities that are known to work in KVM.
738 */
c2f0e8c8 739 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
740 if (!facilities) {
741 kvm_exit();
742 return -ENOMEM;
743 }
14375bc4 744 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 745 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 746 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 747 return 0;
b0c632db
HC
748}
749
750static void __exit kvm_s390_exit(void)
751{
ef50f7ac 752 free_page((unsigned long) facilities);
b0c632db
HC
753 kvm_exit();
754}
755
756module_init(kvm_s390_init);
757module_exit(kvm_s390_exit);
/* (gitweb footer removed — end of file) */