KVM: s390: Split up __vcpu_run into three parts
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

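/*
 * Facility bits that are reported to the guest: populated in
 * kvm_s390_init() from the host's STFLE facility list and masked down
 * to the facilities KVM is known to handle.
 */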
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

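/*
 * VM creation: enable SIE for the host mm, allocate the system control
 * area (SCA) that holds the vcpus' SIE block pointers, register the
 * s390 debug feature and, unless this is a user-controlled (ucontrol)
 * VM, set up the guest address space (gmap).
 */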
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

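/*
 * Guest and host FP/access registers are switched lazily on vcpu
 * load/put instead of on every SIE entry/exit. Code that needs the
 * current guest values while the vcpu is loaded (for example
 * kvm_s390_vcpu_store_status()) therefore has to re-save them first.
 */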
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

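/*
 * One-time setup of the vcpu's SIE control block. The ecb/ecb2/eca
 * values are SIE execution control bits; their exact meaning is
 * defined by the SIE architecture rather than in this file. Setting
 * cpu_id.version to 0xff appears to mark the CPU as a virtual one
 * towards the guest (see the STIDP handling in the intercept code).
 */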
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

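/*
 * Allocate the vcpu and its SIE control block. For non-ucontrol VMs
 * the SIE block is also registered in the SCA (sda entry plus the mcn
 * bitmap); ucontrol VMs do not use the kernel's SCA.
 */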
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

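/*
 * PROG_BLOCK_SIE in the SIE block's prog20 field keeps the vcpu from
 * re-entering SIE; the flag is honored on the SIE entry path outside
 * of this file. It does not kick a vcpu that is already running in
 * SIE, which is what exit_sie() below is for.
 */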
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

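/*
 * Called by the gmap IPTE notifier when a guest mapping is invalidated.
 * If the address belongs to one of the two pages backing a vcpu's
 * prefix area, that vcpu is forced out of SIE and a MMU_RELOAD request
 * is queued so that kvm_s390_handle_requests() re-arms the notifier
 * before the vcpu re-enters the guest.
 */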
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

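/*
 * Guest entry/exit is split into three parts: vcpu_pre_run() handles
 * rescheduling, machine checks, interrupt delivery and pending
 * requests before the SIE entry, __vcpu_run() performs the actual
 * sie64a() call, and vcpu_post_run() turns the hardware exit_reason
 * into a return code for the run loop.
 */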
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

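/*
 * A negative exit_reason means the SIE instruction itself faulted:
 * for ucontrol VMs this is handed to userspace, for normal VMs an
 * addressing exception is injected into the guest.
 */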
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        rc = vcpu_pre_run(vcpu);
        if (rc)
                return rc;

        /*
         * As PF_VCPU will be used in the fault handler, there should be
         * no uaccess between guest_enter and guest_exit.
         */
        preempt_disable();
        kvm_guest_enter();
        preempt_enable();
        exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        kvm_guest_exit();

        rc = vcpu_post_run(vcpu, exit_reason);

        return rc;
}

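/*
 * The main run loop: sync the register state handed in by userspace
 * into the SIE block, run the guest until an intercept needs userspace
 * assistance or a signal arrives, then copy the state back into
 * kvm_run before returning.
 */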
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

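/*
 * Copy to guest memory either via the prefix-relative address space
 * (prefix != 0) or to guest absolute addresses, depending on which
 * store-status variant below is used.
 */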
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

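/*
 * Per-vcpu KVM_ENABLE_CAP. The only capability handled here,
 * KVM_CAP_S390_CSS_SUPPORT, flips the VM-wide css_support flag so that
 * channel subsystem instructions are handled with userspace help (see
 * the KVM_EXIT_S390_TSCH case accepted by the run ioctl above).
 */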
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /*
         * A few sanity checks. We can have memory slots which have to
         * start/end at a segment boundary (1MB). The memory in userland
         * is okay to be fragmented into various different vmas. It is
         * okay to mmap() and munmap() stuff in this slot after doing
         * this call at any time.
         */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f47c0000UL;
        vfacilities[1] &= 0x001c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");