KVM: s390: enable Transactional Execution
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

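        /*
         * The system control area (SCA) holds the SIE block pointers
         * and signaling state bits for every vcpu of this VM.
         */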
        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

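/*
 * Guest FP and access registers live in the host's registers while a
 * vcpu is loaded; the store-status path relies on this lazy switching.
 */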
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb = 6;
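        /*
         * Facility 73 is the transactional-execution facility, facility 50
         * its constrained variant; bit 0x10 in the execution control (ecb)
         * lets SIE interpret transactional execution for the guest.
         */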
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

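        /*
         * The SIE control block and the interception transaction
         * diagnostic block (ITDB) share one page; on a transaction abort
         * the hardware saves the abort diagnostics into the ITDB.
         */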
        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Lets just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

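        /* the SIE block keeps shadow copies of guest gprs 14 and 15 */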
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;
        u64 clkcomp;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

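        /* the save area holds the clock comparator shifted right by
         * eight bits (bits 0-55), as defined for store status */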
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &clkcomp, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Lets update our copies before we save
         * it into the save area
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

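/*
 * For user-controlled VMs, userspace may mmap() the vcpu fd at
 * KVM_S390_SIE_PAGE_OFFSET to get at the SIE control block.
 */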
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. We can have memory slots which have to be
           located/ended at a segment boundary (1MB). The memory in userland is
           ok to be fragmented into various different vmas. It is okay to mmap()
           and munmap() stuff in this slot after doing this call at any time */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
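        /* the masks below include facility bits 50 and 73, which the
         * guest needs for (constrained) transactional execution */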
        vfacilities[0] &= 0xff82fff3f47c2000UL;
        vfacilities[1] &= 0x005c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");