/* arch/s390/kvm/kvm-s390.c */
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

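/*
 * VCPU_STAT expands to an offset into struct kvm_vcpu plus the
 * KVM_STAT_VCPU type tag; generic KVM code walks this NULL-terminated
 * table and exposes one debugfs counter per entry.
 */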
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

static unsigned long long *facilities;
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

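/*
 * kvm_gmap_notifier() (defined below) runs when a guest mapping that was
 * armed for IPTE notification (the vcpu prefix pages) is invalidated, so
 * the affected vcpus can be kicked out of SIE and their prefix mapping
 * re-armed. The notifier is registered once here at hardware-setup time.
 */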
int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

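/*
 * A minimal userspace sketch (not part of this file) of driving the
 * KVM_S390_INTERRUPT vm ioctl handled above; vm_fd is assumed to come
 * from a prior KVM_CREATE_VM, and error handling is omitted:
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type = KVM_S390_INT_VIRTIO,    // a floating interrupt type
 *              .parm = 0,
 *              .parm64 = 0,
 *      };
 *      ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 *
 * Floating interrupts are injected through the vm fd; cpu-local
 * interrupts use the same structure on the vcpu fd (see
 * kvm_arch_vcpu_ioctl() further down).
 */
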
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

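/*
 * Destruction must also undo the vcpu's SCA registration from
 * kvm_arch_vcpu_create(): mcn is a bitmask of valid SCA entries using
 * the architecture's MSB-first bit numbering, hence the "63 - vcpu_id"
 * when clearing the bit below.
 */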
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

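/*
 * Floating point and access registers are switched lazily: on
 * kvm_arch_vcpu_load() the host copies are saved and the guest copies
 * installed, and kvm_arch_vcpu_put() does the reverse. Anything that
 * inspects the guest registers in between must look at the live ones
 * (see the save_fp_regs/save_access_regs calls in store_status below).
 */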
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

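/*
 * First-time setup of the SIE control block: the vcpu starts out in
 * z/Architecture mode and in the stopped state, the ecb/ecb2/eca bits
 * enable the SIE interpretation facilities this version of KVM relies
 * on, and fac points the guest at the KVM-filtered facility list set up
 * in kvm_s390_init().
 */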
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

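/*
 * The guest prefix area spans two consecutive 4K pages; masking bit 12
 * off the notified address below makes the comparison hit on either of
 * the two pages.
 */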
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

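/*
 * One round trip into SIE: flush pending host work (scheduling, machine
 * checks, interrupt delivery, prefix reload requests), then execute the
 * guest via sie64a() with the guest's general purpose registers.
 * sie64a() only returns a negative value if entering SIE itself faulted;
 * the intercept reason is picked up from the SIE block (icptcode)
 * afterwards by the caller.
 */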
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));

        /*
         * As PF_VCPU will be used in the fault handler, there should be
         * no uaccess between guest_enter and guest_exit.
         */
        preempt_disable();
        kvm_guest_enter();
        preempt_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        kvm_guest_exit();

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (rc > 0)
                rc = 0;
        if (rc < 0) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler
                 */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

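/*
 * A rough userspace bring-up sketch for the vcpu ioctls handled above
 * (assumptions: vcpu_fd from KVM_CREATE_VCPU, "run" mmap()ed from the
 * vcpu fd, start_address hypothetical; no error handling):
 *
 *      struct { __u64 mask, addr; } psw = {    // layout matches psw_t
 *              .mask = 0x0000000180000000UL,   // hypothetical: 64-bit mode
 *              .addr = start_address,
 *      };
 *      ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 *      ioctl(vcpu_fd, KVM_RUN, 0);
 *      // run->exit_reason then says why control returned, e.g.
 *      // KVM_EXIT_S390_SIEIC for an intercept that needs userspace.
 */
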
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /*
         * A few sanity checks: memory slots have to start and end on a
         * segment boundary (1MB). The memory in userland may be fragmented
         * into various different vmas, and it is fine to mmap() and munmap()
         * within this slot at any time after this call.
         */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

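/*
 * Illustration of the constraints checked above, for a hypothetical
 * caller (vm_fd and the 1MB-aligned "backing" mmap() are assumptions):
 *
 *      struct kvm_userspace_memory_region region = {
 *              .slot = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size = 256 << 20,               // multiple of 1MB
 *              .userspace_addr = (__u64) backing,      // 1MB aligned
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
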
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /*
         * If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff82fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");