KVM: s390: no timer interrupts when single-stepping a guest
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

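/*
 * Propagate dirty bits from the gmap (guest mapping) into the memslot's
 * dirty bitmap. Called with slots_lock held; mmap_sem is taken so the
 * host page tables stay stable while gmap_test_and_clear_dirty() walks
 * them.
 */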
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
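	/*
	 * Note: the 16-byte stagger above places each VM's SCA at a
	 * different offset within its page (wrapping at 0x7f0), presumably
	 * so that the SCAs of many VMs do not all compete for the same
	 * cache lines.
	 */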

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

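/*
 * Lazy register switching: guest FPU and access registers live in the
 * real registers while the vcpu is loaded; the host copies saved here
 * are restored again in kvm_arch_vcpu_put().
 */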
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

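/*
 * Allocate the collaborative-memory-management buffer list origin
 * (cbrlo) page and adjust the ecb2 control bits; setting 0x80 appears
 * to enable CMMA interpretation by SIE, while bit 0x08 is cleared.
 */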
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

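/*
 * The ONE_REG interface below exposes individual SIE block fields
 * (TOD programmable register, epoch difference, CPU timer, clock
 * comparator, pfault state, ...) to userspace, one value per call.
 */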
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

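/*
 * Enable or disable guest debugging. With KVM_GUESTDBG_ENABLE set,
 * PER (program-event recording) is forced on via CPUSTAT_P so that
 * single-step and hardware breakpoint events reach the host;
 * breakpoint data is imported when KVM_GUESTDBG_USE_HW_BP is given.
 */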
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

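/*
 * Fault a guest page in synchronously: resolve the host address of
 * the faulting gmap address and drive get_user_pages() on it.
 */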
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

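/*
 * Deliver a pseudo-page-fault token to the guest: PFAULT_INIT is
 * injected as a local vcpu interrupt when the fault is first noticed,
 * PFAULT_DONE as a floating interrupt once the page is available.
 */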
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

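/*
 * Post-process a SIE exit: host faults are turned into async pfault
 * notifications or resolved synchronously, ucontrol exits are mapped
 * to KVM_EXIT_S390_UCONTROL, and remaining intercepts are handed to
 * kvm_handle_sie_intercept().
 */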
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

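/*
 * Main run loop: alternate between in-kernel preparation and SIE entry
 * until a signal, a pending debug exit, or an error/unhandled intercept
 * forces a return to userspace.
 */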
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
	case KVM_EXIT_DEBUG:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &vcpu->arch.sie_block->prefix, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");