KVM: s390: step VCPU cpu timer during kvm_run ioctl
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db 5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
15f36ebd 14 * Jason J. Herne <jjherne@us.ibm.com>
b0c632db 15 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
ca872302 20#include <linux/hrtimer.h>
b0c632db 21#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
a374e892 25#include <linux/random.h>
b0c632db 26#include <linux/slab.h>
ba5c1e9b 27#include <linux/timer.h>
41408c28 28#include <linux/vmalloc.h>
cbb870c8 29#include <asm/asm-offsets.h>
b0c632db 30#include <asm/lowcore.h>
fdf03650 31#include <asm/etr.h>
b0c632db 32#include <asm/pgtable.h>
f5daba1d 33#include <asm/nmi.h>
a0616cde 34#include <asm/switch_to.h>
6d3da241 35#include <asm/isc.h>
1526bf9c 36#include <asm/sclp.h>
8f2abe6a 37#include "kvm-s390.h"
b0c632db 38#include "gaccess.h"
39
ea2cdd27 40#define KMSG_COMPONENT "kvm-s390"
41#undef pr_fmt
42#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
5786fffa 44#define CREATE_TRACE_POINTS
45#include "trace.h"
ade38c31 46#include "trace-s390.h"
5786fffa 47
41408c28 48#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
816c7667 49#define LOCAL_IRQS 32
50#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
41408c28 52
b0c632db 53#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
55struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 57 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a 58 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b 62 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f7819512 65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
62bea5bf 66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
ce2e4f0b 67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
f5e10b09 68 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b 69 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
aba07508 70 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
71 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
ba5c1e9b 72 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 73 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b 74 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
75 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
76 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
77 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
78 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
79 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
80 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
69d0d3a3 81 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
453423dc 82 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
83 { "instruction_spx", VCPU_STAT(instruction_spx) },
84 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
85 { "instruction_stap", VCPU_STAT(instruction_stap) },
86 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
8a242234 87 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
453423dc 88 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
89 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
b31288fa 90 { "instruction_essa", VCPU_STAT(instruction_essa) },
453423dc 91 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
92 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 93 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 94 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 95 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 96 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0 97 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
42cb0c9f 98 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
99 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
5288fbf0 100 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
42cb0c9f 101 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
102 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
cd7b4b61 103 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
5288fbf0 104 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
105 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
106 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
42cb0c9f 107 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
108 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
109 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
388186bc 110 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 111 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 112 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
175a5c9e 113 { "diagnose_258", VCPU_STAT(diagnose_258) },
114 { "diagnose_308", VCPU_STAT(diagnose_308) },
115 { "diagnose_500", VCPU_STAT(diagnose_500) },
b0c632db 116 { NULL }
117};
118
9d8d5786 119/* upper facilities limit for kvm */
120unsigned long kvm_s390_fac_list_mask[] = {
a3ed8dae 121 0xffe6fffbfcfdfc40UL,
53df84f8 122 0x005e800000000000UL,
9d8d5786 123};
b0c632db 124
9d8d5786 125unsigned long kvm_s390_fac_list_mask_size(void)
78c4b59f 126{
9d8d5786 127 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
128 return ARRAY_SIZE(kvm_s390_fac_list_mask);
78c4b59f 129}
130
9d8d5786 131static struct gmap_notifier gmap_notifier;
78f26131 132debug_info_t *kvm_s390_dbf;
9d8d5786 133
b0c632db 134/* Section: not file related */
13a34e06 135int kvm_arch_hardware_enable(void)
b0c632db 136{
137 /* every s390 is virtualization enabled ;-) */
10474ae8 138 return 0;
b0c632db 139}
140
2c70fe44 141static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
142
fdf03650 143/*
144 * This callback is executed during stop_machine(). All CPUs are therefore
145 * temporarily stopped. In order not to change guest behavior, we have to
146 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
147 * so a CPU won't be stopped while calculating with the epoch.
148 */
149static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
150 void *v)
151{
152 struct kvm *kvm;
153 struct kvm_vcpu *vcpu;
154 int i;
155 unsigned long long *delta = v;
156
157 list_for_each_entry(kvm, &vm_list, vm_list) {
158 kvm->arch.epoch -= *delta;
159 kvm_for_each_vcpu(i, vcpu, kvm) {
160 vcpu->arch.sie_block->epoch -= *delta;
db0758b2 161 if (vcpu->arch.cputm_enabled)
162 vcpu->arch.cputm_start += *delta;
fdf03650 163 }
164 }
165 return NOTIFY_OK;
166}
167
168static struct notifier_block kvm_clock_notifier = {
169 .notifier_call = kvm_clock_sync,
170};
171
b0c632db 172int kvm_arch_hardware_setup(void)
173{
2c70fe44 174 gmap_notifier.notifier_call = kvm_gmap_notifier;
175 gmap_register_ipte_notifier(&gmap_notifier);
fdf03650 176 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
177 &kvm_clock_notifier);
b0c632db 178 return 0;
179}
180
181void kvm_arch_hardware_unsetup(void)
182{
2c70fe44 183 gmap_unregister_ipte_notifier(&gmap_notifier);
fdf03650 184 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
185 &kvm_clock_notifier);
b0c632db 186}
187
b0c632db 188int kvm_arch_init(void *opaque)
189{
78f26131 190 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
191 if (!kvm_s390_dbf)
192 return -ENOMEM;
193
194 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
195 debug_unregister(kvm_s390_dbf);
196 return -ENOMEM;
197 }
198
84877d93 199 /* Register floating interrupt controller interface. */
200 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
b0c632db 201}
202
78f26131 203void kvm_arch_exit(void)
204{
205 debug_unregister(kvm_s390_dbf);
206}
207
b0c632db 208/* Section: device related */
209long kvm_arch_dev_ioctl(struct file *filp,
210 unsigned int ioctl, unsigned long arg)
211{
212 if (ioctl == KVM_S390_ENABLE_SIE)
213 return s390_enable_sie();
214 return -EINVAL;
215}
216
784aa3d7 217int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
b0c632db 218{
d7b0b5eb 219 int r;
220
2bd0ac4e 221 switch (ext) {
d7b0b5eb 222 case KVM_CAP_S390_PSW:
b6cf8788 223 case KVM_CAP_S390_GMAP:
52e16b18 224 case KVM_CAP_SYNC_MMU:
1efd0f59 225#ifdef CONFIG_KVM_S390_UCONTROL
226 case KVM_CAP_S390_UCONTROL:
227#endif
3c038e6b 228 case KVM_CAP_ASYNC_PF:
60b413c9 229 case KVM_CAP_SYNC_REGS:
14eebd91 230 case KVM_CAP_ONE_REG:
d6712df9 231 case KVM_CAP_ENABLE_CAP:
fa6b7fe9 232 case KVM_CAP_S390_CSS_SUPPORT:
10ccaa1e 233 case KVM_CAP_IOEVENTFD:
c05c4186 234 case KVM_CAP_DEVICE_CTRL:
d938dc55 235 case KVM_CAP_ENABLE_CAP_VM:
78599d90 236 case KVM_CAP_S390_IRQCHIP:
f2061656 237 case KVM_CAP_VM_ATTRIBUTES:
6352e4d2 238 case KVM_CAP_MP_STATE:
47b43c52 239 case KVM_CAP_S390_INJECT_IRQ:
2444b352 240 case KVM_CAP_S390_USER_SIGP:
e44fc8c9 241 case KVM_CAP_S390_USER_STSI:
30ee2a98 242 case KVM_CAP_S390_SKEYS:
816c7667 243 case KVM_CAP_S390_IRQ_STATE:
d7b0b5eb 244 r = 1;
245 break;
41408c28 246 case KVM_CAP_S390_MEM_OP:
247 r = MEM_OP_MAX_SIZE;
248 break;
e726b1bd 249 case KVM_CAP_NR_VCPUS:
250 case KVM_CAP_MAX_VCPUS:
fe0edcb7 251 r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
252 : KVM_S390_BSCA_CPU_SLOTS;
e726b1bd 253 break;
e1e2e605 254 case KVM_CAP_NR_MEMSLOTS:
255 r = KVM_USER_MEM_SLOTS;
256 break;
1526bf9c 257 case KVM_CAP_S390_COW:
abf09bed 258 r = MACHINE_HAS_ESOP;
1526bf9c 259 break;
68c55750 260 case KVM_CAP_S390_VECTOR_REGISTERS:
261 r = MACHINE_HAS_VX;
262 break;
c6e5f166 263 case KVM_CAP_S390_RI:
264 r = test_facility(64);
265 break;
2bd0ac4e 266 default:
d7b0b5eb 267 r = 0;
2bd0ac4e 268 }
d7b0b5eb 269 return r;
b0c632db 270}
271
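/*
 * Walk all pages of the memory slot and transfer the dirty bits collected
 * in the host gmap into kvm's dirty bitmap. Called with slots_lock held.
 */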
15f36ebd 272static void kvm_s390_sync_dirty_log(struct kvm *kvm,
273 struct kvm_memory_slot *memslot)
274{
275 gfn_t cur_gfn, last_gfn;
276 unsigned long address;
277 struct gmap *gmap = kvm->arch.gmap;
278
15f36ebd 279 /* Loop over all guest pages */
280 last_gfn = memslot->base_gfn + memslot->npages;
281 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
282 address = gfn_to_hva_memslot(memslot, cur_gfn);
283
284 if (gmap_test_and_clear_dirty(address, gmap))
285 mark_page_dirty(kvm, cur_gfn);
1763f8d0 286 if (fatal_signal_pending(current))
287 return;
70c88a00 288 cond_resched();
15f36ebd 289 }
15f36ebd 290}
291
b0c632db 292/* Section: vm related */
a6e2f683 293static void sca_del_vcpu(struct kvm_vcpu *vcpu);
294
b0c632db 295/*
296 * Get (and clear) the dirty memory log for a memory slot.
297 */
298int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
299 struct kvm_dirty_log *log)
300{
15f36ebd 301 int r;
302 unsigned long n;
9f6b8029 303 struct kvm_memslots *slots;
15f36ebd 304 struct kvm_memory_slot *memslot;
305 int is_dirty = 0;
306
307 mutex_lock(&kvm->slots_lock);
308
309 r = -EINVAL;
310 if (log->slot >= KVM_USER_MEM_SLOTS)
311 goto out;
312
9f6b8029 313 slots = kvm_memslots(kvm);
314 memslot = id_to_memslot(slots, log->slot);
15f36ebd 315 r = -ENOENT;
316 if (!memslot->dirty_bitmap)
317 goto out;
318
319 kvm_s390_sync_dirty_log(kvm, memslot);
320 r = kvm_get_dirty_log(kvm, log, &is_dirty);
321 if (r)
322 goto out;
323
324 /* Clear the dirty log */
325 if (is_dirty) {
326 n = kvm_dirty_bitmap_bytes(memslot);
327 memset(memslot->dirty_bitmap, 0, n);
328 }
329 r = 0;
330out:
331 mutex_unlock(&kvm->slots_lock);
332 return r;
b0c632db 333}
334
d938dc55 335static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
336{
337 int r;
338
339 if (cap->flags)
340 return -EINVAL;
341
342 switch (cap->cap) {
84223598 343 case KVM_CAP_S390_IRQCHIP:
c92ea7b9 344 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
84223598 345 kvm->arch.use_irqchip = 1;
346 r = 0;
347 break;
2444b352 348 case KVM_CAP_S390_USER_SIGP:
c92ea7b9 349 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
2444b352 350 kvm->arch.user_sigp = 1;
351 r = 0;
352 break;
68c55750 353 case KVM_CAP_S390_VECTOR_REGISTERS:
5967c17b 354 mutex_lock(&kvm->lock);
355 if (atomic_read(&kvm->online_vcpus)) {
356 r = -EBUSY;
357 } else if (MACHINE_HAS_VX) {
18280d8b 358 set_kvm_facility(kvm->arch.model.fac->mask, 129);
359 set_kvm_facility(kvm->arch.model.fac->list, 129);
360 r = 0;
361 } else
362 r = -EINVAL;
5967c17b 363 mutex_unlock(&kvm->lock);
c92ea7b9 364 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
365 r ? "(not available)" : "(success)");
68c55750 366 break;
c6e5f166 367 case KVM_CAP_S390_RI:
368 r = -EINVAL;
369 mutex_lock(&kvm->lock);
370 if (atomic_read(&kvm->online_vcpus)) {
371 r = -EBUSY;
372 } else if (test_facility(64)) {
373 set_kvm_facility(kvm->arch.model.fac->mask, 64);
374 set_kvm_facility(kvm->arch.model.fac->list, 64);
375 r = 0;
376 }
377 mutex_unlock(&kvm->lock);
378 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
379 r ? "(not available)" : "(success)");
380 break;
e44fc8c9 381 case KVM_CAP_S390_USER_STSI:
c92ea7b9 382 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
e44fc8c9 383 kvm->arch.user_stsi = 1;
384 r = 0;
385 break;
d938dc55 386 default:
387 r = -EINVAL;
388 break;
389 }
390 return r;
391}
392
8c0a7ce6 393static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
394{
395 int ret;
396
397 switch (attr->attr) {
398 case KVM_S390_VM_MEM_LIMIT_SIZE:
399 ret = 0;
c92ea7b9 400 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
a3a92c31 401 kvm->arch.mem_limit);
402 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
8c0a7ce6 403 ret = -EFAULT;
404 break;
405 default:
406 ret = -ENXIO;
407 break;
408 }
409 return ret;
410}
411
412static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
4f718eab 413{
414 int ret;
415 unsigned int idx;
416 switch (attr->attr) {
417 case KVM_S390_VM_MEM_ENABLE_CMMA:
e6db1d61 418 /* enable CMMA only for z10 and later (EDAT_1) */
419 ret = -EINVAL;
420 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
421 break;
422
4f718eab 423 ret = -EBUSY;
c92ea7b9 424 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
4f718eab 425 mutex_lock(&kvm->lock);
426 if (atomic_read(&kvm->online_vcpus) == 0) {
427 kvm->arch.use_cmma = 1;
428 ret = 0;
429 }
430 mutex_unlock(&kvm->lock);
431 break;
432 case KVM_S390_VM_MEM_CLR_CMMA:
c3489155 433 ret = -EINVAL;
434 if (!kvm->arch.use_cmma)
435 break;
436
c92ea7b9 437 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
4f718eab 438 mutex_lock(&kvm->lock);
439 idx = srcu_read_lock(&kvm->srcu);
a13cff31 440 s390_reset_cmma(kvm->arch.gmap->mm);
4f718eab 441 srcu_read_unlock(&kvm->srcu, idx);
442 mutex_unlock(&kvm->lock);
443 ret = 0;
444 break;
8c0a7ce6 445 case KVM_S390_VM_MEM_LIMIT_SIZE: {
446 unsigned long new_limit;
447
448 if (kvm_is_ucontrol(kvm))
449 return -EINVAL;
450
451 if (get_user(new_limit, (u64 __user *)attr->addr))
452 return -EFAULT;
453
a3a92c31 454 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
455 new_limit > kvm->arch.mem_limit)
8c0a7ce6 456 return -E2BIG;
457
a3a92c31 458 if (!new_limit)
459 return -EINVAL;
460
461 /* gmap_alloc takes last usable address */
462 if (new_limit != KVM_S390_NO_MEM_LIMIT)
463 new_limit -= 1;
464
8c0a7ce6 465 ret = -EBUSY;
466 mutex_lock(&kvm->lock);
467 if (atomic_read(&kvm->online_vcpus) == 0) {
468 /* gmap_alloc will round the limit up */
469 struct gmap *new = gmap_alloc(current->mm, new_limit);
470
471 if (!new) {
472 ret = -ENOMEM;
473 } else {
474 gmap_free(kvm->arch.gmap);
475 new->private = kvm;
476 kvm->arch.gmap = new;
477 ret = 0;
478 }
479 }
480 mutex_unlock(&kvm->lock);
a3a92c31 481 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
482 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
483 (void *) kvm->arch.gmap->asce);
8c0a7ce6 484 break;
485 }
4f718eab 486 default:
487 ret = -ENXIO;
488 break;
489 }
490 return ret;
491}
492
a374e892 493static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
494
495static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
496{
497 struct kvm_vcpu *vcpu;
498 int i;
499
9d8d5786 500 if (!test_kvm_facility(kvm, 76))
a374e892 501 return -EINVAL;
502
503 mutex_lock(&kvm->lock);
504 switch (attr->attr) {
505 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
506 get_random_bytes(
507 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
508 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
509 kvm->arch.crypto.aes_kw = 1;
c92ea7b9 510 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
a374e892 511 break;
512 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
513 get_random_bytes(
514 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
515 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
516 kvm->arch.crypto.dea_kw = 1;
c92ea7b9 517 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
a374e892 518 break;
519 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
520 kvm->arch.crypto.aes_kw = 0;
521 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
522 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
c92ea7b9 523 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
a374e892 524 break;
525 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
526 kvm->arch.crypto.dea_kw = 0;
527 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
528 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
c92ea7b9 529 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
a374e892 530 break;
531 default:
532 mutex_unlock(&kvm->lock);
533 return -ENXIO;
534 }
535
536 kvm_for_each_vcpu(i, vcpu, kvm) {
537 kvm_s390_vcpu_crypto_setup(vcpu);
538 exit_sie(vcpu);
539 }
540 mutex_unlock(&kvm->lock);
541 return 0;
542}
543
72f25020 544static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
545{
546 u8 gtod_high;
547
548 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
549 sizeof(gtod_high)))
550 return -EFAULT;
551
552 if (gtod_high != 0)
553 return -EINVAL;
58c383c6 554 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
72f25020 555
556 return 0;
557}
558
559static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
560{
5a3d883a 561 u64 gtod;
72f25020 562
563 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
564 return -EFAULT;
565
25ed1675 566 kvm_s390_set_tod_clock(kvm, gtod);
58c383c6 567 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
72f25020 568 return 0;
569}
570
571static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
572{
573 int ret;
574
575 if (attr->flags)
576 return -EINVAL;
577
578 switch (attr->attr) {
579 case KVM_S390_VM_TOD_HIGH:
580 ret = kvm_s390_set_tod_high(kvm, attr);
581 break;
582 case KVM_S390_VM_TOD_LOW:
583 ret = kvm_s390_set_tod_low(kvm, attr);
584 break;
585 default:
586 ret = -ENXIO;
587 break;
588 }
589 return ret;
590}
591
592static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
593{
594 u8 gtod_high = 0;
595
596 if (copy_to_user((void __user *)attr->addr, &gtod_high,
597 sizeof(gtod_high)))
598 return -EFAULT;
58c383c6 599 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
72f25020 600
601 return 0;
602}
603
604static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
605{
5a3d883a 606 u64 gtod;
72f25020 607
60417fcc 608 gtod = kvm_s390_get_tod_clock_fast(kvm);
72f25020 609 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
610 return -EFAULT;
58c383c6 611 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
72f25020 612
613 return 0;
614}
615
616static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
617{
618 int ret;
619
620 if (attr->flags)
621 return -EINVAL;
622
623 switch (attr->attr) {
624 case KVM_S390_VM_TOD_HIGH:
625 ret = kvm_s390_get_tod_high(kvm, attr);
626 break;
627 case KVM_S390_VM_TOD_LOW:
628 ret = kvm_s390_get_tod_low(kvm, attr);
629 break;
630 default:
631 ret = -ENXIO;
632 break;
633 }
634 return ret;
635}
636
658b6eda 637static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
638{
639 struct kvm_s390_vm_cpu_processor *proc;
640 int ret = 0;
641
642 mutex_lock(&kvm->lock);
643 if (atomic_read(&kvm->online_vcpus)) {
644 ret = -EBUSY;
645 goto out;
646 }
647 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
648 if (!proc) {
649 ret = -ENOMEM;
650 goto out;
651 }
652 if (!copy_from_user(proc, (void __user *)attr->addr,
653 sizeof(*proc))) {
654 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
655 sizeof(struct cpuid));
656 kvm->arch.model.ibc = proc->ibc;
981467c9 657 memcpy(kvm->arch.model.fac->list, proc->fac_list,
658b6eda 658 S390_ARCH_FAC_LIST_SIZE_BYTE);
659 } else
660 ret = -EFAULT;
661 kfree(proc);
662out:
663 mutex_unlock(&kvm->lock);
664 return ret;
665}
666
667static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
668{
669 int ret = -ENXIO;
670
671 switch (attr->attr) {
672 case KVM_S390_VM_CPU_PROCESSOR:
673 ret = kvm_s390_set_processor(kvm, attr);
674 break;
675 }
676 return ret;
677}
678
679static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
680{
681 struct kvm_s390_vm_cpu_processor *proc;
682 int ret = 0;
683
684 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
685 if (!proc) {
686 ret = -ENOMEM;
687 goto out;
688 }
689 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
690 proc->ibc = kvm->arch.model.ibc;
981467c9 691 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda 692 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
693 ret = -EFAULT;
694 kfree(proc);
695out:
696 return ret;
697}
698
699static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
700{
701 struct kvm_s390_vm_cpu_machine *mach;
702 int ret = 0;
703
704 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
705 if (!mach) {
706 ret = -ENOMEM;
707 goto out;
708 }
709 get_cpu_id((struct cpuid *) &mach->cpuid);
37c5f6c8 710 mach->ibc = sclp.ibc;
981467c9 711 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
712 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda 713 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
94422ee8 714 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda 715 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
716 ret = -EFAULT;
717 kfree(mach);
718out:
719 return ret;
720}
721
722static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
723{
724 int ret = -ENXIO;
725
726 switch (attr->attr) {
727 case KVM_S390_VM_CPU_PROCESSOR:
728 ret = kvm_s390_get_processor(kvm, attr);
729 break;
730 case KVM_S390_VM_CPU_MACHINE:
731 ret = kvm_s390_get_machine(kvm, attr);
732 break;
733 }
734 return ret;
735}
736
f2061656 737static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
738{
739 int ret;
740
741 switch (attr->group) {
4f718eab 742 case KVM_S390_VM_MEM_CTRL:
8c0a7ce6 743 ret = kvm_s390_set_mem_control(kvm, attr);
4f718eab 744 break;
72f25020 745 case KVM_S390_VM_TOD:
746 ret = kvm_s390_set_tod(kvm, attr);
747 break;
658b6eda 748 case KVM_S390_VM_CPU_MODEL:
749 ret = kvm_s390_set_cpu_model(kvm, attr);
750 break;
a374e892 751 case KVM_S390_VM_CRYPTO:
752 ret = kvm_s390_vm_set_crypto(kvm, attr);
753 break;
f2061656 754 default:
755 ret = -ENXIO;
756 break;
757 }
758
759 return ret;
760}
761
762static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
763{
8c0a7ce6 764 int ret;
765
766 switch (attr->group) {
767 case KVM_S390_VM_MEM_CTRL:
768 ret = kvm_s390_get_mem_control(kvm, attr);
769 break;
72f25020 770 case KVM_S390_VM_TOD:
771 ret = kvm_s390_get_tod(kvm, attr);
772 break;
658b6eda 773 case KVM_S390_VM_CPU_MODEL:
774 ret = kvm_s390_get_cpu_model(kvm, attr);
775 break;
8c0a7ce6 776 default:
777 ret = -ENXIO;
778 break;
779 }
780
781 return ret;
f2061656 782}
783
784static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
785{
786 int ret;
787
788 switch (attr->group) {
4f718eab 789 case KVM_S390_VM_MEM_CTRL:
790 switch (attr->attr) {
791 case KVM_S390_VM_MEM_ENABLE_CMMA:
792 case KVM_S390_VM_MEM_CLR_CMMA:
8c0a7ce6 793 case KVM_S390_VM_MEM_LIMIT_SIZE:
4f718eab 794 ret = 0;
795 break;
796 default:
797 ret = -ENXIO;
798 break;
799 }
800 break;
72f25020 801 case KVM_S390_VM_TOD:
802 switch (attr->attr) {
803 case KVM_S390_VM_TOD_LOW:
804 case KVM_S390_VM_TOD_HIGH:
805 ret = 0;
806 break;
807 default:
808 ret = -ENXIO;
809 break;
810 }
811 break;
658b6eda 812 case KVM_S390_VM_CPU_MODEL:
813 switch (attr->attr) {
814 case KVM_S390_VM_CPU_PROCESSOR:
815 case KVM_S390_VM_CPU_MACHINE:
816 ret = 0;
817 break;
818 default:
819 ret = -ENXIO;
820 break;
821 }
822 break;
a374e892 823 case KVM_S390_VM_CRYPTO:
824 switch (attr->attr) {
825 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
826 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
827 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
828 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
829 ret = 0;
830 break;
831 default:
832 ret = -ENXIO;
833 break;
834 }
835 break;
f2061656 836 default:
837 ret = -ENXIO;
838 break;
839 }
840
841 return ret;
842}
843
30ee2a98 844static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
845{
846 uint8_t *keys;
847 uint64_t hva;
848 unsigned long curkey;
849 int i, r = 0;
850
851 if (args->flags != 0)
852 return -EINVAL;
853
854 /* Is this guest using storage keys? */
855 if (!mm_use_skey(current->mm))
856 return KVM_S390_GET_SKEYS_NONE;
857
858 /* Enforce sane limit on memory allocation */
859 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
860 return -EINVAL;
861
862 keys = kmalloc_array(args->count, sizeof(uint8_t),
863 GFP_KERNEL | __GFP_NOWARN);
864 if (!keys)
865 keys = vmalloc(sizeof(uint8_t) * args->count);
866 if (!keys)
867 return -ENOMEM;
868
869 for (i = 0; i < args->count; i++) {
870 hva = gfn_to_hva(kvm, args->start_gfn + i);
871 if (kvm_is_error_hva(hva)) {
872 r = -EFAULT;
873 goto out;
874 }
875
876 curkey = get_guest_storage_key(current->mm, hva);
877 if (IS_ERR_VALUE(curkey)) {
878 r = curkey;
879 goto out;
880 }
881 keys[i] = curkey;
882 }
883
884 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
885 sizeof(uint8_t) * args->count);
886 if (r)
887 r = -EFAULT;
888out:
889 kvfree(keys);
890 return r;
891}
892
893static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
894{
895 uint8_t *keys;
896 uint64_t hva;
897 int i, r = 0;
898
899 if (args->flags != 0)
900 return -EINVAL;
901
902 /* Enforce sane limit on memory allocation */
903 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
904 return -EINVAL;
905
906 keys = kmalloc_array(args->count, sizeof(uint8_t),
907 GFP_KERNEL | __GFP_NOWARN);
908 if (!keys)
909 keys = vmalloc(sizeof(uint8_t) * args->count);
910 if (!keys)
911 return -ENOMEM;
912
913 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
914 sizeof(uint8_t) * args->count);
915 if (r) {
916 r = -EFAULT;
917 goto out;
918 }
919
920 /* Enable storage key handling for the guest */
14d4a425 921 r = s390_enable_skey();
922 if (r)
923 goto out;
30ee2a98 924
925 for (i = 0; i < args->count; i++) {
926 hva = gfn_to_hva(kvm, args->start_gfn + i);
927 if (kvm_is_error_hva(hva)) {
928 r = -EFAULT;
929 goto out;
930 }
931
932 /* Lowest order bit is reserved */
933 if (keys[i] & 0x01) {
934 r = -EINVAL;
935 goto out;
936 }
937
938 r = set_guest_storage_key(current->mm, hva,
939 (unsigned long)keys[i], 0);
940 if (r)
941 goto out;
942 }
943out:
944 kvfree(keys);
945 return r;
946}
947
b0c632db 948long kvm_arch_vm_ioctl(struct file *filp,
949 unsigned int ioctl, unsigned long arg)
950{
951 struct kvm *kvm = filp->private_data;
952 void __user *argp = (void __user *)arg;
f2061656 953 struct kvm_device_attr attr;
b0c632db 954 int r;
955
956 switch (ioctl) {
ba5c1e9b 957 case KVM_S390_INTERRUPT: {
958 struct kvm_s390_interrupt s390int;
959
960 r = -EFAULT;
961 if (copy_from_user(&s390int, argp, sizeof(s390int)))
962 break;
963 r = kvm_s390_inject_vm(kvm, &s390int);
964 break;
965 }
d938dc55 966 case KVM_ENABLE_CAP: {
967 struct kvm_enable_cap cap;
968 r = -EFAULT;
969 if (copy_from_user(&cap, argp, sizeof(cap)))
970 break;
971 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
972 break;
973 }
84223598 974 case KVM_CREATE_IRQCHIP: {
975 struct kvm_irq_routing_entry routing;
976
977 r = -EINVAL;
978 if (kvm->arch.use_irqchip) {
979 /* Set up dummy routing. */
980 memset(&routing, 0, sizeof(routing));
152b2839 981 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
84223598 982 }
983 break;
984 }
f2061656 985 case KVM_SET_DEVICE_ATTR: {
986 r = -EFAULT;
987 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
988 break;
989 r = kvm_s390_vm_set_attr(kvm, &attr);
990 break;
991 }
992 case KVM_GET_DEVICE_ATTR: {
993 r = -EFAULT;
994 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
995 break;
996 r = kvm_s390_vm_get_attr(kvm, &attr);
997 break;
998 }
999 case KVM_HAS_DEVICE_ATTR: {
1000 r = -EFAULT;
1001 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1002 break;
1003 r = kvm_s390_vm_has_attr(kvm, &attr);
1004 break;
1005 }
30ee2a98 1006 case KVM_S390_GET_SKEYS: {
1007 struct kvm_s390_skeys args;
1008
1009 r = -EFAULT;
1010 if (copy_from_user(&args, argp,
1011 sizeof(struct kvm_s390_skeys)))
1012 break;
1013 r = kvm_s390_get_skeys(kvm, &args);
1014 break;
1015 }
1016 case KVM_S390_SET_SKEYS: {
1017 struct kvm_s390_skeys args;
1018
1019 r = -EFAULT;
1020 if (copy_from_user(&args, argp,
1021 sizeof(struct kvm_s390_skeys)))
1022 break;
1023 r = kvm_s390_set_skeys(kvm, &args);
1024 break;
1025 }
b0c632db 1026 default:
367e1319 1027 r = -ENOTTY;
b0c632db 1028 }
1029
1030 return r;
1031}
1032
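/*
 * Issue the PQAP(QCI) instruction to copy the AP configuration
 * information into the 128-byte buffer at @config; the condition
 * code of the instruction is returned (0 on success).
 */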
45c9b47c 1033static int kvm_s390_query_ap_config(u8 *config)
1034{
1035 u32 fcn_code = 0x04000000UL;
86044c8c 1036 u32 cc = 0;
45c9b47c 1037
86044c8c 1038 memset(config, 0, 128);
45c9b47c 1039 asm volatile(
1040 "lgr 0,%1\n"
1041 "lgr 2,%2\n"
1042 ".long 0xb2af0000\n" /* PQAP(QCI) */
86044c8c 1043 "0: ipm %0\n"
45c9b47c 1044 "srl %0,28\n"
86044c8c 1045 "1:\n"
1046 EX_TABLE(0b, 1b)
1047 : "+r" (cc)
45c9b47c 1048 : "r" (fcn_code), "r" (config)
1049 : "cc", "0", "2", "memory"
1050 );
1051
1052 return cc;
1053}
1054
1055static int kvm_s390_apxa_installed(void)
1056{
1057 u8 config[128];
1058 int cc;
1059
a6aacc3f 1060 if (test_facility(12)) {
45c9b47c 1061 cc = kvm_s390_query_ap_config(config);
1062
1063 if (cc)
1064 pr_err("PQAP(QCI) failed with cc=%d", cc);
1065 else
1066 return config[0] & 0x40;
1067 }
1068
1069 return 0;
1070}
1071
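/* Select CRYCB format 2 when APXA is available, format 1 otherwise. */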
1072static void kvm_s390_set_crycb_format(struct kvm *kvm)
1073{
1074 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1075
1076 if (kvm_s390_apxa_installed())
1077 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1078 else
1079 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1080}
1081
9d8d5786 1082static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
1083{
1084 get_cpu_id(cpu_id);
1085 cpu_id->version = 0xff;
1086}
1087
5102ee87 1088static int kvm_s390_crypto_init(struct kvm *kvm)
1089{
9d8d5786 1090 if (!test_kvm_facility(kvm, 76))
5102ee87 1091 return 0;
1092
1093 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1094 GFP_KERNEL | GFP_DMA);
1095 if (!kvm->arch.crypto.crycb)
1096 return -ENOMEM;
1097
45c9b47c 1098 kvm_s390_set_crycb_format(kvm);
5102ee87 1099
ed6f76b4 1100 /* Enable AES/DEA protected key functions by default */
1101 kvm->arch.crypto.aes_kw = 1;
1102 kvm->arch.crypto.dea_kw = 1;
1103 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1104 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1105 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1106 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
a374e892 1107
5102ee87 1108 return 0;
1109}
1110
7d43bafc 1111static void sca_dispose(struct kvm *kvm)
1112{
1113 if (kvm->arch.use_esca)
5e044315 1114 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
7d43bafc 1115 else
1116 free_page((unsigned long)(kvm->arch.sca));
1117 kvm->arch.sca = NULL;
1118}
1119
e08b9637 1120int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 1121{
9d8d5786 1122 int i, rc;
b0c632db 1123 char debug_name[16];
f6c137ff 1124 static unsigned long sca_offset;
b0c632db 1125
e08b9637 1126 rc = -EINVAL;
1127#ifdef CONFIG_KVM_S390_UCONTROL
1128 if (type & ~KVM_VM_S390_UCONTROL)
1129 goto out_err;
1130 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1131 goto out_err;
1132#else
1133 if (type)
1134 goto out_err;
1135#endif
1136
b0c632db 1137 rc = s390_enable_sie();
1138 if (rc)
d89f5eff 1139 goto out_err;
b0c632db 1140
b290411a 1141 rc = -ENOMEM;
1142
7d43bafc 1143 kvm->arch.use_esca = 0; /* start with basic SCA */
5e044315 1144 rwlock_init(&kvm->arch.sca_lock);
bc784cce 1145 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
b0c632db 1146 if (!kvm->arch.sca)
d89f5eff 1147 goto out_err;
f6c137ff 1148 spin_lock(&kvm_lock);
c5c2c393 1149 sca_offset += 16;
bc784cce 1150 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
c5c2c393 1151 sca_offset = 0;
bc784cce 1152 kvm->arch.sca = (struct bsca_block *)
1153 ((char *) kvm->arch.sca + sca_offset);
f6c137ff 1154 spin_unlock(&kvm_lock);
b0c632db 1155
1156 sprintf(debug_name, "kvm-%u", current->pid);
1157
1cb9cf72 1158 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
b0c632db 1159 if (!kvm->arch.dbf)
40f5b735 1160 goto out_err;
b0c632db 1161
9d8d5786 1162 /*
1163 * The architectural maximum amount of facilities is 16 kbit. To store
1164 * this amount, 2 kbyte of memory is required. Thus we need a full
981467c9 1165 * page to hold the guest facility list (arch.model.fac->list) and the
1166 * facility mask (arch.model.fac->mask). Its address size has to be
9d8d5786 1167 * 31 bits and word aligned.
1168 */
1169 kvm->arch.model.fac =
981467c9 1170 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9d8d5786 1171 if (!kvm->arch.model.fac)
40f5b735 1172 goto out_err;
9d8d5786 1173
fb5bf93f 1174 /* Populate the facility mask initially. */
981467c9 1175 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
94422ee8 1176 S390_ARCH_FAC_LIST_SIZE_BYTE);
9d8d5786 1177 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1178 if (i < kvm_s390_fac_list_mask_size())
981467c9 1179 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9d8d5786 1180 else
981467c9 1181 kvm->arch.model.fac->mask[i] = 0UL;
9d8d5786 1182 }
1183
981467c9 1184 /* Populate the facility list initially. */
1185 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1186 S390_ARCH_FAC_LIST_SIZE_BYTE);
1187
9d8d5786 1188 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
37c5f6c8 1189 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
9d8d5786 1190
5102ee87 1191 if (kvm_s390_crypto_init(kvm) < 0)
40f5b735 1192 goto out_err;
5102ee87 1193
ba5c1e9b 1194 spin_lock_init(&kvm->arch.float_int.lock);
6d3da241 1195 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1196 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
8a242234 1197 init_waitqueue_head(&kvm->arch.ipte_wq);
a6b7e459 1198 mutex_init(&kvm->arch.ipte_mutex);
ba5c1e9b 1199
b0c632db 1200 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
78f26131 1201 VM_EVENT(kvm, 3, "vm created with type %lu", type);
b0c632db 1202
e08b9637 1203 if (type & KVM_VM_S390_UCONTROL) {
1204 kvm->arch.gmap = NULL;
a3a92c31 1205 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
e08b9637 1206 } else {
32e6b236 1207 if (sclp.hamax == U64_MAX)
1208 kvm->arch.mem_limit = TASK_MAX_SIZE;
1209 else
1210 kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
1211 sclp.hamax + 1);
a3a92c31 1212 kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
e08b9637 1213 if (!kvm->arch.gmap)
40f5b735 1214 goto out_err;
2c70fe44 1215 kvm->arch.gmap->private = kvm;
24eb3a82 1216 kvm->arch.gmap->pfault_enabled = 0;
e08b9637 1217 }
fa6b7fe9 1218
1219 kvm->arch.css_support = 0;
84223598 1220 kvm->arch.use_irqchip = 0;
72f25020 1221 kvm->arch.epoch = 0;
fa6b7fe9 1222
8ad35755 1223 spin_lock_init(&kvm->arch.start_stop_lock);
8335713a 1224 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
8ad35755 1225
d89f5eff 1226 return 0;
40f5b735 1227out_err:
5102ee87 1228 kfree(kvm->arch.crypto.crycb);
9d8d5786 1229 free_page((unsigned long)kvm->arch.model.fac);
598841ca 1230 debug_unregister(kvm->arch.dbf);
7d43bafc 1231 sca_dispose(kvm);
78f26131 1232 KVM_EVENT(3, "creation of vm failed: %d", rc);
d89f5eff 1233 return rc;
b0c632db 1234}
1235
d329c035 1236void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1237{
1238 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 1239 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
67335e63 1240 kvm_s390_clear_local_irqs(vcpu);
3c038e6b 1241 kvm_clear_async_pf_completion_queue(vcpu);
bc784cce 1242 if (!kvm_is_ucontrol(vcpu->kvm))
a6e2f683 1243 sca_del_vcpu(vcpu);
27e0393f 1244
1245 if (kvm_is_ucontrol(vcpu->kvm))
1246 gmap_free(vcpu->arch.gmap);
1247
e6db1d61 1248 if (vcpu->kvm->arch.use_cmma)
b31605c1 1249 kvm_s390_vcpu_unsetup_cmma(vcpu);
d329c035 1250 free_page((unsigned long)(vcpu->arch.sie_block));
b31288fa 1251
6692cef3 1252 kvm_vcpu_uninit(vcpu);
b110feaf 1253 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035 1254}
1255
1256static void kvm_free_vcpus(struct kvm *kvm)
1257{
1258 unsigned int i;
988a2cae 1259 struct kvm_vcpu *vcpu;
d329c035 1260
988a2cae 1261 kvm_for_each_vcpu(i, vcpu, kvm)
1262 kvm_arch_vcpu_destroy(vcpu);
1263
1264 mutex_lock(&kvm->lock);
1265 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1266 kvm->vcpus[i] = NULL;
1267
1268 atomic_set(&kvm->online_vcpus, 0);
1269 mutex_unlock(&kvm->lock);
d329c035 1270}
1271
b0c632db 1272void kvm_arch_destroy_vm(struct kvm *kvm)
1273{
d329c035 1274 kvm_free_vcpus(kvm);
9d8d5786 1275 free_page((unsigned long)kvm->arch.model.fac);
7d43bafc 1276 sca_dispose(kvm);
d329c035 1277 debug_unregister(kvm->arch.dbf);
5102ee87 1278 kfree(kvm->arch.crypto.crycb);
27e0393f 1279 if (!kvm_is_ucontrol(kvm))
1280 gmap_free(kvm->arch.gmap);
841b91c5 1281 kvm_s390_destroy_adapters(kvm);
67335e63 1282 kvm_s390_clear_float_irqs(kvm);
8335713a 1283 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
b0c632db 1284}
1285
1286/* Section: vcpu related */
dafd032a 1287static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1288{
1289 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1290 if (!vcpu->arch.gmap)
1291 return -ENOMEM;
1292 vcpu->arch.gmap->private = vcpu->kvm;
1293
1294 return 0;
1295}
1296
a6e2f683 1297static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1298{
5e044315 1299 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc 1300 if (vcpu->kvm->arch.use_esca) {
1301 struct esca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 1302
7d43bafc 1303 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
10ce32d5 1304 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc 1305 } else {
1306 struct bsca_block *sca = vcpu->kvm->arch.sca;
1307
1308 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
10ce32d5 1309 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc 1310 }
5e044315 1311 read_unlock(&vcpu->kvm->arch.sca_lock);
a6e2f683 1312}
1313
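/*
 * Register the VCPU's SIE control block in the SCA that is currently in
 * use (basic or extended) and set its bit in the ->mcn mask.
 */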
eaa78f34 1314static void sca_add_vcpu(struct kvm_vcpu *vcpu)
a6e2f683 1315{
eaa78f34 1316 read_lock(&vcpu->kvm->arch.sca_lock);
1317 if (vcpu->kvm->arch.use_esca) {
1318 struct esca_block *sca = vcpu->kvm->arch.sca;
7d43bafc 1319
eaa78f34 1320 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc 1321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1322 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
25508824 1323 vcpu->arch.sie_block->ecb2 |= 0x04U;
eaa78f34 1324 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
7d43bafc 1325 } else {
eaa78f34 1326 struct bsca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 1327
eaa78f34 1328 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc 1329 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1330 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
eaa78f34 1331 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
7d43bafc 1332 }
eaa78f34 1333 read_unlock(&vcpu->kvm->arch.sca_lock);
5e044315 1334}
1335
1336/* Basic SCA to Extended SCA data copy routines */
1337static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1338{
1339 d->sda = s->sda;
1340 d->sigp_ctrl.c = s->sigp_ctrl.c;
1341 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1342}
1343
1344static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1345{
1346 int i;
1347
1348 d->ipte_control = s->ipte_control;
1349 d->mcn[0] = s->mcn;
1350 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1351 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1352}
1353
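/*
 * Replace the basic SCA with an extended SCA: allocate the new block,
 * copy all existing entries while every VCPU is blocked, and point each
 * SIE block at the new origin before the old page is freed.
 */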
1354static int sca_switch_to_extended(struct kvm *kvm)
1355{
1356 struct bsca_block *old_sca = kvm->arch.sca;
1357 struct esca_block *new_sca;
1358 struct kvm_vcpu *vcpu;
1359 unsigned int vcpu_idx;
1360 u32 scaol, scaoh;
1361
1362 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1363 if (!new_sca)
1364 return -ENOMEM;
1365
1366 scaoh = (u32)((u64)(new_sca) >> 32);
1367 scaol = (u32)(u64)(new_sca) & ~0x3fU;
1368
1369 kvm_s390_vcpu_block_all(kvm);
1370 write_lock(&kvm->arch.sca_lock);
1371
1372 sca_copy_b_to_e(new_sca, old_sca);
1373
1374 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1375 vcpu->arch.sie_block->scaoh = scaoh;
1376 vcpu->arch.sie_block->scaol = scaol;
1377 vcpu->arch.sie_block->ecb2 |= 0x04U;
1378 }
1379 kvm->arch.sca = new_sca;
1380 kvm->arch.use_esca = 1;
1381
1382 write_unlock(&kvm->arch.sca_lock);
1383 kvm_s390_vcpu_unblock_all(kvm);
1384
1385 free_page((unsigned long)old_sca);
1386
8335713a 1387 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1388 old_sca, kvm->arch.sca);
5e044315 1389 return 0;
a6e2f683 1390}
1391
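/*
 * Check whether a VCPU with the given id still fits into the SCA; if the
 * basic SCA is full, try to switch to the extended SCA transparently.
 */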
1392static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1393{
5e044315 1394 int rc;
1395
1396 if (id < KVM_S390_BSCA_CPU_SLOTS)
1397 return true;
1398 if (!sclp.has_esca)
1399 return false;
1400
1401 mutex_lock(&kvm->lock);
1402 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1403 mutex_unlock(&kvm->lock);
1404
1405 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
a6e2f683 1406}
1407
b0c632db 1408int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1409{
3c038e6b 1410 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1411 kvm_clear_async_pf_completion_queue(vcpu);
59674c1a 1412 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1413 KVM_SYNC_GPRS |
9eed0735 1414 KVM_SYNC_ACRS |
b028ee3e 1415 KVM_SYNC_CRS |
1416 KVM_SYNC_ARCH0 |
1417 KVM_SYNC_PFAULT;
c6e5f166 1418 if (test_kvm_facility(vcpu->kvm, 64))
1419 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
f6aa6dc4 1420 /* fprs can be synchronized via vrs, even if the guest has no vx. With
1421 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1422 */
1423 if (MACHINE_HAS_VX)
68c55750 1424 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
6fd8e67d 1425 else
1426 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
dafd032a 1427
1428 if (kvm_is_ucontrol(vcpu->kvm))
1429 return __kvm_ucontrol_vcpu_init(vcpu);
1430
b0c632db 1431 return 0;
1432}
1433
db0758b2 1434/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1435static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1436{
1437 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
1438 vcpu->arch.cputm_start = get_tod_clock_fast();
1439}
1440
1441/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1442static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1443{
1444 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
1445 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1446 vcpu->arch.cputm_start = 0;
1447}
1448
1449/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1450static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1451{
1452 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1453 vcpu->arch.cputm_enabled = true;
1454 __start_cpu_timer_accounting(vcpu);
1455}
1456
1457/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1458static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1459{
1460 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1461 __stop_cpu_timer_accounting(vcpu);
1462 vcpu->arch.cputm_enabled = false;
1463}
1464
1465static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1466{
1467 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1468 __enable_cpu_timer_accounting(vcpu);
1469 preempt_enable();
1470}
1471
1472static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1473{
1474 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1475 __disable_cpu_timer_accounting(vcpu);
1476 preempt_enable();
1477}
1478
4287f247 1479/* set the cpu timer - may only be called from the VCPU thread itself */
1480void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
1481{
db0758b2 1482 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1483 if (vcpu->arch.cputm_enabled)
1484 vcpu->arch.cputm_start = get_tod_clock_fast();
4287f247 1485 vcpu->arch.sie_block->cputm = cputm;
db0758b2 1486 preempt_enable();
4287f247 1487}
1488
db0758b2 1489/* update and get the cpu timer - can also be called from other VCPU threads */
4287f247 1490__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
1491{
db0758b2 1492 __u64 value;
1493 int me;
1494
1495 if (unlikely(!vcpu->arch.cputm_enabled))
1496 return vcpu->arch.sie_block->cputm;
1497
1498 me = get_cpu(); /* also protects from TOD sync and vcpu_load/put */
1499 value = vcpu->arch.sie_block->cputm;
1500 if (likely(me == vcpu->cpu)) {
1501 /* the VCPU itself will always read consistent values */
1502 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1503 }
1504 put_cpu();
1505 return value;
4287f247 1506}
1507
b0c632db 1508void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1509{
9977e886 1510 /* Save host register state */
d0164ee2 1511 save_fpu_regs();
9abc2a08 1512 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
1513 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
9977e886 1514
6fd8e67d 1515 if (MACHINE_HAS_VX)
1516 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
1517 else
1518 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
9abc2a08 1519 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
9977e886 1520 if (test_fp_ctl(current->thread.fpu.fpc))
96b2d7a8 1521 /* User space provided an invalid FPC, let's clear it */
9977e886 1522 current->thread.fpu.fpc = 0;
1523
1524 save_access_regs(vcpu->arch.host_acrs);
59674c1a 1525 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 1526 gmap_enable(vcpu->arch.gmap);
805de8f4 1527 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
db0758b2 1528 if (vcpu->arch.cputm_enabled)
1529 __start_cpu_timer_accounting(vcpu);
01a745ac 1530 vcpu->cpu = cpu;
b0c632db 1531}
1532
1533void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1534{
01a745ac 1535 vcpu->cpu = -1;
db0758b2 1536 if (vcpu->arch.cputm_enabled)
1537 __stop_cpu_timer_accounting(vcpu);
805de8f4 1538 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 1539 gmap_disable(vcpu->arch.gmap);
9977e886 1540
9abc2a08 1541 /* Save guest register state */
d0164ee2 1542 save_fpu_regs();
9abc2a08 1543 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
9977e886 1544
9abc2a08 1545 /* Restore host register state */
1546 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
1547 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
9977e886 1548
1549 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db 1550 restore_access_regs(vcpu->arch.host_acrs);
1551}
1552
1553static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1554{
1555 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1556 vcpu->arch.sie_block->gpsw.mask = 0UL;
1557 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 1558 kvm_s390_set_prefix(vcpu, 0);
4287f247 1559 kvm_s390_set_cpu_timer(vcpu, 0);
b0c632db 1560 vcpu->arch.sie_block->ckc = 0UL;
1561 vcpu->arch.sie_block->todpr = 0;
1562 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1563 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1564 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
9abc2a08 1565 /* make sure the new fpc will be lazily loaded */
1566 save_fpu_regs();
1567 current->thread.fpu.fpc = 0;
b0c632db 1568 vcpu->arch.sie_block->gbea = 1;
672550fb 1569 vcpu->arch.sie_block->pp = 0;
3c038e6b 1570 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1571 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2 1572 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1573 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 1574 kvm_s390_clear_local_irqs(vcpu);
b0c632db 1575}
1576
31928aa5 1577void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1578{
72f25020 1579 mutex_lock(&vcpu->kvm->lock);
fdf03650 1580 preempt_disable();
72f25020 1581 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1582 preempt_enable();
72f25020 1583 mutex_unlock(&vcpu->kvm->lock);
25508824 1584 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 1585 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 1586 sca_add_vcpu(vcpu);
25508824 1587 }
1588
42897d86 1589}
1590
5102ee87 1591static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1592{
9d8d5786 1593 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87 1594 return;
1595
a374e892 1596 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1597
1598 if (vcpu->kvm->arch.crypto.aes_kw)
1599 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1600 if (vcpu->kvm->arch.crypto.dea_kw)
1601 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1602
5102ee87 1603 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1604}
1605
b31605c1 1606void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1607{
1608 free_page(vcpu->arch.sie_block->cbrlo);
1609 vcpu->arch.sie_block->cbrlo = 0;
1610}
1611
1612int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1613{
1614 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1615 if (!vcpu->arch.sie_block->cbrlo)
1616 return -ENOMEM;
1617
1618 vcpu->arch.sie_block->ecb2 |= 0x80;
1619 vcpu->arch.sie_block->ecb2 &= ~0x08;
1620 return 0;
1621}
1622
91520f1a 1623static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1624{
1625 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1626
1627 vcpu->arch.cpu_id = model->cpu_id;
1628 vcpu->arch.sie_block->ibc = model->ibc;
1629 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1630}
1631
b0c632db 1632int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1633{
b31605c1 1634 int rc = 0;
b31288fa 1635
9e6dabef 1636 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1637 CPUSTAT_SM |
a4a4f191 1638 CPUSTAT_STOPPED);
1639
53df84f8 1640 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1641 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1642 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1643 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1644
91520f1a 1645 kvm_s390_vcpu_setup_model(vcpu);
1646
fc34531d 1647 vcpu->arch.sie_block->ecb = 6;
9d8d5786 1648 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8 1649 vcpu->arch.sie_block->ecb |= 0x10;
1650
69d0d3a3 1651 vcpu->arch.sie_block->ecb2 = 8;
ea5f4969 1652 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1653 if (sclp.has_siif)
217a4406 1654 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1655 if (sclp.has_sigpif)
ea5f4969 1656 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166 1657 if (test_kvm_facility(vcpu->kvm, 64))
1658 vcpu->arch.sie_block->ecb3 |= 0x01;
18280d8b 1659 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7 1660 vcpu->arch.sie_block->eca |= 0x00020000;
1661 vcpu->arch.sie_block->ecd |= 0x20000000;
1662 }
c6e5f166 1663 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1664 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 1665
e6db1d61 1666 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1667 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1668 if (rc)
1669 return rc;
b31288fa 1670 }
0ac96caf 1671 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1672 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1673
5102ee87 1674 kvm_s390_vcpu_crypto_setup(vcpu);
1675
b31605c1 1676 return rc;
b0c632db 1677}
1678
1679struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1680 unsigned int id)
1681{
4d47555a 1682 struct kvm_vcpu *vcpu;
7feb6bb8 1683 struct sie_page *sie_page;
4d47555a 1684 int rc = -EINVAL;
1685
4215825e 1686 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1687 goto out;
1688
1689 rc = -ENOMEM;
b0c632db 1690
b110feaf 1691 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1692 if (!vcpu)
4d47555a 1693 goto out;
b0c632db 1694
7feb6bb8 1695 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1696 if (!sie_page)
b0c632db 1697 goto out_free_cpu;
1698
7feb6bb8 1699 vcpu->arch.sie_block = &sie_page->sie_block;
1700 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1701
b0c632db 1702 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1703 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1704 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1705 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1706 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
ba5c1e9b 1707
b0c632db 1708 rc = kvm_vcpu_init(vcpu, kvm, id);
1709 if (rc)
9abc2a08 1710 goto out_free_sie_block;
8335713a 1711 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1712 vcpu->arch.sie_block);
ade38c31 1713 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1714
b0c632db 1715 return vcpu;
7b06bf2f 1716out_free_sie_block:
1717 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1718out_free_cpu:
b110feaf 1719 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1720out:
b0c632db 1721 return ERR_PTR(rc);
1722}
1723
b0c632db 1724int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1725{
9a022067 1726 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db 1727}
1728
27406cd5 1729void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1730{
805de8f4 1731 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1732 exit_sie(vcpu);
49b99e1e 1733}
1734
27406cd5 1735void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1736{
805de8f4 1737 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e 1738}
1739
8e236546 1740static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1741{
805de8f4 1742 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1743 exit_sie(vcpu);
8e236546 1744}
1745
1746static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1747{
9bf9fde2 1748 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1749}
1750
49b99e1e 1751/*
1752 * Kick a guest cpu out of SIE and wait until SIE is not running.
1753 * If the CPU is not running (e.g. waiting as idle) the function will
1754 * return immediately. */
1755void exit_sie(struct kvm_vcpu *vcpu)
1756{
805de8f4 1757 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e 1758 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1759 cpu_relax();
1760}
1761
8e236546 1762/* Kick a guest cpu out of SIE to process a request synchronously */
1763void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1764{
8e236546
CB
1765 kvm_make_request(req, vcpu);
1766 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1767}
1768
2c70fe44
CB
1769static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1770{
1771 int i;
1772 struct kvm *kvm = gmap->private;
1773 struct kvm_vcpu *vcpu;
1774
1775 kvm_for_each_vcpu(i, vcpu, kvm) {
1776 /* match against both prefix pages */
fda902cb 1777 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1778 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1779 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1780 }
1781 }
1782}
1783
b6d33834
CD
1784int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1785{
1786 /* kvm common code refers to this, but never calls it */
1787 BUG();
1788 return 0;
1789}
1790
14eebd91
CO
1791static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1792 struct kvm_one_reg *reg)
1793{
1794 int r = -EINVAL;
1795
1796 switch (reg->id) {
29b7c71b
CO
1797 case KVM_REG_S390_TODPR:
1798 r = put_user(vcpu->arch.sie_block->todpr,
1799 (u32 __user *)reg->addr);
1800 break;
1801 case KVM_REG_S390_EPOCHDIFF:
1802 r = put_user(vcpu->arch.sie_block->epoch,
1803 (u64 __user *)reg->addr);
1804 break;
46a6dd1c 1805 case KVM_REG_S390_CPU_TIMER:
4287f247 1806 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
1807 (u64 __user *)reg->addr);
1808 break;
1809 case KVM_REG_S390_CLOCK_COMP:
1810 r = put_user(vcpu->arch.sie_block->ckc,
1811 (u64 __user *)reg->addr);
1812 break;
536336c2
DD
1813 case KVM_REG_S390_PFTOKEN:
1814 r = put_user(vcpu->arch.pfault_token,
1815 (u64 __user *)reg->addr);
1816 break;
1817 case KVM_REG_S390_PFCOMPARE:
1818 r = put_user(vcpu->arch.pfault_compare,
1819 (u64 __user *)reg->addr);
1820 break;
1821 case KVM_REG_S390_PFSELECT:
1822 r = put_user(vcpu->arch.pfault_select,
1823 (u64 __user *)reg->addr);
1824 break;
672550fb
CB
1825 case KVM_REG_S390_PP:
1826 r = put_user(vcpu->arch.sie_block->pp,
1827 (u64 __user *)reg->addr);
1828 break;
afa45ff5
CB
1829 case KVM_REG_S390_GBEA:
1830 r = put_user(vcpu->arch.sie_block->gbea,
1831 (u64 __user *)reg->addr);
1832 break;
14eebd91
CO
1833 default:
1834 break;
1835 }
1836
1837 return r;
1838}
1839
1840static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1841 struct kvm_one_reg *reg)
1842{
1843 int r = -EINVAL;
4287f247 1844 __u64 val;
14eebd91
CO
1845
1846 switch (reg->id) {
29b7c71b
CO
1847 case KVM_REG_S390_TODPR:
1848 r = get_user(vcpu->arch.sie_block->todpr,
1849 (u32 __user *)reg->addr);
1850 break;
1851 case KVM_REG_S390_EPOCHDIFF:
1852 r = get_user(vcpu->arch.sie_block->epoch,
1853 (u64 __user *)reg->addr);
1854 break;
46a6dd1c 1855 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
1856 r = get_user(val, (u64 __user *)reg->addr);
1857 if (!r)
1858 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
1859 break;
1860 case KVM_REG_S390_CLOCK_COMP:
1861 r = get_user(vcpu->arch.sie_block->ckc,
1862 (u64 __user *)reg->addr);
1863 break;
536336c2
DD
1864 case KVM_REG_S390_PFTOKEN:
1865 r = get_user(vcpu->arch.pfault_token,
1866 (u64 __user *)reg->addr);
9fbd8082
DH
1867 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1868 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
1869 break;
1870 case KVM_REG_S390_PFCOMPARE:
1871 r = get_user(vcpu->arch.pfault_compare,
1872 (u64 __user *)reg->addr);
1873 break;
1874 case KVM_REG_S390_PFSELECT:
1875 r = get_user(vcpu->arch.pfault_select,
1876 (u64 __user *)reg->addr);
1877 break;
672550fb
CB
1878 case KVM_REG_S390_PP:
1879 r = get_user(vcpu->arch.sie_block->pp,
1880 (u64 __user *)reg->addr);
1881 break;
afa45ff5
CB
1882 case KVM_REG_S390_GBEA:
1883 r = get_user(vcpu->arch.sie_block->gbea,
1884 (u64 __user *)reg->addr);
1885 break;
14eebd91
CO
1886 default:
1887 break;
1888 }
1889
1890 return r;
1891}
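/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the one-reg handlers above are driven with KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG on the VCPU fd, e.g. to read the CPU timer:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */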
b6d33834 1892
b0c632db
HC
1893static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1894{
b0c632db 1895 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
1896 return 0;
1897}
1898
1899int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1900{
5a32c1af 1901 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
1902 return 0;
1903}
1904
1905int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1906{
5a32c1af 1907 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
1908 return 0;
1909}
1910
1911int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1912 struct kvm_sregs *sregs)
1913{
59674c1a 1914 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 1915 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 1916 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
1917 return 0;
1918}
1919
1920int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1921 struct kvm_sregs *sregs)
1922{
59674c1a 1923 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 1924 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
1925 return 0;
1926}
1927
1928int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1929{
9abc2a08
DH
1930 /* make sure the new values will be lazily loaded */
1931 save_fpu_regs();
4725c860
MS
1932 if (test_fp_ctl(fpu->fpc))
1933 return -EINVAL;
9abc2a08
DH
1934 current->thread.fpu.fpc = fpu->fpc;
1935 if (MACHINE_HAS_VX)
1936 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
1937 else
1938 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
1939 return 0;
1940}
1941
1942int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1943{
9abc2a08
DH
1944 /* make sure we have the latest values */
1945 save_fpu_regs();
1946 if (MACHINE_HAS_VX)
1947 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
1948 else
1949 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
1950 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
1951 return 0;
1952}
1953
1954static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1955{
1956 int rc = 0;
1957
7a42fdc2 1958 if (!is_vcpu_stopped(vcpu))
b0c632db 1959 rc = -EBUSY;
d7b0b5eb
CO
1960 else {
1961 vcpu->run->psw_mask = psw.mask;
1962 vcpu->run->psw_addr = psw.addr;
1963 }
b0c632db
HC
1964 return rc;
1965}
1966
1967int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1968 struct kvm_translation *tr)
1969{
1970 return -EINVAL; /* not implemented yet */
1971}
1972
27291e21
DH
1973#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1974 KVM_GUESTDBG_USE_HW_BP | \
1975 KVM_GUESTDBG_ENABLE)
1976
d0bfb940
JK
1977int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1978 struct kvm_guest_debug *dbg)
b0c632db 1979{
27291e21
DH
1980 int rc = 0;
1981
1982 vcpu->guest_debug = 0;
1983 kvm_s390_clear_bp_data(vcpu);
1984
2de3bfc2 1985 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
1986 return -EINVAL;
1987
1988 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1989 vcpu->guest_debug = dbg->control;
1990 /* enforce guest PER */
805de8f4 1991 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
1992
1993 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1994 rc = kvm_s390_import_bp_data(vcpu, dbg);
1995 } else {
805de8f4 1996 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
1997 vcpu->arch.guestdbg.last_bp = 0;
1998 }
1999
2000 if (rc) {
2001 vcpu->guest_debug = 0;
2002 kvm_s390_clear_bp_data(vcpu);
805de8f4 2003 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2004 }
2005
2006 return rc;
b0c632db
HC
2007}
2008
62d9f0db
MT
2009int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2010 struct kvm_mp_state *mp_state)
2011{
6352e4d2
DH
2012 /* CHECK_STOP and LOAD are not supported yet */
2013 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2014 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2015}
2016
2017int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2018 struct kvm_mp_state *mp_state)
2019{
6352e4d2
DH
2020 int rc = 0;
2021
2022 /* user space knows about this interface - let it control the state */
2023 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2024
2025 switch (mp_state->mp_state) {
2026 case KVM_MP_STATE_STOPPED:
2027 kvm_s390_vcpu_stop(vcpu);
2028 break;
2029 case KVM_MP_STATE_OPERATING:
2030 kvm_s390_vcpu_start(vcpu);
2031 break;
2032 case KVM_MP_STATE_LOAD:
2033 case KVM_MP_STATE_CHECK_STOP:
2034 /* fall through - CHECK_STOP and LOAD are not supported yet */
2035 default:
2036 rc = -ENXIO;
2037 }
2038
2039 return rc;
62d9f0db
MT
2040}
2041
8ad35755
DH
2042static bool ibs_enabled(struct kvm_vcpu *vcpu)
2043{
2044 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2045}
2046
2c70fe44
CB
2047static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2048{
8ad35755 2049retry:
8e236546 2050 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2051 if (!vcpu->requests)
2052 return 0;
2c70fe44
CB
2053 /*
2054 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2055 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2056 * This ensures that the ipte instruction for this request has
2057 * already finished. We might race against a second unmapper that
2058 * wants to set the blocking bit. Let's just retry the request loop.
2059 */
8ad35755 2060 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
2061 int rc;
2062 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 2063 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
2064 PAGE_SIZE * 2);
2065 if (rc)
2066 return rc;
8ad35755 2067 goto retry;
2c70fe44 2068 }
8ad35755 2069
d3d692c8
DH
2070 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2071 vcpu->arch.sie_block->ihcpu = 0xffff;
2072 goto retry;
2073 }
2074
8ad35755
DH
2075 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2076 if (!ibs_enabled(vcpu)) {
2077 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2078 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2079 &vcpu->arch.sie_block->cpuflags);
2080 }
2081 goto retry;
2c70fe44 2082 }
8ad35755
DH
2083
2084 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2085 if (ibs_enabled(vcpu)) {
2086 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2087 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2088 &vcpu->arch.sie_block->cpuflags);
2089 }
2090 goto retry;
2091 }
2092
0759d068
DH
2093 /* nothing to do, just clear the request */
2094 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2095
2c70fe44
CB
2096 return 0;
2097}
2098
25ed1675
DH
2099void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2100{
2101 struct kvm_vcpu *vcpu;
2102 int i;
2103
2104 mutex_lock(&kvm->lock);
2105 preempt_disable();
2106 kvm->arch.epoch = tod - get_tod_clock();
2107 kvm_s390_vcpu_block_all(kvm);
2108 kvm_for_each_vcpu(i, vcpu, kvm)
2109 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2110 kvm_s390_vcpu_unblock_all(kvm);
2111 preempt_enable();
2112 mutex_unlock(&kvm->lock);
2113}
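/*
 * Note (added for illustration): since the epoch is computed as
 * "tod - get_tod_clock()", a VCPU that later adds this epoch to the
 * current host TOD observes exactly the guest TOD requested here;
 * blocking all VCPUs while the epoch is rewritten keeps them consistent.
 */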
2114
fa576c58
TH
2115/**
2116 * kvm_arch_fault_in_page - fault-in guest page if necessary
2117 * @vcpu: The corresponding virtual cpu
2118 * @gpa: Guest physical address
2119 * @writable: Whether the page should be writable or not
2120 *
2121 * Make sure that a guest page has been faulted-in on the host.
2122 *
2123 * Return: Zero on success, negative error code otherwise.
2124 */
2125long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2126{
527e30b4
MS
2127 return gmap_fault(vcpu->arch.gmap, gpa,
2128 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2129}
2130
3c038e6b
DD
2131static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2132 unsigned long token)
2133{
2134 struct kvm_s390_interrupt inti;
383d0b05 2135 struct kvm_s390_irq irq;
3c038e6b
DD
2136
2137 if (start_token) {
383d0b05
JF
2138 irq.u.ext.ext_params2 = token;
2139 irq.type = KVM_S390_INT_PFAULT_INIT;
2140 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2141 } else {
2142 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2143 inti.parm64 = token;
3c038e6b
DD
2144 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2145 }
2146}
2147
2148void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2149 struct kvm_async_pf *work)
2150{
2151 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2152 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2153}
2154
2155void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2156 struct kvm_async_pf *work)
2157{
2158 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2159 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2160}
2161
2162void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2163 struct kvm_async_pf *work)
2164{
2165 /* s390 will always inject the page directly */
2166}
2167
2168bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2169{
2170 /*
2171 * s390 will always inject the page directly,
2172 * but we still want check_async_completion to clean up
2173 */
2174 return true;
2175}
2176
2177static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2178{
2179 hva_t hva;
2180 struct kvm_arch_async_pf arch;
2181 int rc;
2182
2183 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2184 return 0;
2185 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2186 vcpu->arch.pfault_compare)
2187 return 0;
2188 if (psw_extint_disabled(vcpu))
2189 return 0;
9a022067 2190 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2191 return 0;
2192 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2193 return 0;
2194 if (!vcpu->arch.gmap->pfault_enabled)
2195 return 0;
2196
81480cc1
HC
2197 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2198 hva += current->thread.gmap_addr & ~PAGE_MASK;
2199 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2200 return 0;
2201
2202 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2203 return rc;
2204}
2205
3fb4c40f 2206static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2207{
3fb4c40f 2208 int rc, cpuflags;
e168bf8d 2209
3c038e6b
DD
2210 /*
2211 * On s390, notifications for arriving pages will be delivered directly
2212 * to the guest, but the housekeeping for completed pfaults is
2213 * handled outside the worker.
2214 */
2215 kvm_check_async_pf_completion(vcpu);
2216
7ec7c8c7
CB
2217 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2218 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2219
2220 if (need_resched())
2221 schedule();
2222
d3a73acb 2223 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2224 s390_handle_mcck();
2225
79395031
JF
2226 if (!kvm_is_ucontrol(vcpu->kvm)) {
2227 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2228 if (rc)
2229 return rc;
2230 }
0ff31867 2231
2c70fe44
CB
2232 rc = kvm_s390_handle_requests(vcpu);
2233 if (rc)
2234 return rc;
2235
27291e21
DH
2236 if (guestdbg_enabled(vcpu)) {
2237 kvm_s390_backup_guest_per_regs(vcpu);
2238 kvm_s390_patch_guest_per_regs(vcpu);
2239 }
2240
b0c632db 2241 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2242 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2243 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2244 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2245
3fb4c40f
TH
2246 return 0;
2247}
2248
492d8642
TH
2249static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2250{
56317920
DH
2251 struct kvm_s390_pgm_info pgm_info = {
2252 .code = PGM_ADDRESSING,
2253 };
2254 u8 opcode, ilen;
492d8642
TH
2255 int rc;
2256
2257 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2258 trace_kvm_s390_sie_fault(vcpu);
2259
2260 /*
2261 * We want to inject an addressing exception, which is defined as a
2262 * suppressing or terminating exception. However, since we came here
2263 * by a DAT access exception, the PSW still points to the faulting
2264 * instruction since DAT exceptions are nullifying. So we've got
2265 * to look up the current opcode to get the length of the instruction
2266 * to be able to forward the PSW.
2267 */
65977322 2268 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2269 ilen = insn_length(opcode);
9b0d721a
DH
2270 if (rc < 0) {
2271 return rc;
2272 } else if (rc) {
2273 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2274 * Forward by arbitrary ilc, injection will take care of
2275 * nullification if necessary.
2276 */
2277 pgm_info = vcpu->arch.pgm;
2278 ilen = 4;
2279 }
56317920
DH
2280 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2281 kvm_s390_forward_psw(vcpu, ilen);
2282 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2283}
2284
3fb4c40f
TH
2285static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2286{
2b29a9fd
DD
2287 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2288 vcpu->arch.sie_block->icptcode);
2289 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2290
27291e21
DH
2291 if (guestdbg_enabled(vcpu))
2292 kvm_s390_restore_guest_per_regs(vcpu);
2293
7ec7c8c7
CB
2294 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2295 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2296
2297 if (vcpu->arch.sie_block->icptcode > 0) {
2298 int rc = kvm_handle_sie_intercept(vcpu);
2299
2300 if (rc != -EOPNOTSUPP)
2301 return rc;
2302 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2303 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2304 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2305 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2306 return -EREMOTE;
2307 } else if (exit_reason != -EFAULT) {
2308 vcpu->stat.exit_null++;
2309 return 0;
210b1607
TH
2310 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2311 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2312 vcpu->run->s390_ucontrol.trans_exc_code =
2313 current->thread.gmap_addr;
2314 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2315 return -EREMOTE;
24eb3a82 2316 } else if (current->thread.gmap_pfault) {
3c038e6b 2317 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2318 current->thread.gmap_pfault = 0;
71f116bf
DH
2319 if (kvm_arch_setup_async_pf(vcpu))
2320 return 0;
2321 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2322 }
71f116bf 2323 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2324}
2325
2326static int __vcpu_run(struct kvm_vcpu *vcpu)
2327{
2328 int rc, exit_reason;
2329
800c1065
TH
2330 /*
2331 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2332 * ning the guest), so that memslots (and other stuff) are protected
2333 */
2334 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2335
a76ccff6
TH
2336 do {
2337 rc = vcpu_pre_run(vcpu);
2338 if (rc)
2339 break;
3fb4c40f 2340
800c1065 2341 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2342 /*
2343 * As PF_VCPU will be used in the fault handler, there should be
2344 * no uaccess between guest_enter and guest_exit.
2345 */
0097d12e
CB
2346 local_irq_disable();
2347 __kvm_guest_enter();
db0758b2 2348 __disable_cpu_timer_accounting(vcpu);
0097d12e 2349 local_irq_enable();
a76ccff6
TH
2350 exit_reason = sie64a(vcpu->arch.sie_block,
2351 vcpu->run->s.regs.gprs);
0097d12e 2352 local_irq_disable();
db0758b2 2353 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2354 __kvm_guest_exit();
2355 local_irq_enable();
800c1065 2356 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2357
2358 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2359 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2360
800c1065 2361 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2362 return rc;
b0c632db
HC
2363}
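/*
 * Note (added for illustration): __vcpu_run() brackets the actual
 * sie64a() call with __disable_cpu_timer_accounting() and
 * __enable_cpu_timer_accounting(), both inside the irq-disabled
 * guest enter/exit section, so the software cpu timer accounting is
 * only active while the VCPU is not executing under SIE.
 */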
2364
b028ee3e
DH
2365static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2366{
2367 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2368 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2369 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2370 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2371 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2372 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2373 /* some control register changes require a tlb flush */
2374 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2375 }
2376 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2377 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2378 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2379 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2380 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2381 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2382 }
2383 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2384 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2385 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2386 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2387 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2388 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2389 }
2390 kvm_run->kvm_dirty_regs = 0;
2391}
2392
2393static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2394{
2395 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2396 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2397 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2398 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2399 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2400 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2401 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2402 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2403 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2404 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2405 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2406 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2407}
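/*
 * Note (added for illustration): userspace opts into sync_regs() by
 * setting the corresponding KVM_SYNC_* bits in kvm_run->kvm_dirty_regs
 * before KVM_RUN; store_regs() unconditionally copies the state back
 * into kvm_run afterwards, so userspace always sees the latest values.
 */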
2408
b0c632db
HC
2409int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2410{
8f2abe6a 2411 int rc;
b0c632db
HC
2412 sigset_t sigsaved;
2413
27291e21
DH
2414 if (guestdbg_exit_pending(vcpu)) {
2415 kvm_s390_prepare_debug_exit(vcpu);
2416 return 0;
2417 }
2418
b0c632db
HC
2419 if (vcpu->sigset_active)
2420 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2421
6352e4d2
DH
2422 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2423 kvm_s390_vcpu_start(vcpu);
2424 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2425 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2426 vcpu->vcpu_id);
2427 return -EINVAL;
2428 }
b0c632db 2429
b028ee3e 2430 sync_regs(vcpu, kvm_run);
db0758b2 2431 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2432
dab4079d 2433 might_fault();
a76ccff6 2434 rc = __vcpu_run(vcpu);
9ace903d 2435
b1d16c49
CE
2436 if (signal_pending(current) && !rc) {
2437 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2438 rc = -EINTR;
b1d16c49 2439 }
8f2abe6a 2440
27291e21
DH
2441 if (guestdbg_exit_pending(vcpu) && !rc) {
2442 kvm_s390_prepare_debug_exit(vcpu);
2443 rc = 0;
2444 }
2445
8f2abe6a 2446 if (rc == -EREMOTE) {
71f116bf 2447 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2448 rc = 0;
2449 }
b0c632db 2450
db0758b2 2451 disable_cpu_timer_accounting(vcpu);
b028ee3e 2452 store_regs(vcpu, kvm_run);
d7b0b5eb 2453
b0c632db
HC
2454 if (vcpu->sigset_active)
2455 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2456
b0c632db 2457 vcpu->stat.exit_userspace++;
7e8e6ab4 2458 return rc;
b0c632db
HC
2459}
2460
b0c632db
HC
2461/*
2462 * store status at address
2463 * we have two special cases:
2464 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2465 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2466 */
d0bce605 2467int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2468{
092670cd 2469 unsigned char archmode = 1;
9abc2a08 2470 freg_t fprs[NUM_FPRS];
fda902cb 2471 unsigned int px;
4287f247 2472 u64 clkcomp, cputm;
d0bce605 2473 int rc;
b0c632db 2474
d9a3a09a 2475 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2476 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2477 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2478 return -EFAULT;
d9a3a09a 2479 gpa = 0;
d0bce605
HC
2480 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2481 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2482 return -EFAULT;
d9a3a09a
MS
2483 gpa = px;
2484 } else
2485 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2486
2487 /* manually convert vector registers if necessary */
2488 if (MACHINE_HAS_VX) {
2489 convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
2490 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2491 fprs, 128);
2492 } else {
2493 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2494 vcpu->run->s.regs.fprs, 128);
9abc2a08 2495 }
d9a3a09a 2496 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2497 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2498 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2499 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2500 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2501 &px, 4);
d9a3a09a 2502 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2503 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2504 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2505 &vcpu->arch.sie_block->todpr, 4);
4287f247 2506 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2507 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2508 &cputm, 8);
178bd789 2509 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2510 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2511 &clkcomp, 8);
d9a3a09a 2512 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2513 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2514 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2515 &vcpu->arch.sie_block->gcr, 128);
2516 return rc ? -EFAULT : 0;
b0c632db
HC
2517}
2518
e879892c
TH
2519int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2520{
2521 /*
2522 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2523 * copying in vcpu load/put. Let's update our copies before we save
2524 * it into the save area
2525 */
d0164ee2 2526 save_fpu_regs();
9abc2a08 2527 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2528 save_access_regs(vcpu->run->s.regs.acrs);
2529
2530 return kvm_s390_store_status_unloaded(vcpu, addr);
2531}
2532
bc17de7c
EF
2533/*
2534 * store additional status at address
2535 */
2536int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2537 unsigned long gpa)
2538{
2539 /* Only bits 0-53 are used for address formation */
2540 if (!(gpa & ~0x3ff))
2541 return 0;
2542
2543 return write_guest_abs(vcpu, gpa & ~0x3ff,
2544 (void *)&vcpu->run->s.regs.vrs, 512);
2545}
2546
2547int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2548{
2549 if (!test_kvm_facility(vcpu->kvm, 129))
2550 return 0;
2551
2552 /*
2553 * The guest VXRS are in the host VXRs due to the lazy
9977e886
HB
2554 * copying in vcpu load/put. We can simply call save_fpu_regs()
2555 * to save the current register state because we are in the
2556 * middle of a load/put cycle.
2557 *
2558 * Let's update our copies before we save it into the save area.
bc17de7c 2559 */
d0164ee2 2560 save_fpu_regs();
bc17de7c
EF
2561
2562 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2563}
2564
8ad35755
DH
2565static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2566{
2567 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2568 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2569}
2570
2571static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2572{
2573 unsigned int i;
2574 struct kvm_vcpu *vcpu;
2575
2576 kvm_for_each_vcpu(i, vcpu, kvm) {
2577 __disable_ibs_on_vcpu(vcpu);
2578 }
2579}
2580
2581static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2582{
2583 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2584 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2585}
2586
6852d7b6
DH
2587void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2588{
8ad35755
DH
2589 int i, online_vcpus, started_vcpus = 0;
2590
2591 if (!is_vcpu_stopped(vcpu))
2592 return;
2593
6852d7b6 2594 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2595 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2596 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2597 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2598
2599 for (i = 0; i < online_vcpus; i++) {
2600 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2601 started_vcpus++;
2602 }
2603
2604 if (started_vcpus == 0) {
2605 /* we're the only active VCPU -> speed it up */
2606 __enable_ibs_on_vcpu(vcpu);
2607 } else if (started_vcpus == 1) {
2608 /*
2609 * As we are starting a second VCPU, we have to disable
2610 * the IBS facility on all VCPUs to remove potentially
2611 * outstanding ENABLE requests.
2612 */
2613 __disable_ibs_on_all_vcpus(vcpu->kvm);
2614 }
2615
805de8f4 2616 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2617 /*
2618 * Another VCPU might have used IBS while we were offline.
2619 * Let's play safe and flush the VCPU at startup.
2620 */
d3d692c8 2621 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2622 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2623 return;
6852d7b6
DH
2624}
2625
2626void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2627{
8ad35755
DH
2628 int i, online_vcpus, started_vcpus = 0;
2629 struct kvm_vcpu *started_vcpu = NULL;
2630
2631 if (is_vcpu_stopped(vcpu))
2632 return;
2633
6852d7b6 2634 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2635 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2636 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2637 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2638
32f5ff63 2639 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2640 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2641
805de8f4 2642 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2643 __disable_ibs_on_vcpu(vcpu);
2644
2645 for (i = 0; i < online_vcpus; i++) {
2646 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2647 started_vcpus++;
2648 started_vcpu = vcpu->kvm->vcpus[i];
2649 }
2650 }
2651
2652 if (started_vcpus == 1) {
2653 /*
2654 * As we only have one VCPU left, we want to enable the
2655 * IBS facility for that VCPU to speed it up.
2656 */
2657 __enable_ibs_on_vcpu(started_vcpu);
2658 }
2659
433b9ee4 2660 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2661 return;
6852d7b6
DH
2662}
2663
d6712df9
CH
2664static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2665 struct kvm_enable_cap *cap)
2666{
2667 int r;
2668
2669 if (cap->flags)
2670 return -EINVAL;
2671
2672 switch (cap->cap) {
fa6b7fe9
CH
2673 case KVM_CAP_S390_CSS_SUPPORT:
2674 if (!vcpu->kvm->arch.css_support) {
2675 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2676 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2677 trace_kvm_s390_enable_css(vcpu->kvm);
2678 }
2679 r = 0;
2680 break;
d6712df9
CH
2681 default:
2682 r = -EINVAL;
2683 break;
2684 }
2685 return r;
2686}
2687
41408c28
TH
2688static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2689 struct kvm_s390_mem_op *mop)
2690{
2691 void __user *uaddr = (void __user *)mop->buf;
2692 void *tmpbuf = NULL;
2693 int r, srcu_idx;
2694 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2695 | KVM_S390_MEMOP_F_CHECK_ONLY;
2696
2697 if (mop->flags & ~supported_flags)
2698 return -EINVAL;
2699
2700 if (mop->size > MEM_OP_MAX_SIZE)
2701 return -E2BIG;
2702
2703 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2704 tmpbuf = vmalloc(mop->size);
2705 if (!tmpbuf)
2706 return -ENOMEM;
2707 }
2708
2709 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2710
2711 switch (mop->op) {
2712 case KVM_S390_MEMOP_LOGICAL_READ:
2713 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2714 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2715 mop->size, GACC_FETCH);
41408c28
TH
2716 break;
2717 }
2718 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2719 if (r == 0) {
2720 if (copy_to_user(uaddr, tmpbuf, mop->size))
2721 r = -EFAULT;
2722 }
2723 break;
2724 case KVM_S390_MEMOP_LOGICAL_WRITE:
2725 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2726 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2727 mop->size, GACC_STORE);
41408c28
TH
2728 break;
2729 }
2730 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2731 r = -EFAULT;
2732 break;
2733 }
2734 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2735 break;
2736 default:
2737 r = -EINVAL;
2738 }
2739
2740 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2741
2742 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2743 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2744
2745 vfree(tmpbuf);
2746 return r;
2747}
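/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * KVM_S390_MEM_OP reads or writes guest logical memory, e.g.:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * Sizes above MEM_OP_MAX_SIZE are rejected with -E2BIG and unknown
 * flags with -EINVAL, as implemented above.
 */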
2748
b0c632db
HC
2749long kvm_arch_vcpu_ioctl(struct file *filp,
2750 unsigned int ioctl, unsigned long arg)
2751{
2752 struct kvm_vcpu *vcpu = filp->private_data;
2753 void __user *argp = (void __user *)arg;
800c1065 2754 int idx;
bc923cc9 2755 long r;
b0c632db 2756
93736624 2757 switch (ioctl) {
47b43c52
JF
2758 case KVM_S390_IRQ: {
2759 struct kvm_s390_irq s390irq;
2760
2761 r = -EFAULT;
2762 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2763 break;
2764 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2765 break;
2766 }
93736624 2767 case KVM_S390_INTERRUPT: {
ba5c1e9b 2768 struct kvm_s390_interrupt s390int;
383d0b05 2769 struct kvm_s390_irq s390irq;
ba5c1e9b 2770
93736624 2771 r = -EFAULT;
ba5c1e9b 2772 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2773 break;
383d0b05
JF
2774 if (s390int_to_s390irq(&s390int, &s390irq))
2775 return -EINVAL;
2776 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2777 break;
ba5c1e9b 2778 }
b0c632db 2779 case KVM_S390_STORE_STATUS:
800c1065 2780 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2781 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2782 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2783 break;
b0c632db
HC
2784 case KVM_S390_SET_INITIAL_PSW: {
2785 psw_t psw;
2786
bc923cc9 2787 r = -EFAULT;
b0c632db 2788 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2789 break;
2790 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2791 break;
b0c632db
HC
2792 }
2793 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2794 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2795 break;
14eebd91
CO
2796 case KVM_SET_ONE_REG:
2797 case KVM_GET_ONE_REG: {
2798 struct kvm_one_reg reg;
2799 r = -EFAULT;
2800 if (copy_from_user(&reg, argp, sizeof(reg)))
2801 break;
2802 if (ioctl == KVM_SET_ONE_REG)
2803 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2804 else
2805 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2806 break;
2807 }
27e0393f
CO
2808#ifdef CONFIG_KVM_S390_UCONTROL
2809 case KVM_S390_UCAS_MAP: {
2810 struct kvm_s390_ucas_mapping ucasmap;
2811
2812 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2813 r = -EFAULT;
2814 break;
2815 }
2816
2817 if (!kvm_is_ucontrol(vcpu->kvm)) {
2818 r = -EINVAL;
2819 break;
2820 }
2821
2822 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2823 ucasmap.vcpu_addr, ucasmap.length);
2824 break;
2825 }
2826 case KVM_S390_UCAS_UNMAP: {
2827 struct kvm_s390_ucas_mapping ucasmap;
2828
2829 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2830 r = -EFAULT;
2831 break;
2832 }
2833
2834 if (!kvm_is_ucontrol(vcpu->kvm)) {
2835 r = -EINVAL;
2836 break;
2837 }
2838
2839 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2840 ucasmap.length);
2841 break;
2842 }
2843#endif
ccc7910f 2844 case KVM_S390_VCPU_FAULT: {
527e30b4 2845 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2846 break;
2847 }
d6712df9
CH
2848 case KVM_ENABLE_CAP:
2849 {
2850 struct kvm_enable_cap cap;
2851 r = -EFAULT;
2852 if (copy_from_user(&cap, argp, sizeof(cap)))
2853 break;
2854 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2855 break;
2856 }
41408c28
TH
2857 case KVM_S390_MEM_OP: {
2858 struct kvm_s390_mem_op mem_op;
2859
2860 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2861 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2862 else
2863 r = -EFAULT;
2864 break;
2865 }
816c7667
JF
2866 case KVM_S390_SET_IRQ_STATE: {
2867 struct kvm_s390_irq_state irq_state;
2868
2869 r = -EFAULT;
2870 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2871 break;
2872 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2873 irq_state.len == 0 ||
2874 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2875 r = -EINVAL;
2876 break;
2877 }
2878 r = kvm_s390_set_irq_state(vcpu,
2879 (void __user *) irq_state.buf,
2880 irq_state.len);
2881 break;
2882 }
2883 case KVM_S390_GET_IRQ_STATE: {
2884 struct kvm_s390_irq_state irq_state;
2885
2886 r = -EFAULT;
2887 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2888 break;
2889 if (irq_state.len == 0) {
2890 r = -EINVAL;
2891 break;
2892 }
2893 r = kvm_s390_get_irq_state(vcpu,
2894 (__u8 __user *) irq_state.buf,
2895 irq_state.len);
2896 break;
2897 }
b0c632db 2898 default:
3e6afcf1 2899 r = -ENOTTY;
b0c632db 2900 }
bc923cc9 2901 return r;
b0c632db
HC
2902}
2903
5b1c1493
CO
2904int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2905{
2906#ifdef CONFIG_KVM_S390_UCONTROL
2907 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2908 && (kvm_is_ucontrol(vcpu->kvm))) {
2909 vmf->page = virt_to_page(vcpu->arch.sie_block);
2910 get_page(vmf->page);
2911 return 0;
2912 }
2913#endif
2914 return VM_FAULT_SIGBUS;
2915}
2916
5587027c
AK
2917int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2918 unsigned long npages)
db3fe4eb
TY
2919{
2920 return 0;
2921}
2922
b0c632db 2923/* Section: memory related */
f7784b8e
MT
2924int kvm_arch_prepare_memory_region(struct kvm *kvm,
2925 struct kvm_memory_slot *memslot,
09170a49 2926 const struct kvm_userspace_memory_region *mem,
7b6195a9 2927 enum kvm_mr_change change)
b0c632db 2928{
dd2887e7
NW
2929 /* A few sanity checks. Memory slots have to start and end at a
2930 segment boundary (1 MB). The memory in userland may be fragmented
2931 into various different vmas. It is okay to mmap() and munmap() in
2932 this slot at any time after this call */
b0c632db 2933
598841ca 2934 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2935 return -EINVAL;
2936
598841ca 2937 if (mem->memory_size & 0xffffful)
b0c632db
HC
2938 return -EINVAL;
2939
a3a92c31
DD
2940 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2941 return -EINVAL;
2942
f7784b8e
MT
2943 return 0;
2944}
2945
2946void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 2947 const struct kvm_userspace_memory_region *mem,
8482644a 2948 const struct kvm_memory_slot *old,
f36f3f28 2949 const struct kvm_memory_slot *new,
8482644a 2950 enum kvm_mr_change change)
f7784b8e 2951{
f7850c92 2952 int rc;
f7784b8e 2953
2cef4deb
CB
2954 /* If the basics of the memslot do not change, we do not want
2955 * to update the gmap. Every update causes several unnecessary
2956 * segment translation exceptions. This is usually handled just
2957 * fine by the normal fault handler + gmap, but it will also
2958 * cause faults on the prefix page of running guest CPUs.
2959 */
2960 if (old->userspace_addr == mem->userspace_addr &&
2961 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2962 old->npages * PAGE_SIZE == mem->memory_size)
2963 return;
598841ca
CO
2964
2965 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2966 mem->guest_phys_addr, mem->memory_size);
2967 if (rc)
ea2cdd27 2968 pr_warn("failed to commit memory region\n");
598841ca 2969 return;
b0c632db
HC
2970}
2971
b0c632db
HC
2972static int __init kvm_s390_init(void)
2973{
07197fd0
DH
2974 if (!sclp.has_sief2) {
2975 pr_info("SIE not available\n");
2976 return -ENODEV;
2977 }
2978
9d8d5786 2979 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2980}
2981
2982static void __exit kvm_s390_exit(void)
2983{
2984 kvm_exit();
2985}
2986
2987module_init(kvm_s390_init);
2988module_exit(kvm_s390_exit);
566af940
CH
2989
2990/*
2991 * Enable autoloading of the kvm module.
2992 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2993 * since x86 takes a different approach.
2994 */
2995#include <linux/miscdevice.h>
2996MODULE_ALIAS_MISCDEV(KVM_MINOR);
2997MODULE_ALIAS("devname:kvm");