KVM: s390: Enable up to 248 VCPUs per VM
deliverable/linux.git: arch/s390/kvm/interrupt.c
ba5c1e9b 1/*
a53c8fab 2 * handling kvm guest interrupts
ba5c1e9b 3 *
33b412ac 4 * Copyright IBM Corp. 2008, 2015
ba5c1e9b
CO
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
ca872302 13#include <linux/interrupt.h>
ba5c1e9b 14#include <linux/kvm_host.h>
cbb870c8 15#include <linux/hrtimer.h>
84223598 16#include <linux/mmu_context.h>
3cd61299 17#include <linux/signal.h>
5a0e3ad6 18#include <linux/slab.h>
383d0b05 19#include <linux/bitmap.h>
94aa033e 20#include <linux/vmalloc.h>
cbb870c8 21#include <asm/asm-offsets.h>
33b412ac 22#include <asm/dis.h>
cbb870c8 23#include <asm/uaccess.h>
ea5f4969 24#include <asm/sclp.h>
6d3da241 25#include <asm/isc.h>
ba5c1e9b
CO
26#include "kvm-s390.h"
27#include "gaccess.h"
ade38c31 28#include "trace-s390.h"
ba5c1e9b 29
d8346b7d
CH
30#define IOINT_SCHID_MASK 0x0000ffff
31#define IOINT_SSID_MASK 0x00030000
32#define IOINT_CSSID_MASK 0x03fc0000
44c6ca3d 33#define PFAULT_INIT 0x0600
60f90a14
JF
34#define PFAULT_DONE 0x0680
35#define VIRTIO_PARAM 0x0d00
d8346b7d 36
a5bd7647
ED
37/* handle external calls via sigp interpretation facility */
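/*
 * With sigpif, a pending external call is tracked in the target VCPU's
 * SCA entry (basic or extended format) rather than in the software
 * pending_irqs bitmap: the "c" bit flags a pending call and "scn"
 * holds the source CPU number.
 */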
38static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
39{
7d43bafc
ED
40 int c, scn;
41
5e044315 42 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc
ED
43 if (vcpu->kvm->arch.use_esca) {
44 struct esca_block *sca = vcpu->kvm->arch.sca;
45 union esca_sigp_ctrl sigp_ctrl =
46 sca->cpu[vcpu->vcpu_id].sigp_ctrl;
47
48 c = sigp_ctrl.c;
49 scn = sigp_ctrl.scn;
50 } else {
51 struct bsca_block *sca = vcpu->kvm->arch.sca;
52 union bsca_sigp_ctrl sigp_ctrl =
53 sca->cpu[vcpu->vcpu_id].sigp_ctrl;
54
55 c = sigp_ctrl.c;
56 scn = sigp_ctrl.scn;
57 }
5e044315 58 read_unlock(&vcpu->kvm->arch.sca_lock);
a5bd7647
ED
59
60 if (src_id)
7d43bafc 61 *src_id = scn;
a5bd7647 62
7d43bafc 63 return c && atomic_read(&vcpu->arch.sie_block->cpuflags) &
a5bd7647
ED
64 CPUSTAT_ECALL_PEND;
65}
66
67static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
68{
bc784cce 69 int expect, rc;
a5bd7647 70
5e044315 71 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc
ED
72 if (vcpu->kvm->arch.use_esca) {
73 struct esca_block *sca = vcpu->kvm->arch.sca;
74 union esca_sigp_ctrl *sigp_ctrl =
75 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
76 union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
77
78 new_val.scn = src_id;
79 new_val.c = 1;
80 old_val.c = 0;
81
82 expect = old_val.value;
83 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
84 } else {
85 struct bsca_block *sca = vcpu->kvm->arch.sca;
86 union bsca_sigp_ctrl *sigp_ctrl =
87 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
88 union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
bc784cce 89
7d43bafc
ED
90 new_val.scn = src_id;
91 new_val.c = 1;
92 old_val.c = 0;
93
94 expect = old_val.value;
95 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
96 }
5e044315 97 read_unlock(&vcpu->kvm->arch.sca_lock);
bc784cce
ED
98
99 if (rc != expect) {
a5bd7647
ED
100 /* another external call is pending */
101 return -EBUSY;
102 }
103 atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
104 return 0;
105}
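/*
 * Note: the cmpxchg() above only succeeds if no external call was
 * pending yet (the "c" bit was still 0); otherwise the SCA entry is
 * left untouched and the caller sees -EBUSY.  CPUSTAT_ECALL_PEND in
 * the cpuflags mirrors the pending state (see sca_ext_call_pending()).
 */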
106
107static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
108{
a5bd7647 109 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
7d43bafc 110 int rc, expect;
a5bd7647
ED
111
112 atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
5e044315 113 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc
ED
114 if (vcpu->kvm->arch.use_esca) {
115 struct esca_block *sca = vcpu->kvm->arch.sca;
116 union esca_sigp_ctrl *sigp_ctrl =
117 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
118 union esca_sigp_ctrl old = *sigp_ctrl;
119
120 expect = old.value;
121 rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
122 } else {
123 struct bsca_block *sca = vcpu->kvm->arch.sca;
124 union bsca_sigp_ctrl *sigp_ctrl =
125 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
126 union bsca_sigp_ctrl old = *sigp_ctrl;
127
128 expect = old.value;
129 rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
130 }
5e044315 131 read_unlock(&vcpu->kvm->arch.sca_lock);
7d43bafc 132 WARN_ON(rc != expect); /* cannot clear? */
a5bd7647
ED
133}
134
3c038e6b 135int psw_extint_disabled(struct kvm_vcpu *vcpu)
ba5c1e9b
CO
136{
137 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
138}
139
d8346b7d
CH
140static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
141{
142 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
143}
144
48a3e950
CH
145static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
146{
147 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
148}
149
ba5c1e9b
CO
150static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
151{
fee0e0fd
DH
152 return psw_extint_disabled(vcpu) &&
153 psw_ioint_disabled(vcpu) &&
154 psw_mchk_disabled(vcpu);
ba5c1e9b
CO
155}
156
bb78c5ec
DH
157static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
158{
159 if (psw_extint_disabled(vcpu) ||
160 !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
161 return 0;
f71d0dc5
DH
162 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
163 /* No timer interrupts when single stepping */
164 return 0;
bb78c5ec
DH
165 return 1;
166}
167
b4aec925
DH
168static int ckc_irq_pending(struct kvm_vcpu *vcpu)
169{
60417fcc 170 if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
b4aec925
DH
171 return 0;
172 return ckc_interrupts_enabled(vcpu);
173}
174
175static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
176{
177 return !psw_extint_disabled(vcpu) &&
178 (vcpu->arch.sie_block->gcr[0] & 0x400ul);
179}
180
181static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
182{
183 return (vcpu->arch.sie_block->cputm >> 63) &&
184 cpu_timer_interrupts_enabled(vcpu);
185}
186
6d3da241 187static inline int is_ioirq(unsigned long irq_type)
79fd50c6 188{
6d3da241
JF
189 return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
190 (irq_type <= IRQ_PEND_IO_ISC_7));
191}
79fd50c6 192
6d3da241
JF
193static uint64_t isc_to_isc_bits(int isc)
194{
79fd50c6
CH
195 return (0x80 >> isc) << 24;
196}
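/*
 * Example: isc 0 maps to 0x80000000 and isc 7 to 0x01000000, i.e. the
 * single-bit interruption subclass masks as they appear in CR6.
 */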
197
6d3da241 198static inline u8 int_word_to_isc(u32 int_word)
ba5c1e9b 199{
6d3da241
JF
200 return (int_word & 0x38000000) >> 27;
201}
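/*
 * The ISC is a three bit field in the I/O interruption word; the mask
 * 0x38000000 and the shift by 27 extract it as a value from 0 to 7.
 */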
202
5f94c58e 203static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
6d3da241 204{
5f94c58e
DH
205 return vcpu->kvm->arch.float_int.pending_irqs |
206 vcpu->arch.local_int.pending_irqs;
383d0b05
JF
207}
208
6d3da241
JF
209static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
210 unsigned long active_mask)
211{
212 int i;
213
214 for (i = 0; i <= MAX_ISC; i++)
215 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
216 active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
217
218 return active_mask;
219}
220
221static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
383d0b05 222{
6d3da241
JF
223 unsigned long active_mask;
224
5f94c58e 225 active_mask = pending_irqs(vcpu);
ffeca0ae
JF
226 if (!active_mask)
227 return 0;
383d0b05
JF
228
229 if (psw_extint_disabled(vcpu))
230 active_mask &= ~IRQ_PEND_EXT_MASK;
6d3da241
JF
231 if (psw_ioint_disabled(vcpu))
232 active_mask &= ~IRQ_PEND_IO_MASK;
233 else
234 active_mask = disable_iscs(vcpu, active_mask);
383d0b05
JF
235 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
236 __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
237 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
238 __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
239 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
240 __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
241 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
242 __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
6d3da241
JF
243 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
244 __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
383d0b05
JF
245 if (psw_mchk_disabled(vcpu))
246 active_mask &= ~IRQ_PEND_MCHK_MASK;
6d3da241
JF
247 if (!(vcpu->arch.sie_block->gcr[14] &
248 vcpu->kvm->arch.float_int.mchk.cr14))
249 __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
383d0b05 250
6cddd432
DH
251 /*
252 * STOP irqs will never be actively delivered. They are triggered via
253 * intercept requests and cleared when the stop intercept is performed.
254 */
255 __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
256
383d0b05
JF
257 return active_mask;
258}
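/*
 * In short: start from all pending local and floating interrupts and
 * mask out every class the guest has currently disabled via the PSW
 * (external, I/O, machine check) or via control registers 0, 6 and 14.
 */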
259
ba5c1e9b
CO
260static void __set_cpu_idle(struct kvm_vcpu *vcpu)
261{
805de8f4 262 atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
ba5c1e9b
CO
263 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
264}
265
266static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
267{
805de8f4 268 atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
ba5c1e9b
CO
269 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
270}
271
272static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
273{
805de8f4
PZ
274 atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
275 &vcpu->arch.sie_block->cpuflags);
ba5c1e9b 276 vcpu->arch.sie_block->lctl = 0x0000;
27291e21
DH
277 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
278
279 if (guestdbg_enabled(vcpu)) {
280 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
281 LCTL_CR10 | LCTL_CR11);
282 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
283 }
ba5c1e9b
CO
284}
285
286static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
287{
805de8f4 288 atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
ba5c1e9b
CO
289}
290
6d3da241
JF
291static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
292{
5f94c58e 293 if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
6d3da241
JF
294 return;
295 else if (psw_ioint_disabled(vcpu))
296 __set_cpuflag(vcpu, CPUSTAT_IO_INT);
297 else
298 vcpu->arch.sie_block->lctl |= LCTL_CR6;
299}
300
383d0b05
JF
301static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
302{
5f94c58e 303 if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
383d0b05
JF
304 return;
305 if (psw_extint_disabled(vcpu))
306 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
307 else
308 vcpu->arch.sie_block->lctl |= LCTL_CR0;
309}
310
311static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
312{
5f94c58e 313 if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
383d0b05
JF
314 return;
315 if (psw_mchk_disabled(vcpu))
316 vcpu->arch.sie_block->ictl |= ICTL_LPSW;
317 else
318 vcpu->arch.sie_block->lctl |= LCTL_CR14;
319}
320
6cddd432
DH
321static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
322{
323 if (kvm_s390_is_stop_irq_pending(vcpu))
324 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
325}
326
6d3da241
JF
327/* Set interception request for non-deliverable interrupts */
328static void set_intercept_indicators(struct kvm_vcpu *vcpu)
383d0b05 329{
6d3da241 330 set_intercept_indicators_io(vcpu);
383d0b05
JF
331 set_intercept_indicators_ext(vcpu);
332 set_intercept_indicators_mchk(vcpu);
6cddd432 333 set_intercept_indicators_stop(vcpu);
383d0b05
JF
334}
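/*
 * These indicators make the SIE drop back into the host once the guest
 * re-enables a currently blocked interrupt class, either through an
 * interception request in the cpuflags or by intercepting changes to
 * the PSW or the relevant control registers (ictl/lctl).
 */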
335
8a2ef71b
JF
336static u16 get_ilc(struct kvm_vcpu *vcpu)
337{
8a2ef71b
JF
338 switch (vcpu->arch.sie_block->icptcode) {
339 case ICPT_INST:
340 case ICPT_INSTPROGI:
341 case ICPT_OPEREXC:
342 case ICPT_PARTEXEC:
343 case ICPT_IOINST:
344 /* last instruction only stored for these icptcodes */
33b412ac 345 return insn_length(vcpu->arch.sie_block->ipa >> 8);
8a2ef71b
JF
346 case ICPT_PROGI:
347 return vcpu->arch.sie_block->pgmilc;
348 default:
349 return 0;
350 }
351}
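/*
 * The returned value is the length of the last guest instruction in
 * bytes (2, 4 or 6, or 0 if unknown); it is used to rewind the PSW for
 * nullifying program interruptions and is stored as the ILC during
 * program interrupt delivery.
 */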
352
0fb97abe
JF
353static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
354{
383d0b05 355 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
0fb97abe
JF
356 int rc;
357
358 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
359 0, 0);
360
361 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
362 (u16 *)__LC_EXT_INT_CODE);
467fc298 363 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
0fb97abe
JF
364 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
365 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
366 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
367 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
383d0b05 368 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
99e20009 369 return rc ? -EFAULT : 0;
0fb97abe
JF
370}
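/*
 * Most __deliver_* helpers follow the pattern above: write the
 * interruption parameters and the old PSW into the guest lowcore, load
 * the new PSW from the lowcore, and report -EFAULT if any of the
 * lowcore accesses fail.
 */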
371
372static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
373{
383d0b05 374 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
0fb97abe
JF
375 int rc;
376
377 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
378 0, 0);
379
380 rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
381 (u16 __user *)__LC_EXT_INT_CODE);
467fc298 382 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
0fb97abe
JF
383 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
384 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
385 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
386 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
383d0b05 387 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
99e20009 388 return rc ? -EFAULT : 0;
0fb97abe
JF
389}
390
383d0b05 391static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
0fb97abe 392{
383d0b05
JF
393 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
394 struct kvm_s390_ext_info ext;
0fb97abe
JF
395 int rc;
396
383d0b05
JF
397 spin_lock(&li->lock);
398 ext = li->irq.ext;
399 clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
400 li->irq.ext.ext_params2 = 0;
401 spin_unlock(&li->lock);
402
3f24ba15
CB
403 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
404 ext.ext_params2);
0fb97abe
JF
405 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
406 KVM_S390_INT_PFAULT_INIT,
383d0b05 407 0, ext.ext_params2);
0fb97abe
JF
408
409 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
410 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
411 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
412 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
413 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
414 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
383d0b05 415 rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
99e20009 416 return rc ? -EFAULT : 0;
0fb97abe
JF
417}
418
383d0b05 419static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
0fb97abe 420{
6d3da241 421 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
383d0b05 422 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
6d3da241 423 struct kvm_s390_mchk_info mchk = {};
bc17de7c 424 unsigned long adtl_status_addr;
6d3da241
JF
425 int deliver = 0;
426 int rc = 0;
0fb97abe 427
6d3da241 428 spin_lock(&fi->lock);
383d0b05 429 spin_lock(&li->lock);
6d3da241
JF
430 if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
431 test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
432 /*
433 * If there was an exigent machine check pending, then any
434 * repressible machine checks that might have been pending
435 * are indicated along with it, so always clear bits for
436 * repressible and exigent interrupts
437 */
438 mchk = li->irq.mchk;
439 clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
440 clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
441 memset(&li->irq.mchk, 0, sizeof(mchk));
442 deliver = 1;
443 }
383d0b05 444 /*
6d3da241
JF
445 * We indicate floating repressible conditions along with
446 * other pending conditions. Channel Report Pending and Channel
 447 * Subsystem damage are the only two and are indicated by
448 * bits in mcic and masked in cr14.
383d0b05 449 */
6d3da241
JF
450 if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
451 mchk.mcic |= fi->mchk.mcic;
452 mchk.cr14 |= fi->mchk.cr14;
453 memset(&fi->mchk, 0, sizeof(mchk));
454 deliver = 1;
455 }
383d0b05 456 spin_unlock(&li->lock);
6d3da241 457 spin_unlock(&fi->lock);
383d0b05 458
6d3da241 459 if (deliver) {
3f24ba15 460 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
6d3da241
JF
461 mchk.mcic);
462 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
463 KVM_S390_MCHK,
464 mchk.cr14, mchk.mcic);
465
466 rc = kvm_s390_vcpu_store_status(vcpu,
467 KVM_S390_STORE_STATUS_PREFIXED);
468 rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
469 &adtl_status_addr,
470 sizeof(unsigned long));
471 rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
472 adtl_status_addr);
473 rc |= put_guest_lc(vcpu, mchk.mcic,
474 (u64 __user *) __LC_MCCK_CODE);
475 rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
476 (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
477 rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
478 &mchk.fixed_logout,
479 sizeof(mchk.fixed_logout));
480 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
481 &vcpu->arch.sie_block->gpsw,
482 sizeof(psw_t));
483 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
484 &vcpu->arch.sie_block->gpsw,
485 sizeof(psw_t));
486 }
99e20009 487 return rc ? -EFAULT : 0;
0fb97abe
JF
488}
489
490static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
491{
383d0b05 492 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
0fb97abe
JF
493 int rc;
494
3f24ba15 495 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
0fb97abe
JF
496 vcpu->stat.deliver_restart_signal++;
497 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
498
499 rc = write_guest_lc(vcpu,
500 offsetof(struct _lowcore, restart_old_psw),
501 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
502 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
503 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
383d0b05 504 clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
99e20009 505 return rc ? -EFAULT : 0;
0fb97abe
JF
506}
507
383d0b05 508static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
0fb97abe 509{
383d0b05
JF
510 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
511 struct kvm_s390_prefix_info prefix;
512
513 spin_lock(&li->lock);
514 prefix = li->irq.prefix;
515 li->irq.prefix.address = 0;
516 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
517 spin_unlock(&li->lock);
0fb97abe 518
0fb97abe
JF
519 vcpu->stat.deliver_prefix_signal++;
520 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
521 KVM_S390_SIGP_SET_PREFIX,
383d0b05 522 prefix.address, 0);
0fb97abe 523
383d0b05 524 kvm_s390_set_prefix(vcpu, prefix.address);
0fb97abe
JF
525 return 0;
526}
527
383d0b05 528static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
0fb97abe 529{
383d0b05 530 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
0fb97abe 531 int rc;
383d0b05
JF
532 int cpu_addr;
533
534 spin_lock(&li->lock);
535 cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
536 clear_bit(cpu_addr, li->sigp_emerg_pending);
537 if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
538 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
539 spin_unlock(&li->lock);
0fb97abe 540
3f24ba15 541 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
0fb97abe 542 vcpu->stat.deliver_emergency_signal++;
383d0b05
JF
543 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
544 cpu_addr, 0);
0fb97abe
JF
545
546 rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
547 (u16 *)__LC_EXT_INT_CODE);
383d0b05 548 rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
0fb97abe
JF
549 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
550 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
551 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
552 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
99e20009 553 return rc ? -EFAULT : 0;
0fb97abe
JF
554}
555
383d0b05 556static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
0fb97abe 557{
383d0b05
JF
558 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
559 struct kvm_s390_extcall_info extcall;
0fb97abe
JF
560 int rc;
561
383d0b05
JF
562 spin_lock(&li->lock);
563 extcall = li->irq.extcall;
564 li->irq.extcall.code = 0;
565 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
566 spin_unlock(&li->lock);
567
3f24ba15 568 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
0fb97abe
JF
569 vcpu->stat.deliver_external_call++;
570 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
571 KVM_S390_INT_EXTERNAL_CALL,
383d0b05 572 extcall.code, 0);
0fb97abe
JF
573
574 rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
575 (u16 *)__LC_EXT_INT_CODE);
383d0b05 576 rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
0fb97abe
JF
577 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
578 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
579 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
580 sizeof(psw_t));
99e20009 581 return rc ? -EFAULT : 0;
0fb97abe
JF
582}
583
383d0b05 584static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
8712836b 585{
383d0b05
JF
586 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
587 struct kvm_s390_pgm_info pgm_info;
a9a846fd 588 int rc = 0, nullifying = false;
8a2ef71b 589 u16 ilc = get_ilc(vcpu);
8712836b 590
383d0b05
JF
591 spin_lock(&li->lock);
592 pgm_info = li->irq.pgm;
593 clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
594 memset(&li->irq.pgm, 0, sizeof(pgm_info));
595 spin_unlock(&li->lock);
596
3f24ba15 597 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
383d0b05 598 pgm_info.code, ilc);
0fb97abe
JF
599 vcpu->stat.deliver_program_int++;
600 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
383d0b05 601 pgm_info.code, 0);
0fb97abe 602
383d0b05 603 switch (pgm_info.code & ~PGM_PER) {
8712836b
DH
604 case PGM_AFX_TRANSLATION:
605 case PGM_ASX_TRANSLATION:
606 case PGM_EX_TRANSLATION:
607 case PGM_LFX_TRANSLATION:
608 case PGM_LSTE_SEQUENCE:
609 case PGM_LSX_TRANSLATION:
610 case PGM_LX_TRANSLATION:
611 case PGM_PRIMARY_AUTHORITY:
612 case PGM_SECONDARY_AUTHORITY:
a9a846fd
TH
613 nullifying = true;
614 /* fall through */
8712836b 615 case PGM_SPACE_SWITCH:
383d0b05 616 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
8712836b
DH
617 (u64 *)__LC_TRANS_EXC_CODE);
618 break;
619 case PGM_ALEN_TRANSLATION:
620 case PGM_ALE_SEQUENCE:
621 case PGM_ASTE_INSTANCE:
622 case PGM_ASTE_SEQUENCE:
623 case PGM_ASTE_VALIDITY:
624 case PGM_EXTENDED_AUTHORITY:
383d0b05 625 rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
8712836b 626 (u8 *)__LC_EXC_ACCESS_ID);
a9a846fd 627 nullifying = true;
8712836b
DH
628 break;
629 case PGM_ASCE_TYPE:
630 case PGM_PAGE_TRANSLATION:
631 case PGM_REGION_FIRST_TRANS:
632 case PGM_REGION_SECOND_TRANS:
633 case PGM_REGION_THIRD_TRANS:
634 case PGM_SEGMENT_TRANSLATION:
383d0b05 635 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
8712836b 636 (u64 *)__LC_TRANS_EXC_CODE);
383d0b05 637 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
8712836b 638 (u8 *)__LC_EXC_ACCESS_ID);
383d0b05 639 rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
8712836b 640 (u8 *)__LC_OP_ACCESS_ID);
a9a846fd 641 nullifying = true;
8712836b
DH
642 break;
643 case PGM_MONITOR:
383d0b05 644 rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
a36c5393 645 (u16 *)__LC_MON_CLASS_NR);
383d0b05 646 rc |= put_guest_lc(vcpu, pgm_info.mon_code,
8712836b
DH
647 (u64 *)__LC_MON_CODE);
648 break;
403c8648 649 case PGM_VECTOR_PROCESSING:
8712836b 650 case PGM_DATA:
383d0b05 651 rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
8712836b
DH
652 (u32 *)__LC_DATA_EXC_CODE);
653 break;
654 case PGM_PROTECTION:
383d0b05 655 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
8712836b 656 (u64 *)__LC_TRANS_EXC_CODE);
383d0b05 657 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
8712836b
DH
658 (u8 *)__LC_EXC_ACCESS_ID);
659 break;
a9a846fd
TH
660 case PGM_STACK_FULL:
661 case PGM_STACK_EMPTY:
662 case PGM_STACK_SPECIFICATION:
663 case PGM_STACK_TYPE:
664 case PGM_STACK_OPERATION:
665 case PGM_TRACE_TABEL:
666 case PGM_CRYPTO_OPERATION:
667 nullifying = true;
668 break;
8712836b
DH
669 }
670
383d0b05
JF
671 if (pgm_info.code & PGM_PER) {
672 rc |= put_guest_lc(vcpu, pgm_info.per_code,
8712836b 673 (u8 *) __LC_PER_CODE);
383d0b05 674 rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
8712836b 675 (u8 *)__LC_PER_ATMID);
383d0b05 676 rc |= put_guest_lc(vcpu, pgm_info.per_address,
8712836b 677 (u64 *) __LC_PER_ADDRESS);
383d0b05 678 rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
8712836b
DH
679 (u8 *) __LC_PER_ACCESS_ID);
680 }
681
a9a846fd
TH
682 if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
683 kvm_s390_rewind_psw(vcpu, ilc);
684
8a2ef71b 685 rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
2ba45968
DH
686 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
687 (u64 *) __LC_LAST_BREAK);
383d0b05 688 rc |= put_guest_lc(vcpu, pgm_info.code,
8712836b
DH
689 (u16 *)__LC_PGM_INT_CODE);
690 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
691 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
692 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
693 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
99e20009 694 return rc ? -EFAULT : 0;
0fb97abe
JF
695}
696
6d3da241 697static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
0fb97abe 698{
6d3da241
JF
699 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
700 struct kvm_s390_ext_info ext;
701 int rc = 0;
702
703 spin_lock(&fi->lock);
704 if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
705 spin_unlock(&fi->lock);
706 return 0;
707 }
708 ext = fi->srv_signal;
709 memset(&fi->srv_signal, 0, sizeof(ext));
710 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
711 spin_unlock(&fi->lock);
0fb97abe 712
3f24ba15 713 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
6d3da241 714 ext.ext_params);
0fb97abe 715 vcpu->stat.deliver_service_signal++;
6d3da241
JF
716 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
717 ext.ext_params, 0);
0fb97abe
JF
718
719 rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
467fc298 720 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
0fb97abe
JF
721 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
722 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
723 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
724 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
6d3da241 725 rc |= put_guest_lc(vcpu, ext.ext_params,
0fb97abe 726 (u32 *)__LC_EXT_PARAMS);
6d3da241 727
99e20009 728 return rc ? -EFAULT : 0;
0fb97abe
JF
729}
730
6d3da241 731static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
0fb97abe 732{
6d3da241
JF
733 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
734 struct kvm_s390_interrupt_info *inti;
735 int rc = 0;
0fb97abe 736
6d3da241
JF
737 spin_lock(&fi->lock);
738 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
739 struct kvm_s390_interrupt_info,
740 list);
741 if (inti) {
6d3da241
JF
742 list_del(&inti->list);
743 fi->counters[FIRQ_CNTR_PFAULT] -= 1;
744 }
745 if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
746 clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
747 spin_unlock(&fi->lock);
8712836b 748
6d3da241 749 if (inti) {
3f24ba15
CB
750 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
751 KVM_S390_INT_PFAULT_DONE, 0,
752 inti->ext.ext_params2);
753 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
754 inti->ext.ext_params2);
755
6d3da241
JF
756 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
757 (u16 *)__LC_EXT_INT_CODE);
758 rc |= put_guest_lc(vcpu, PFAULT_DONE,
759 (u16 *)__LC_EXT_CPU_ADDR);
760 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
761 &vcpu->arch.sie_block->gpsw,
762 sizeof(psw_t));
763 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
764 &vcpu->arch.sie_block->gpsw,
765 sizeof(psw_t));
766 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
767 (u64 *)__LC_EXT_PARAMS2);
768 kfree(inti);
769 }
99e20009 770 return rc ? -EFAULT : 0;
0fb97abe
JF
771}
772
6d3da241 773static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
0fb97abe 774{
6d3da241
JF
775 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
776 struct kvm_s390_interrupt_info *inti;
777 int rc = 0;
0fb97abe 778
6d3da241
JF
779 spin_lock(&fi->lock);
780 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
781 struct kvm_s390_interrupt_info,
782 list);
783 if (inti) {
784 VCPU_EVENT(vcpu, 4,
3f24ba15 785 "deliver: virtio parm: 0x%x,parm64: 0x%llx",
6d3da241
JF
786 inti->ext.ext_params, inti->ext.ext_params2);
787 vcpu->stat.deliver_virtio_interrupt++;
788 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
789 inti->type,
790 inti->ext.ext_params,
791 inti->ext.ext_params2);
792 list_del(&inti->list);
793 fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
794 }
795 if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
796 clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
797 spin_unlock(&fi->lock);
0fb97abe 798
6d3da241
JF
799 if (inti) {
800 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
801 (u16 *)__LC_EXT_INT_CODE);
802 rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
803 (u16 *)__LC_EXT_CPU_ADDR);
804 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
805 &vcpu->arch.sie_block->gpsw,
806 sizeof(psw_t));
807 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
808 &vcpu->arch.sie_block->gpsw,
809 sizeof(psw_t));
810 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
811 (u32 *)__LC_EXT_PARAMS);
812 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
813 (u64 *)__LC_EXT_PARAMS2);
814 kfree(inti);
815 }
99e20009 816 return rc ? -EFAULT : 0;
0fb97abe
JF
817}
818
819static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
6d3da241 820 unsigned long irq_type)
0fb97abe 821{
6d3da241
JF
822 struct list_head *isc_list;
823 struct kvm_s390_float_interrupt *fi;
824 struct kvm_s390_interrupt_info *inti = NULL;
825 int rc = 0;
0fb97abe 826
6d3da241 827 fi = &vcpu->kvm->arch.float_int;
8712836b 828
6d3da241
JF
829 spin_lock(&fi->lock);
830 isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
831 inti = list_first_entry_or_null(isc_list,
832 struct kvm_s390_interrupt_info,
833 list);
834 if (inti) {
3f24ba15 835 VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
6d3da241
JF
836 vcpu->stat.deliver_io_int++;
837 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
838 inti->type,
839 ((__u32)inti->io.subchannel_id << 16) |
840 inti->io.subchannel_nr,
841 ((__u64)inti->io.io_int_parm << 32) |
842 inti->io.io_int_word);
843 list_del(&inti->list);
844 fi->counters[FIRQ_CNTR_IO] -= 1;
845 }
846 if (list_empty(isc_list))
847 clear_bit(irq_type, &fi->pending_irqs);
848 spin_unlock(&fi->lock);
849
850 if (inti) {
851 rc = put_guest_lc(vcpu, inti->io.subchannel_id,
852 (u16 *)__LC_SUBCHANNEL_ID);
853 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
854 (u16 *)__LC_SUBCHANNEL_NR);
855 rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
856 (u32 *)__LC_IO_INT_PARM);
857 rc |= put_guest_lc(vcpu, inti->io.io_int_word,
858 (u32 *)__LC_IO_INT_WORD);
859 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
860 &vcpu->arch.sie_block->gpsw,
861 sizeof(psw_t));
862 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
863 &vcpu->arch.sie_block->gpsw,
864 sizeof(psw_t));
865 kfree(inti);
866 }
383d0b05 867
99e20009 868 return rc ? -EFAULT : 0;
383d0b05
JF
869}
870
871typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
872
873static const deliver_irq_t deliver_irq_funcs[] = {
874 [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
6d3da241 875 [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
383d0b05
JF
876 [IRQ_PEND_PROG] = __deliver_prog,
877 [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
878 [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
879 [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
880 [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
881 [IRQ_PEND_RESTART] = __deliver_restart,
383d0b05
JF
882 [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
883 [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
6d3da241
JF
884 [IRQ_PEND_EXT_SERVICE] = __deliver_service,
885 [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
886 [IRQ_PEND_VIRTIO] = __deliver_virtio,
383d0b05
JF
887};
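/*
 * I/O interrupts (IRQ_PEND_IO_ISC_0..7) are intentionally not part of
 * this table; they encode their ISC in the irq type and are dispatched
 * through __deliver_io() by kvm_s390_deliver_pending_interrupts().
 */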
888
ea5f4969
DH
889/* Check whether an external call is pending (deliverable or not) */
890int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
4953919f 891{
ea5f4969 892 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
4953919f 893
37c5f6c8 894 if (!sclp.has_sigpif)
ea5f4969 895 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
4953919f 896
a5bd7647 897 return sca_ext_call_pending(vcpu, NULL);
4953919f
DH
898}
899
9a022067 900int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
ba5c1e9b 901{
4d32ad6b
DH
902 if (deliverable_irqs(vcpu))
903 return 1;
ba5c1e9b 904
4d32ad6b
DH
905 if (kvm_cpu_has_pending_timer(vcpu))
906 return 1;
ba5c1e9b 907
ea5f4969 908 /* external call pending and deliverable */
4d32ad6b 909 if (kvm_s390_ext_call_pending(vcpu) &&
ea5f4969
DH
910 !psw_extint_disabled(vcpu) &&
911 (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
4d32ad6b 912 return 1;
4953919f 913
4d32ad6b
DH
914 if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
915 return 1;
916 return 0;
ba5c1e9b
CO
917}
918
3d80840d
MT
919int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
920{
b4aec925 921 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
3d80840d
MT
922}
923
ba5c1e9b
CO
924int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
925{
926 u64 now, sltime;
ba5c1e9b
CO
927
928 vcpu->stat.exit_wait_state++;
ba5c1e9b 929
0759d068 930 /* fast path */
118b862b 931 if (kvm_arch_vcpu_runnable(vcpu))
0759d068 932 return 0;
e52b2af5 933
ba5c1e9b
CO
934 if (psw_interrupts_disabled(vcpu)) {
935 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
b8e660b8 936 return -EOPNOTSUPP; /* disabled wait */
ba5c1e9b
CO
937 }
938
bb78c5ec 939 if (!ckc_interrupts_enabled(vcpu)) {
ba5c1e9b 940 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
bda343ef 941 __set_cpu_idle(vcpu);
ba5c1e9b
CO
942 goto no_timer;
943 }
944
60417fcc 945 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
ed4f2094 946 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
bda343ef
DH
947
948 /* underflow */
949 if (vcpu->arch.sie_block->ckc < now)
950 return 0;
951
952 __set_cpu_idle(vcpu);
ca872302 953 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
3f24ba15 954 VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
ba5c1e9b 955no_timer:
800c1065 956 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
0759d068 957 kvm_vcpu_block(vcpu);
ba5c1e9b 958 __unset_cpu_idle(vcpu);
800c1065
TH
959 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
960
2d00f759 961 hrtimer_cancel(&vcpu->arch.ckc_timer);
ba5c1e9b
CO
962 return 0;
963}
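/*
 * The hrtimer armed above (ckc_timer, whose callback is
 * kvm_s390_idle_wakeup()) fires once the guest's clock comparator is
 * due, so an enabled wait is bounded by the next timer interrupt even
 * though the VCPU thread blocks in kvm_vcpu_block().
 */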
964
0e9c85a5
DH
965void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
966{
967 if (waitqueue_active(&vcpu->wq)) {
968 /*
969 * The vcpu gave up the cpu voluntarily, mark it as a good
970 * yield-candidate.
971 */
972 vcpu->preempted = true;
973 wake_up_interruptible(&vcpu->wq);
ce2e4f0b 974 vcpu->stat.halt_wakeup++;
0e9c85a5
DH
975 }
976}
977
ca872302
CB
978enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
979{
980 struct kvm_vcpu *vcpu;
2d00f759 981 u64 now, sltime;
ca872302
CB
982
983 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
60417fcc 984 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
2d00f759 985 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
ca872302 986
2d00f759
DH
987 /*
988 * If the monotonic clock runs faster than the tod clock we might be
989 * woken up too early and have to go back to sleep to avoid deadlocks.
990 */
991 if (vcpu->arch.sie_block->ckc > now &&
992 hrtimer_forward_now(timer, ns_to_ktime(sltime)))
993 return HRTIMER_RESTART;
994 kvm_s390_vcpu_wakeup(vcpu);
ca872302
CB
995 return HRTIMER_NORESTART;
996}
ba5c1e9b 997
2ed10cc1
JF
998void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
999{
1000 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2ed10cc1 1001
4ae3c081 1002 spin_lock(&li->lock);
383d0b05
JF
1003 li->pending_irqs = 0;
1004 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1005 memset(&li->irq, 0, sizeof(li->irq));
4ae3c081 1006 spin_unlock(&li->lock);
4953919f 1007
a5bd7647 1008 sca_clear_ext_call(vcpu);
2ed10cc1
JF
1009}
1010
614aeab4 1011int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
ba5c1e9b 1012{
180c12fb 1013 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
383d0b05 1014 deliver_irq_t func;
79395031 1015 int rc = 0;
383d0b05 1016 unsigned long irq_type;
6d3da241 1017 unsigned long irqs;
ba5c1e9b
CO
1018
1019 __reset_intercept_indicators(vcpu);
ba5c1e9b 1020
383d0b05
JF
1021 /* pending ckc conditions might have been invalidated */
1022 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
b4aec925 1023 if (ckc_irq_pending(vcpu))
383d0b05
JF
1024 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1025
b4aec925
DH
1026 /* pending cpu timer conditions might have been invalidated */
1027 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1028 if (cpu_timer_irq_pending(vcpu))
1029 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1030
ffeca0ae 1031 while ((irqs = deliverable_irqs(vcpu)) && !rc) {
383d0b05 1032 /* bits are in the order of interrupt priority */
6d3da241 1033 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
6d3da241
JF
1034 if (is_ioirq(irq_type)) {
1035 rc = __deliver_io(vcpu, irq_type);
1036 } else {
1037 func = deliver_irq_funcs[irq_type];
1038 if (!func) {
1039 WARN_ON_ONCE(func == NULL);
1040 clear_bit(irq_type, &li->pending_irqs);
1041 continue;
1042 }
1043 rc = func(vcpu);
383d0b05 1044 }
ffeca0ae 1045 }
383d0b05 1046
6d3da241 1047 set_intercept_indicators(vcpu);
79395031
JF
1048
1049 return rc;
ba5c1e9b
CO
1050}
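/*
 * Deliverable interrupts are processed strictly in priority order (the
 * bit order of the pending_irqs bitmaps), delivery stops early if one
 * of the helpers fails, and interception indicators are re-armed for
 * whatever remains pending but not currently deliverable.
 */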
1051
383d0b05 1052static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1053{
1054 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1055
ed2afcfa
DH
1056 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1057 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1058 irq->u.pgm.code, 0);
1059
238293b1
DH
1060 if (irq->u.pgm.code == PGM_PER) {
1061 li->irq.pgm.code |= PGM_PER;
1062 /* only modify PER related information */
1063 li->irq.pgm.per_address = irq->u.pgm.per_address;
1064 li->irq.pgm.per_code = irq->u.pgm.per_code;
1065 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1066 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1067 } else if (!(irq->u.pgm.code & PGM_PER)) {
1068 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1069 irq->u.pgm.code;
1070 /* only modify non-PER information */
1071 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1072 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1073 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1074 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1075 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1076 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1077 } else {
1078 li->irq.pgm = irq->u.pgm;
1079 }
9185124e 1080 set_bit(IRQ_PEND_PROG, &li->pending_irqs);
0146a7b0
JF
1081 return 0;
1082}
1083
383d0b05 1084static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1085{
1086 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1087
3f24ba15
CB
1088 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1089 irq->u.ext.ext_params2);
383d0b05
JF
1090 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1091 irq->u.ext.ext_params,
ed2afcfa 1092 irq->u.ext.ext_params2);
383d0b05
JF
1093
1094 li->irq.ext = irq->u.ext;
1095 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
805de8f4 1096 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
0146a7b0
JF
1097 return 0;
1098}
1099
0675d92d 1100static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1101{
1102 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
383d0b05 1103 struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
ea5f4969 1104 uint16_t src_id = irq->u.extcall.code;
0146a7b0 1105
3f24ba15 1106 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
ea5f4969 1107 src_id);
383d0b05 1108 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
ed2afcfa 1109 src_id, 0);
ea5f4969
DH
1110
1111 /* sending vcpu invalid */
152e9f65 1112 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
ea5f4969
DH
1113 return -EINVAL;
1114
37c5f6c8 1115 if (sclp.has_sigpif)
a5bd7647 1116 return sca_inject_ext_call(vcpu, src_id);
383d0b05 1117
b938eace 1118 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
ea5f4969 1119 return -EBUSY;
383d0b05 1120 *extcall = irq->u.extcall;
805de8f4 1121 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
0146a7b0
JF
1122 return 0;
1123}
1124
383d0b05 1125static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1126{
1127 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
383d0b05 1128 struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
0146a7b0 1129
ed2afcfa 1130 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
556cc0da 1131 irq->u.prefix.address);
383d0b05 1132 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
ed2afcfa 1133 irq->u.prefix.address, 0);
383d0b05 1134
a3a9c59a
DH
1135 if (!is_vcpu_stopped(vcpu))
1136 return -EBUSY;
1137
383d0b05
JF
1138 *prefix = irq->u.prefix;
1139 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
0146a7b0
JF
1140 return 0;
1141}
1142
6cddd432 1143#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
383d0b05 1144static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1145{
1146 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2822545f 1147 struct kvm_s390_stop_info *stop = &li->irq.stop;
6cddd432 1148 int rc = 0;
0146a7b0 1149
ed2afcfa 1150 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
383d0b05 1151
2822545f
DH
1152 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1153 return -EINVAL;
1154
6cddd432
DH
1155 if (is_vcpu_stopped(vcpu)) {
1156 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1157 rc = kvm_s390_store_status_unloaded(vcpu,
1158 KVM_S390_STORE_STATUS_NOADDR);
1159 return rc;
1160 }
1161
1162 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1163 return -EBUSY;
2822545f 1164 stop->flags = irq->u.stop.flags;
6cddd432 1165 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
0146a7b0
JF
1166 return 0;
1167}
1168
1169static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
383d0b05 1170 struct kvm_s390_irq *irq)
0146a7b0
JF
1171{
1172 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1173
3f24ba15 1174 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
ed2afcfa 1175 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
383d0b05
JF
1176
1177 set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
0146a7b0
JF
1178 return 0;
1179}
1180
1181static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
383d0b05 1182 struct kvm_s390_irq *irq)
0146a7b0
JF
1183{
1184 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1185
3f24ba15 1186 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
383d0b05
JF
1187 irq->u.emerg.code);
1188 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
ed2afcfa 1189 irq->u.emerg.code, 0);
383d0b05 1190
b85de33a
DH
1191 /* sending vcpu invalid */
1192 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1193 return -EINVAL;
1194
49538d12 1195 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
383d0b05 1196 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
805de8f4 1197 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
0146a7b0
JF
1198 return 0;
1199}
1200
383d0b05 1201static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
0146a7b0
JF
1202{
1203 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
383d0b05 1204 struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
0146a7b0 1205
3f24ba15 1206 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
556cc0da 1207 irq->u.mchk.mcic);
383d0b05 1208 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
ed2afcfa 1209 irq->u.mchk.mcic);
383d0b05
JF
1210
1211 /*
fc2020cf
JF
1212 * Because repressible machine checks can be indicated along with
1213 * exigent machine checks (PoP, Chapter 11, Interruption action)
1214 * we need to combine cr14, mcic and external damage code.
1215 * Failing storage address and the logout area should not be or'ed
1216 * together, we just indicate the last occurrence of the corresponding
1217 * machine check
383d0b05 1218 */
fc2020cf 1219 mchk->cr14 |= irq->u.mchk.cr14;
383d0b05 1220 mchk->mcic |= irq->u.mchk.mcic;
fc2020cf
JF
1221 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1222 mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1223 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1224 sizeof(mchk->fixed_logout));
383d0b05
JF
1225 if (mchk->mcic & MCHK_EX_MASK)
1226 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1227 else if (mchk->mcic & MCHK_REP_MASK)
1228 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
0146a7b0
JF
1229 return 0;
1230}
1231
383d0b05 1232static int __inject_ckc(struct kvm_vcpu *vcpu)
0146a7b0
JF
1233{
1234 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1235
3f24ba15 1236 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
383d0b05 1237 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
ed2afcfa 1238 0, 0);
383d0b05
JF
1239
1240 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
805de8f4 1241 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
0146a7b0
JF
1242 return 0;
1243}
1244
383d0b05 1245static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
0146a7b0
JF
1246{
1247 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1248
3f24ba15 1249 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
383d0b05 1250 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
ed2afcfa 1251 0, 0);
383d0b05
JF
1252
1253 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
805de8f4 1254 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
ba5c1e9b
CO
1255 return 0;
1256}
1257
6d3da241
JF
1258static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1259 int isc, u32 schid)
1260{
1261 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1262 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1263 struct kvm_s390_interrupt_info *iter;
1264 u16 id = (schid & 0xffff0000U) >> 16;
1265 u16 nr = schid & 0x0000ffffU;
1266
1267 spin_lock(&fi->lock);
1268 list_for_each_entry(iter, isc_list, list) {
1269 if (schid && (id != iter->io.subchannel_id ||
1270 nr != iter->io.subchannel_nr))
1271 continue;
1272 /* found an appropriate entry */
1273 list_del_init(&iter->list);
1274 fi->counters[FIRQ_CNTR_IO] -= 1;
1275 if (list_empty(isc_list))
1276 clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
1277 spin_unlock(&fi->lock);
1278 return iter;
1279 }
1280 spin_unlock(&fi->lock);
1281 return NULL;
1282}
383d0b05 1283
6d3da241
JF
1284/*
1285 * Dequeue and return an I/O interrupt matching any of the interruption
1286 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1287 */
fa6b7fe9 1288struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
6d3da241
JF
1289 u64 isc_mask, u32 schid)
1290{
1291 struct kvm_s390_interrupt_info *inti = NULL;
1292 int isc;
1293
1294 for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1295 if (isc_mask & isc_to_isc_bits(isc))
1296 inti = get_io_int(kvm, isc, schid);
1297 }
1298 return inti;
1299}
1300
1301#define SCCB_MASK 0xFFFFFFF8
1302#define SCCB_EVENT_PENDING 0x3
1303
1304static int __inject_service(struct kvm *kvm,
1305 struct kvm_s390_interrupt_info *inti)
1306{
1307 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1308
1309 spin_lock(&fi->lock);
1310 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1311 /*
 1312 * Early versions of the QEMU s390 BIOS inject several
 1313 * service interrupts one after another without handling the
 1314 * busy condition code.
 1315 * We silently ignore those superfluous SCCB values.
 1316 * A future version of QEMU will take care of serializing
 1317 * SERVC requests.
1318 */
1319 if (fi->srv_signal.ext_params & SCCB_MASK)
1320 goto out;
1321 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1322 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1323out:
1324 spin_unlock(&fi->lock);
1325 kfree(inti);
1326 return 0;
1327}
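/*
 * Only one SCCB address can be pending at a time: if a previous service
 * signal has not been delivered yet, the new SCCB value is silently
 * dropped and only the event-pending bits are merged (see the comment
 * about early QEMU BIOS versions above).
 */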
1328
1329static int __inject_virtio(struct kvm *kvm,
1330 struct kvm_s390_interrupt_info *inti)
1331{
1332 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1333
1334 spin_lock(&fi->lock);
1335 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1336 spin_unlock(&fi->lock);
1337 return -EBUSY;
1338 }
1339 fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1340 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1341 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1342 spin_unlock(&fi->lock);
1343 return 0;
1344}
1345
1346static int __inject_pfault_done(struct kvm *kvm,
1347 struct kvm_s390_interrupt_info *inti)
1348{
1349 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1350
1351 spin_lock(&fi->lock);
1352 if (fi->counters[FIRQ_CNTR_PFAULT] >=
1353 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1354 spin_unlock(&fi->lock);
1355 return -EBUSY;
1356 }
1357 fi->counters[FIRQ_CNTR_PFAULT] += 1;
1358 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1359 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1360 spin_unlock(&fi->lock);
1361 return 0;
1362}
1363
1364#define CR_PENDING_SUBCLASS 28
1365static int __inject_float_mchk(struct kvm *kvm,
1366 struct kvm_s390_interrupt_info *inti)
1367{
1368 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1369
1370 spin_lock(&fi->lock);
1371 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1372 fi->mchk.mcic |= inti->mchk.mcic;
1373 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1374 spin_unlock(&fi->lock);
1375 kfree(inti);
1376 return 0;
1377}
1378
1379static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
fa6b7fe9
CH
1380{
1381 struct kvm_s390_float_interrupt *fi;
6d3da241
JF
1382 struct list_head *list;
1383 int isc;
fa6b7fe9 1384
fa6b7fe9
CH
1385 fi = &kvm->arch.float_int;
1386 spin_lock(&fi->lock);
6d3da241
JF
1387 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1388 spin_unlock(&fi->lock);
1389 return -EBUSY;
a91b8ebe 1390 }
6d3da241
JF
1391 fi->counters[FIRQ_CNTR_IO] += 1;
1392
1393 isc = int_word_to_isc(inti->io.io_int_word);
1394 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1395 list_add_tail(&inti->list, list);
1396 set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
fa6b7fe9 1397 spin_unlock(&fi->lock);
6d3da241 1398 return 0;
fa6b7fe9 1399}
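/*
 * Floating I/O interrupts are queued per interruption subclass; the
 * total number of queued I/O interrupts is capped at
 * KVM_S390_MAX_FLOAT_IRQS, beyond which the injector gets -EBUSY.
 */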
ba5c1e9b 1400
96e0ed23
DH
1401/*
1402 * Find a destination VCPU for a floating irq and kick it.
1403 */
1404static void __floating_irq_kick(struct kvm *kvm, u64 type)
ba5c1e9b 1405{
96e0ed23 1406 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
180c12fb 1407 struct kvm_s390_local_interrupt *li;
96e0ed23
DH
1408 struct kvm_vcpu *dst_vcpu;
1409 int sigcpu, online_vcpus, nr_tries = 0;
1410
1411 online_vcpus = atomic_read(&kvm->online_vcpus);
1412 if (!online_vcpus)
1413 return;
1414
1415 /* find idle VCPUs first, then round robin */
1416 sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1417 if (sigcpu == online_vcpus) {
1418 do {
1419 sigcpu = fi->next_rr_cpu;
1420 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1421 /* avoid endless loops if all vcpus are stopped */
1422 if (nr_tries++ >= online_vcpus)
1423 return;
1424 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1425 }
1426 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1427
1428 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1429 li = &dst_vcpu->arch.local_int;
1430 spin_lock(&li->lock);
1431 switch (type) {
1432 case KVM_S390_MCHK:
805de8f4 1433 atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
96e0ed23
DH
1434 break;
1435 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
805de8f4 1436 atomic_or(CPUSTAT_IO_INT, li->cpuflags);
96e0ed23
DH
1437 break;
1438 default:
805de8f4 1439 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
96e0ed23
DH
1440 break;
1441 }
1442 spin_unlock(&li->lock);
1443 kvm_s390_vcpu_wakeup(dst_vcpu);
1444}
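/*
 * Target selection above prefers an idle (waiting) VCPU; if none is
 * idle, VCPUs are picked round-robin, skipping stopped ones.
 */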
1445
1446static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1447{
6d3da241
JF
1448 u64 type = READ_ONCE(inti->type);
1449 int rc;
ba5c1e9b 1450
6d3da241
JF
1451 switch (type) {
1452 case KVM_S390_MCHK:
1453 rc = __inject_float_mchk(kvm, inti);
1454 break;
1455 case KVM_S390_INT_VIRTIO:
1456 rc = __inject_virtio(kvm, inti);
1457 break;
1458 case KVM_S390_INT_SERVICE:
1459 rc = __inject_service(kvm, inti);
1460 break;
1461 case KVM_S390_INT_PFAULT_DONE:
1462 rc = __inject_pfault_done(kvm, inti);
1463 break;
1464 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1465 rc = __inject_io(kvm, inti);
1466 break;
1467 default:
a91b8ebe 1468 rc = -EINVAL;
c05c4186 1469 }
6d3da241
JF
1470 if (rc)
1471 return rc;
1472
96e0ed23 1473 __floating_irq_kick(kvm, type);
6d3da241 1474 return 0;
c05c4186
JF
1475}
1476
1477int kvm_s390_inject_vm(struct kvm *kvm,
1478 struct kvm_s390_interrupt *s390int)
1479{
1480 struct kvm_s390_interrupt_info *inti;
428d53be 1481 int rc;
c05c4186 1482
ba5c1e9b
CO
1483 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1484 if (!inti)
1485 return -ENOMEM;
1486
c05c4186
JF
1487 inti->type = s390int->type;
1488 switch (inti->type) {
ba5c1e9b 1489 case KVM_S390_INT_VIRTIO:
33e19115 1490 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
ba5c1e9b 1491 s390int->parm, s390int->parm64);
ba5c1e9b
CO
1492 inti->ext.ext_params = s390int->parm;
1493 inti->ext.ext_params2 = s390int->parm64;
1494 break;
1495 case KVM_S390_INT_SERVICE:
3f24ba15 1496 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
ba5c1e9b
CO
1497 inti->ext.ext_params = s390int->parm;
1498 break;
3c038e6b 1499 case KVM_S390_INT_PFAULT_DONE:
3c038e6b
DD
1500 inti->ext.ext_params2 = s390int->parm64;
1501 break;
48a3e950 1502 case KVM_S390_MCHK:
3f24ba15 1503 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
48a3e950 1504 s390int->parm64);
48a3e950
CH
1505 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1506 inti->mchk.mcic = s390int->parm64;
1507 break;
d8346b7d 1508 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
a37281b6 1509 if (inti->type & KVM_S390_INT_IO_AI_MASK)
d8346b7d
CH
1510 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1511 else
1512 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1513 s390int->type & IOINT_CSSID_MASK,
1514 s390int->type & IOINT_SSID_MASK,
1515 s390int->type & IOINT_SCHID_MASK);
d8346b7d
CH
1516 inti->io.subchannel_id = s390int->parm >> 16;
1517 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1518 inti->io.io_int_parm = s390int->parm64 >> 32;
1519 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1520 break;
ba5c1e9b
CO
1521 default:
1522 kfree(inti);
1523 return -EINVAL;
1524 }
ade38c31
CH
1525 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1526 2);
ba5c1e9b 1527
428d53be
DH
1528 rc = __inject_vm(kvm, inti);
1529 if (rc)
1530 kfree(inti);
1531 return rc;
ba5c1e9b
CO
1532}
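/*
 * Illustrative (hypothetical) usage, e.g. raising a floating service
 * interrupt for an SCCB address obtained elsewhere; "sccb_addr" is a
 * placeholder, not a variable defined in this file:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = sccb_addr,
 *	};
 *	rc = kvm_s390_inject_vm(kvm, &s390int);
 */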
1533
15462e37 1534int kvm_s390_reinject_io_int(struct kvm *kvm,
2f32d4ea
CH
1535 struct kvm_s390_interrupt_info *inti)
1536{
15462e37 1537 return __inject_vm(kvm, inti);
2f32d4ea
CH
1538}
1539
383d0b05
JF
1540int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1541 struct kvm_s390_irq *irq)
1542{
1543 irq->type = s390int->type;
1544 switch (irq->type) {
1545 case KVM_S390_PROGRAM_INT:
1546 if (s390int->parm & 0xffff0000)
1547 return -EINVAL;
1548 irq->u.pgm.code = s390int->parm;
1549 break;
1550 case KVM_S390_SIGP_SET_PREFIX:
1551 irq->u.prefix.address = s390int->parm;
1552 break;
2822545f
DH
1553 case KVM_S390_SIGP_STOP:
1554 irq->u.stop.flags = s390int->parm;
1555 break;
383d0b05 1556 case KVM_S390_INT_EXTERNAL_CALL:
94d1f564 1557 if (s390int->parm & 0xffff0000)
383d0b05
JF
1558 return -EINVAL;
1559 irq->u.extcall.code = s390int->parm;
1560 break;
1561 case KVM_S390_INT_EMERGENCY:
94d1f564 1562 if (s390int->parm & 0xffff0000)
383d0b05
JF
1563 return -EINVAL;
1564 irq->u.emerg.code = s390int->parm;
1565 break;
1566 case KVM_S390_MCHK:
1567 irq->u.mchk.mcic = s390int->parm64;
1568 break;
1569 }
1570 return 0;
1571}
1572
6cddd432
DH
1573int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1574{
1575 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1576
1577 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1578}
1579
1580void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1581{
1582 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1583
1584 spin_lock(&li->lock);
1585 li->irq.stop.flags = 0;
1586 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1587 spin_unlock(&li->lock);
1588}
1589
static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

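/*
 * Illustrative sketch (editor's addition, not in the original source):
 * in-kernel callers inject per-vcpu interrupts through the wrapper
 * above, e.g. a SIGP handler could request a restart roughly like:
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_RESTART,
 *	};
 *
 *	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
 *
 * Taking li->lock only around do_inject_vcpu() keeps the wakeup call
 * outside the spinlock.
 */
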
static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

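/*
 * Illustrative sketch (editor's addition, not in the original source)
 * of the retry protocol this implies for userspace: the
 * KVM_DEV_FLIC_GET_ALL_IRQS attribute returns the number of interrupts
 * copied, or fails with ENOMEM when the supplied buffer was too small,
 * so a caller can grow and retry:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *	};
 *	__u64 len = 256 * sizeof(struct kvm_s390_irq);
 *	void *buf;
 *	int r;
 *
 *	for (;;) {
 *		buf = malloc(len);	// error handling omitted
 *		attr.attr = len;
 *		attr.addr = (__u64)(unsigned long) buf;
 *		r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *		if (r >= 0 || errno != ENOMEM)
 *			break;		// r is the number of irqs copied
 *		free(buf);		// too small: grow and retry
 *		len *= 2;
 *	}
 */
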
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

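/*
 * Illustrative sketch (editor's addition, not in the original source):
 * userspace feeds an array of struct kvm_s390_irq through
 * KVM_DEV_FLIC_ENQUEUE, with attr.attr holding the byte length of that
 * array (the parameter value below is a made-up example):
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.u.ext.ext_params = 0x00040001,	// example sclp parameter
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr = sizeof(irq),
 *		.addr = (__u64)(unsigned long) &irq,
 *	};
 *
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
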
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

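/*
 * Illustrative sketch (editor's addition, not in the original source):
 * registering an adapter via the FLIC device, assuming the uapi layout
 * of struct kvm_s390_io_adapter (id/isc/maskable/swap):
 *
 *	struct kvm_s390_io_adapter info = {
 *		.id = 0,
 *		.isc = 3,		// interruption subclass
 *		.maskable = 1,
 *		.swap = 0,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr = (__u64)(unsigned long) &info,
 *	};
 *
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
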
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

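/*
 * Editor's note (illustration, not in the original source): mapping
 * pins the host page backing the guest indicator area, so
 * adapter_indicators_set() can flip bits via page_address() without
 * faulting. The matching KVM_S390_IO_ADAPTER_MAP request carries only
 * the guest address:
 *
 *	struct kvm_s390_io_adapter_req req = {
 *		.id = 0,
 *		.type = KVM_S390_IO_ADAPTER_MAP,
 *		.addr = ind_gaddr,	// guest address of indicator page
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *		.addr = (__u64)(unsigned long) &req,
 *	};
 *
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
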
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

2073/* s390 floating irq controller (flic) */
2074struct kvm_device_ops kvm_flic_ops = {
2075 .name = "kvm-flic",
2076 .get_attr = flic_get_attr,
2077 .set_attr = flic_set_attr,
2078 .create = flic_create,
2079 .destroy = flic_destroy,
2080};
84223598
CH
2081
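/*
 * Illustrative sketch (editor's addition, not in the original source):
 * userspace obtains the flic_fd assumed in the earlier sketches by
 * creating this device on the VM file descriptor:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FLIC,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;
 *
 * flic_create() above ensures only one such device exists per VM.
 */
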
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

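/*
 * Worked example (editor's illustration, not in the original source):
 * for an indicator at page offset 0 with bit_nr 0 and swap set, the
 * result is 0 ^ 63 == 63 on 64-bit s390. XOR-ing with
 * BITS_PER_LONG - 1 mirrors the bit position within each unsigned
 * long, converting between the guest's MSB-first numbering of the
 * indicator area and the LSB-first numbering used by the generic
 * set_bit()/test_and_set_bit() helpers.
 */
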
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

2139/*
2140 * < 0 - not injected due to error
2141 * = 0 - coalesced, summary indicator already active
2142 * > 0 - injected interrupt
2143 */
2144static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2145 struct kvm *kvm, int irq_source_id, int level,
2146 bool line_status)
2147{
2148 int ret;
2149 struct s390_io_adapter *adapter;
2150
2151 /* We're only interested in the 0->1 transition. */
2152 if (!level)
2153 return 0;
2154 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2155 if (!adapter)
2156 return -1;
2157 down_read(&adapter->maps_lock);
2158 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2159 up_read(&adapter->maps_lock);
2160 if ((ret > 0) && !adapter->masked) {
2161 struct kvm_s390_interrupt s390int = {
2162 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2163 .parm = 0,
2164 .parm64 = (adapter->isc << 27) | 0x80000000,
2165 };
2166 ret = kvm_s390_inject_vm(kvm, &s390int);
2167 if (ret == 0)
2168 ret = 1;
2169 }
2170 return ret;
2171}
2172
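/*
 * Editor's note (illustration, not in the original source): on the
 * 0->1 transition the indicator and summary bits are set first; the
 * floating adapter interrupt (type KVM_S390_INT_IO(1, 0, 0, 0), with
 * the ISC encoded in the low word of parm64) is only injected when the
 * summary bit was previously clear, so interrupts coalesce while the
 * summary remains set, and no interrupt is injected at all while the
 * adapter is masked.
 */
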
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

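/*
 * Illustrative sketch (editor's addition, not in the original source):
 * a matching userspace routing entry, assuming the uapi layout of
 * struct kvm_irq_routing_s390_adapter; the gsi and address values are
 * placeholders:
 *
 *	struct kvm_irq_routing_entry e = {
 *		.gsi = gsi,
 *		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *		.u.adapter = {
 *			.ind_addr = ind_gaddr,		// guest addresses
 *			.summary_addr = summary_gaddr,
 *			.ind_offset = queue_index,	// bit numbers
 *			.summary_offset = 7,
 *			.adapter_id = 0,
 *		},
 *	};
 *
 * installed via KVM_SET_GSI_ROUTING and triggered through KVM_IRQFD or
 * KVM_IRQ_LINE on that gsi.
 */
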
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

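/*
 * Illustrative sketch (editor's addition, not in the original source):
 * this is the backend of the KVM_S390_SET_IRQ_STATE vcpu ioctl used
 * for migration. Assuming the uapi struct kvm_s390_irq_state,
 * restoring saved local interrupts could look like:
 *
 *	struct kvm_s390_irq_state state = {
 *		.buf = (__u64)(unsigned long) irqs,	// saved array
 *		.len = n_irqs * sizeof(struct kvm_s390_irq),
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
 *
 * The -EBUSY check above means this must run before the vcpu has
 * accumulated any new pending interrupts.
 */
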
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
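
/*
 * Illustrative note (editor's addition, not in the original source):
 * the matching KVM_S390_GET_IRQ_STATE vcpu ioctl returns the number of
 * bytes written here, or -ENOBUFS when the buffer is too small. Each
 * pending emergency signal is expanded into one kvm_s390_irq per
 * signalling cpu address, so a save buffer should allow for up to
 * roughly IRQ_PEND_COUNT + KVM_MAX_VCPUS entries.
 */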