KVM: s390: disabled wait cares about machine checks, not PER
arch/s390/kvm/interrupt.c (deliverable/linux.git)
1 /*
2 * handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008, 2015
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13 #include <linux/interrupt.h>
14 #include <linux/kvm_host.h>
15 #include <linux/hrtimer.h>
16 #include <linux/mmu_context.h>
17 #include <linux/signal.h>
18 #include <linux/slab.h>
19 #include <linux/bitmap.h>
20 #include <linux/vmalloc.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/dis.h>
23 #include <asm/uaccess.h>
24 #include <asm/sclp.h>
25 #include <asm/isc.h>
26 #include "kvm-s390.h"
27 #include "gaccess.h"
28 #include "trace-s390.h"
29
30 #define IOINT_SCHID_MASK 0x0000ffff
31 #define IOINT_SSID_MASK 0x00030000
32 #define IOINT_CSSID_MASK 0x03fc0000
33 #define PFAULT_INIT 0x0600
34 #define PFAULT_DONE 0x0680
35 #define VIRTIO_PARAM 0x0d00
36
37 int psw_extint_disabled(struct kvm_vcpu *vcpu)
38 {
39 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
40 }
41
42 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
43 {
44 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
45 }
46
47 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
48 {
49 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
50 }
51
52 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
53 {
54 return psw_extint_disabled(vcpu) &&
55 psw_ioint_disabled(vcpu) &&
56 psw_mchk_disabled(vcpu);
57 }
58
59 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
60 {
61 if (psw_extint_disabled(vcpu) ||
62 !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
63 return 0;
64 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
65 /* No timer interrupts when single stepping */
66 return 0;
67 return 1;
68 }
69
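/*
 * A clock comparator interrupt is pending once the guest TOD clock
 * (host TOD + epoch) has passed the clock comparator value and
 * clock comparator interrupts are enabled.
 */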
70 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
71 {
72 preempt_disable();
73 if (!(vcpu->arch.sie_block->ckc <
74 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
75 preempt_enable();
76 return 0;
77 }
78 preempt_enable();
79 return ckc_interrupts_enabled(vcpu);
80 }
81
82 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
83 {
84 return !psw_extint_disabled(vcpu) &&
85 (vcpu->arch.sie_block->gcr[0] & 0x400ul);
86 }
87
88 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
89 {
90 return (vcpu->arch.sie_block->cputm >> 63) &&
91 cpu_timer_interrupts_enabled(vcpu);
92 }
93
94 static inline int is_ioirq(unsigned long irq_type)
95 {
96 return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
97 (irq_type <= IRQ_PEND_IO_ISC_7));
98 }
99
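/* map an interruption subclass (ISC) number to its mask bit as used in CR6 */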
100 static uint64_t isc_to_isc_bits(int isc)
101 {
102 return (0x80 >> isc) << 24;
103 }
104
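/* extract the interruption subclass from an I/O interruption word */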
105 static inline u8 int_word_to_isc(u32 int_word)
106 {
107 return (int_word & 0x38000000) >> 27;
108 }
109
110 static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
111 {
112 return vcpu->kvm->arch.float_int.pending_irqs;
113 }
114
115 static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
116 {
117 return vcpu->arch.local_int.pending_irqs;
118 }
119
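/* clear pending I/O interrupt bits for all subclasses masked off in CR6 */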
120 static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
121 unsigned long active_mask)
122 {
123 int i;
124
125 for (i = 0; i <= MAX_ISC; i++)
126 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
127 active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
128
129 return active_mask;
130 }
131
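/*
 * Compute the set of pending local and floating interrupts that are
 * currently deliverable, based on the PSW interrupt masks and the
 * relevant control register bits.
 */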
132 static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
133 {
134 unsigned long active_mask;
135
136 active_mask = pending_local_irqs(vcpu);
137 active_mask |= pending_floating_irqs(vcpu);
138 if (!active_mask)
139 return 0;
140
141 if (psw_extint_disabled(vcpu))
142 active_mask &= ~IRQ_PEND_EXT_MASK;
143 if (psw_ioint_disabled(vcpu))
144 active_mask &= ~IRQ_PEND_IO_MASK;
145 else
146 active_mask = disable_iscs(vcpu, active_mask);
147 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
148 __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
149 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
150 __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
151 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
152 __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
153 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
154 __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
155 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
156 __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
157 if (psw_mchk_disabled(vcpu))
158 active_mask &= ~IRQ_PEND_MCHK_MASK;
159 if (!(vcpu->arch.sie_block->gcr[14] &
160 vcpu->kvm->arch.float_int.mchk.cr14))
161 __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
162
163 /*
164 * STOP irqs will never be actively delivered. They are triggered via
165 * intercept requests and cleared when the stop intercept is performed.
166 */
167 __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
168
169 return active_mask;
170 }
171
172 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
173 {
174 atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
175 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
176 }
177
178 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
179 {
180 atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
181 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
182 }
183
184 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
185 {
186 atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
187 &vcpu->arch.sie_block->cpuflags);
188 vcpu->arch.sie_block->lctl = 0x0000;
189 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
190
191 if (guestdbg_enabled(vcpu)) {
192 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
193 LCTL_CR10 | LCTL_CR11);
194 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
195 }
196 }
197
198 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
199 {
200 atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
201 }
202
203 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
204 {
205 if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
206 return;
207 else if (psw_ioint_disabled(vcpu))
208 __set_cpuflag(vcpu, CPUSTAT_IO_INT);
209 else
210 vcpu->arch.sie_block->lctl |= LCTL_CR6;
211 }
212
213 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
214 {
215 if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
216 return;
217 if (psw_extint_disabled(vcpu))
218 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
219 else
220 vcpu->arch.sie_block->lctl |= LCTL_CR0;
221 }
222
223 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
224 {
225 if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
226 return;
227 if (psw_mchk_disabled(vcpu))
228 vcpu->arch.sie_block->ictl |= ICTL_LPSW;
229 else
230 vcpu->arch.sie_block->lctl |= LCTL_CR14;
231 }
232
233 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
234 {
235 if (kvm_s390_is_stop_irq_pending(vcpu))
236 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
237 }
238
239 /* Set interception request for non-deliverable interrupts */
240 static void set_intercept_indicators(struct kvm_vcpu *vcpu)
241 {
242 set_intercept_indicators_io(vcpu);
243 set_intercept_indicators_ext(vcpu);
244 set_intercept_indicators_mchk(vcpu);
245 set_intercept_indicators_stop(vcpu);
246 }
247
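/* instruction length of the last guest instruction, where the hardware provides it */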
248 static u16 get_ilc(struct kvm_vcpu *vcpu)
249 {
250 switch (vcpu->arch.sie_block->icptcode) {
251 case ICPT_INST:
252 case ICPT_INSTPROGI:
253 case ICPT_OPEREXC:
254 case ICPT_PARTEXEC:
255 case ICPT_IOINST:
256 /* last instruction only stored for these icptcodes */
257 return insn_length(vcpu->arch.sie_block->ipa >> 8);
258 case ICPT_PROGI:
259 return vcpu->arch.sie_block->pgmilc;
260 default:
261 return 0;
262 }
263 }
264
265 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
266 {
267 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
268 int rc;
269
270 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
271 0, 0);
272
273 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
274 (u16 *)__LC_EXT_INT_CODE);
275 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
276 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
277 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
278 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
279 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
280 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
281 return rc ? -EFAULT : 0;
282 }
283
284 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
285 {
286 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
287 int rc;
288
289 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
290 0, 0);
291
292 rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
293 (u16 __user *)__LC_EXT_INT_CODE);
294 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
295 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
296 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
297 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
298 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
299 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
300 return rc ? -EFAULT : 0;
301 }
302
303 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
304 {
305 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
306 struct kvm_s390_ext_info ext;
307 int rc;
308
309 spin_lock(&li->lock);
310 ext = li->irq.ext;
311 clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
312 li->irq.ext.ext_params2 = 0;
313 spin_unlock(&li->lock);
314
315 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
316 ext.ext_params2);
317 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
318 KVM_S390_INT_PFAULT_INIT,
319 0, ext.ext_params2);
320
321 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
322 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
323 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
324 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
325 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
326 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
327 rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
328 return rc ? -EFAULT : 0;
329 }
330
331 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
332 {
333 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
334 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
335 struct kvm_s390_mchk_info mchk = {};
336 unsigned long adtl_status_addr;
337 int deliver = 0;
338 int rc = 0;
339
340 spin_lock(&fi->lock);
341 spin_lock(&li->lock);
342 if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
343 test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
344 /*
345 * If there was an exigent machine check pending, then any
346 * repressible machine checks that might have been pending
347 * are indicated along with it, so always clear bits for
348 * repressible and exigent interrupts
349 */
350 mchk = li->irq.mchk;
351 clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
352 clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
353 memset(&li->irq.mchk, 0, sizeof(mchk));
354 deliver = 1;
355 }
356 /*
357 * We indicate floating repressible conditions along with
358 * other pending conditions. Channel Report Pending and Channel
359 * Subsystem damage are the only two and are indicated by
360 * bits in mcic and masked in cr14.
361 */
362 if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
363 mchk.mcic |= fi->mchk.mcic;
364 mchk.cr14 |= fi->mchk.cr14;
365 memset(&fi->mchk, 0, sizeof(mchk));
366 deliver = 1;
367 }
368 spin_unlock(&li->lock);
369 spin_unlock(&fi->lock);
370
371 if (deliver) {
372 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
373 mchk.mcic);
374 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
375 KVM_S390_MCHK,
376 mchk.cr14, mchk.mcic);
377
378 rc = kvm_s390_vcpu_store_status(vcpu,
379 KVM_S390_STORE_STATUS_PREFIXED);
380 rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
381 &adtl_status_addr,
382 sizeof(unsigned long));
383 rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
384 adtl_status_addr);
385 rc |= put_guest_lc(vcpu, mchk.mcic,
386 (u64 __user *) __LC_MCCK_CODE);
387 rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
388 (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
389 rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
390 &mchk.fixed_logout,
391 sizeof(mchk.fixed_logout));
392 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
393 &vcpu->arch.sie_block->gpsw,
394 sizeof(psw_t));
395 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
396 &vcpu->arch.sie_block->gpsw,
397 sizeof(psw_t));
398 }
399 return rc ? -EFAULT : 0;
400 }
401
402 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
403 {
404 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
405 int rc;
406
407 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
408 vcpu->stat.deliver_restart_signal++;
409 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
410
411 rc = write_guest_lc(vcpu,
412 offsetof(struct _lowcore, restart_old_psw),
413 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
414 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
415 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
416 clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
417 return rc ? -EFAULT : 0;
418 }
419
420 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
421 {
422 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
423 struct kvm_s390_prefix_info prefix;
424
425 spin_lock(&li->lock);
426 prefix = li->irq.prefix;
427 li->irq.prefix.address = 0;
428 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
429 spin_unlock(&li->lock);
430
431 vcpu->stat.deliver_prefix_signal++;
432 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
433 KVM_S390_SIGP_SET_PREFIX,
434 prefix.address, 0);
435
436 kvm_s390_set_prefix(vcpu, prefix.address);
437 return 0;
438 }
439
440 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
441 {
442 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
443 int rc;
444 int cpu_addr;
445
446 spin_lock(&li->lock);
447 cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
448 clear_bit(cpu_addr, li->sigp_emerg_pending);
449 if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
450 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
451 spin_unlock(&li->lock);
452
453 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
454 vcpu->stat.deliver_emergency_signal++;
455 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
456 cpu_addr, 0);
457
458 rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
459 (u16 *)__LC_EXT_INT_CODE);
460 rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
461 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
462 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
463 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
464 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
465 return rc ? -EFAULT : 0;
466 }
467
468 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
469 {
470 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
471 struct kvm_s390_extcall_info extcall;
472 int rc;
473
474 spin_lock(&li->lock);
475 extcall = li->irq.extcall;
476 li->irq.extcall.code = 0;
477 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
478 spin_unlock(&li->lock);
479
480 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
481 vcpu->stat.deliver_external_call++;
482 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
483 KVM_S390_INT_EXTERNAL_CALL,
484 extcall.code, 0);
485
486 rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
487 (u16 *)__LC_EXT_INT_CODE);
488 rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
489 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
490 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
491 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
492 sizeof(psw_t));
493 return rc ? -EFAULT : 0;
494 }
495
496 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
497 {
498 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
499 struct kvm_s390_pgm_info pgm_info;
500 int rc = 0, nullifying = false;
501 u16 ilc = get_ilc(vcpu);
502
503 spin_lock(&li->lock);
504 pgm_info = li->irq.pgm;
505 clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
506 memset(&li->irq.pgm, 0, sizeof(pgm_info));
507 spin_unlock(&li->lock);
508
509 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
510 pgm_info.code, ilc);
511 vcpu->stat.deliver_program_int++;
512 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
513 pgm_info.code, 0);
514
515 switch (pgm_info.code & ~PGM_PER) {
516 case PGM_AFX_TRANSLATION:
517 case PGM_ASX_TRANSLATION:
518 case PGM_EX_TRANSLATION:
519 case PGM_LFX_TRANSLATION:
520 case PGM_LSTE_SEQUENCE:
521 case PGM_LSX_TRANSLATION:
522 case PGM_LX_TRANSLATION:
523 case PGM_PRIMARY_AUTHORITY:
524 case PGM_SECONDARY_AUTHORITY:
525 nullifying = true;
526 /* fall through */
527 case PGM_SPACE_SWITCH:
528 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
529 (u64 *)__LC_TRANS_EXC_CODE);
530 break;
531 case PGM_ALEN_TRANSLATION:
532 case PGM_ALE_SEQUENCE:
533 case PGM_ASTE_INSTANCE:
534 case PGM_ASTE_SEQUENCE:
535 case PGM_ASTE_VALIDITY:
536 case PGM_EXTENDED_AUTHORITY:
537 rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
538 (u8 *)__LC_EXC_ACCESS_ID);
539 nullifying = true;
540 break;
541 case PGM_ASCE_TYPE:
542 case PGM_PAGE_TRANSLATION:
543 case PGM_REGION_FIRST_TRANS:
544 case PGM_REGION_SECOND_TRANS:
545 case PGM_REGION_THIRD_TRANS:
546 case PGM_SEGMENT_TRANSLATION:
547 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
548 (u64 *)__LC_TRANS_EXC_CODE);
549 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
550 (u8 *)__LC_EXC_ACCESS_ID);
551 rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
552 (u8 *)__LC_OP_ACCESS_ID);
553 nullifying = true;
554 break;
555 case PGM_MONITOR:
556 rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
557 (u16 *)__LC_MON_CLASS_NR);
558 rc |= put_guest_lc(vcpu, pgm_info.mon_code,
559 (u64 *)__LC_MON_CODE);
560 break;
561 case PGM_VECTOR_PROCESSING:
562 case PGM_DATA:
563 rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
564 (u32 *)__LC_DATA_EXC_CODE);
565 break;
566 case PGM_PROTECTION:
567 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
568 (u64 *)__LC_TRANS_EXC_CODE);
569 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
570 (u8 *)__LC_EXC_ACCESS_ID);
571 break;
572 case PGM_STACK_FULL:
573 case PGM_STACK_EMPTY:
574 case PGM_STACK_SPECIFICATION:
575 case PGM_STACK_TYPE:
576 case PGM_STACK_OPERATION:
577 case PGM_TRACE_TABEL:
578 case PGM_CRYPTO_OPERATION:
579 nullifying = true;
580 break;
581 }
582
583 if (pgm_info.code & PGM_PER) {
584 rc |= put_guest_lc(vcpu, pgm_info.per_code,
585 (u8 *) __LC_PER_CODE);
586 rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
587 (u8 *)__LC_PER_ATMID);
588 rc |= put_guest_lc(vcpu, pgm_info.per_address,
589 (u64 *) __LC_PER_ADDRESS);
590 rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
591 (u8 *) __LC_PER_ACCESS_ID);
592 }
593
594 if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
595 kvm_s390_rewind_psw(vcpu, ilc);
596
597 rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
598 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
599 (u64 *) __LC_LAST_BREAK);
600 rc |= put_guest_lc(vcpu, pgm_info.code,
601 (u16 *)__LC_PGM_INT_CODE);
602 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
603 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
604 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
605 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
606 return rc ? -EFAULT : 0;
607 }
608
609 static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
610 {
611 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
612 struct kvm_s390_ext_info ext;
613 int rc = 0;
614
615 spin_lock(&fi->lock);
616 if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
617 spin_unlock(&fi->lock);
618 return 0;
619 }
620 ext = fi->srv_signal;
621 memset(&fi->srv_signal, 0, sizeof(ext));
622 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
623 spin_unlock(&fi->lock);
624
625 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
626 ext.ext_params);
627 vcpu->stat.deliver_service_signal++;
628 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
629 ext.ext_params, 0);
630
631 rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
632 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
633 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
634 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
635 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
636 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
637 rc |= put_guest_lc(vcpu, ext.ext_params,
638 (u32 *)__LC_EXT_PARAMS);
639
640 return rc ? -EFAULT : 0;
641 }
642
643 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
644 {
645 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
646 struct kvm_s390_interrupt_info *inti;
647 int rc = 0;
648
649 spin_lock(&fi->lock);
650 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
651 struct kvm_s390_interrupt_info,
652 list);
653 if (inti) {
654 list_del(&inti->list);
655 fi->counters[FIRQ_CNTR_PFAULT] -= 1;
656 }
657 if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
658 clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
659 spin_unlock(&fi->lock);
660
661 if (inti) {
662 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
663 KVM_S390_INT_PFAULT_DONE, 0,
664 inti->ext.ext_params2);
665 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
666 inti->ext.ext_params2);
667
668 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
669 (u16 *)__LC_EXT_INT_CODE);
670 rc |= put_guest_lc(vcpu, PFAULT_DONE,
671 (u16 *)__LC_EXT_CPU_ADDR);
672 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
673 &vcpu->arch.sie_block->gpsw,
674 sizeof(psw_t));
675 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
676 &vcpu->arch.sie_block->gpsw,
677 sizeof(psw_t));
678 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
679 (u64 *)__LC_EXT_PARAMS2);
680 kfree(inti);
681 }
682 return rc ? -EFAULT : 0;
683 }
684
685 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
686 {
687 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
688 struct kvm_s390_interrupt_info *inti;
689 int rc = 0;
690
691 spin_lock(&fi->lock);
692 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
693 struct kvm_s390_interrupt_info,
694 list);
695 if (inti) {
696 VCPU_EVENT(vcpu, 4,
697 "deliver: virtio parm: 0x%x,parm64: 0x%llx",
698 inti->ext.ext_params, inti->ext.ext_params2);
699 vcpu->stat.deliver_virtio_interrupt++;
700 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
701 inti->type,
702 inti->ext.ext_params,
703 inti->ext.ext_params2);
704 list_del(&inti->list);
705 fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
706 }
707 if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
708 clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
709 spin_unlock(&fi->lock);
710
711 if (inti) {
712 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
713 (u16 *)__LC_EXT_INT_CODE);
714 rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
715 (u16 *)__LC_EXT_CPU_ADDR);
716 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
717 &vcpu->arch.sie_block->gpsw,
718 sizeof(psw_t));
719 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
720 &vcpu->arch.sie_block->gpsw,
721 sizeof(psw_t));
722 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
723 (u32 *)__LC_EXT_PARAMS);
724 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
725 (u64 *)__LC_EXT_PARAMS2);
726 kfree(inti);
727 }
728 return rc ? -EFAULT : 0;
729 }
730
731 static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
732 unsigned long irq_type)
733 {
734 struct list_head *isc_list;
735 struct kvm_s390_float_interrupt *fi;
736 struct kvm_s390_interrupt_info *inti = NULL;
737 int rc = 0;
738
739 fi = &vcpu->kvm->arch.float_int;
740
741 spin_lock(&fi->lock);
742 isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
743 inti = list_first_entry_or_null(isc_list,
744 struct kvm_s390_interrupt_info,
745 list);
746 if (inti) {
747 VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
748 vcpu->stat.deliver_io_int++;
749 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
750 inti->type,
751 ((__u32)inti->io.subchannel_id << 16) |
752 inti->io.subchannel_nr,
753 ((__u64)inti->io.io_int_parm << 32) |
754 inti->io.io_int_word);
755 list_del(&inti->list);
756 fi->counters[FIRQ_CNTR_IO] -= 1;
757 }
758 if (list_empty(isc_list))
759 clear_bit(irq_type, &fi->pending_irqs);
760 spin_unlock(&fi->lock);
761
762 if (inti) {
763 rc = put_guest_lc(vcpu, inti->io.subchannel_id,
764 (u16 *)__LC_SUBCHANNEL_ID);
765 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
766 (u16 *)__LC_SUBCHANNEL_NR);
767 rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
768 (u32 *)__LC_IO_INT_PARM);
769 rc |= put_guest_lc(vcpu, inti->io.io_int_word,
770 (u32 *)__LC_IO_INT_WORD);
771 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
772 &vcpu->arch.sie_block->gpsw,
773 sizeof(psw_t));
774 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
775 &vcpu->arch.sie_block->gpsw,
776 sizeof(psw_t));
777 kfree(inti);
778 }
779
780 return rc ? -EFAULT : 0;
781 }
782
783 typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
784
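/*
 * Delivery functions indexed by local/floating interrupt type;
 * I/O interrupts are handled separately via __deliver_io().
 */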
785 static const deliver_irq_t deliver_irq_funcs[] = {
786 [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
787 [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
788 [IRQ_PEND_PROG] = __deliver_prog,
789 [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
790 [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
791 [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
792 [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
793 [IRQ_PEND_RESTART] = __deliver_restart,
794 [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
795 [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
796 [IRQ_PEND_EXT_SERVICE] = __deliver_service,
797 [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
798 [IRQ_PEND_VIRTIO] = __deliver_virtio,
799 };
800
801 /* Check whether an external call is pending (deliverable or not) */
802 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
803 {
804 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
805 uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
806
807 if (!sclp.has_sigpif)
808 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
809
810 return (sigp_ctrl & SIGP_CTRL_C) &&
811 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
812 }
813
814 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
815 {
816 int rc;
817
818 rc = !!deliverable_irqs(vcpu);
819
820 if (!rc && kvm_cpu_has_pending_timer(vcpu))
821 rc = 1;
822
823 /* external call pending and deliverable */
824 if (!rc && kvm_s390_ext_call_pending(vcpu) &&
825 !psw_extint_disabled(vcpu) &&
826 (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
827 rc = 1;
828
829 if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
830 rc = 1;
831
832 return rc;
833 }
834
835 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
836 {
837 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
838 }
839
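/*
 * Called when the guest enters an enabled wait state: block the VCPU
 * until an interrupt becomes deliverable, arming a timer for the
 * clock comparator if needed. A wait with all interrupts disabled is
 * not handled here (-EOPNOTSUPP).
 */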
840 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
841 {
842 u64 now, sltime;
843
844 vcpu->stat.exit_wait_state++;
845
846 /* fast path */
847 if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
848 return 0;
849
850 if (psw_interrupts_disabled(vcpu)) {
851 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
852 return -EOPNOTSUPP; /* disabled wait */
853 }
854
855 if (!ckc_interrupts_enabled(vcpu)) {
856 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
857 __set_cpu_idle(vcpu);
858 goto no_timer;
859 }
860
861 preempt_disable();
862 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
863 preempt_enable();
864 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
865
866 /* underflow */
867 if (vcpu->arch.sie_block->ckc < now)
868 return 0;
869
870 __set_cpu_idle(vcpu);
871 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
872 VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
873 no_timer:
874 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
875 kvm_vcpu_block(vcpu);
876 __unset_cpu_idle(vcpu);
877 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
878
879 hrtimer_cancel(&vcpu->arch.ckc_timer);
880 return 0;
881 }
882
883 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
884 {
885 if (waitqueue_active(&vcpu->wq)) {
886 /*
887 * The vcpu gave up the cpu voluntarily, mark it as a good
888 * yield-candidate.
889 */
890 vcpu->preempted = true;
891 wake_up_interruptible(&vcpu->wq);
892 vcpu->stat.halt_wakeup++;
893 }
894 }
895
896 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
897 {
898 struct kvm_vcpu *vcpu;
899 u64 now, sltime;
900
901 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
902 preempt_disable();
903 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
904 preempt_enable();
905 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
906
907 /*
908 * If the monotonic clock runs faster than the tod clock we might be
909 * woken up too early and have to go back to sleep to avoid deadlocks.
910 */
911 if (vcpu->arch.sie_block->ckc > now &&
912 hrtimer_forward_now(timer, ns_to_ktime(sltime)))
913 return HRTIMER_RESTART;
914 kvm_s390_vcpu_wakeup(vcpu);
915 return HRTIMER_NORESTART;
916 }
917
918 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
919 {
920 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
921
922 spin_lock(&li->lock);
923 li->pending_irqs = 0;
924 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
925 memset(&li->irq, 0, sizeof(li->irq));
926 spin_unlock(&li->lock);
927
928 /* clear pending external calls set by sigp interpretation facility */
929 atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
930 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
931 }
932
933 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
934 {
935 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
936 deliver_irq_t func;
937 int rc = 0;
938 unsigned long irq_type;
939 unsigned long irqs;
940
941 __reset_intercept_indicators(vcpu);
942
943 /* pending ckc conditions might have been invalidated */
944 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
945 if (ckc_irq_pending(vcpu))
946 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
947
948 /* pending cpu timer conditions might have been invalidated */
949 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
950 if (cpu_timer_irq_pending(vcpu))
951 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
952
953 while ((irqs = deliverable_irqs(vcpu)) && !rc) {
954 /* bits are in the order of interrupt priority */
955 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
956 if (is_ioirq(irq_type)) {
957 rc = __deliver_io(vcpu, irq_type);
958 } else {
959 func = deliver_irq_funcs[irq_type];
960 if (!func) {
961 WARN_ON_ONCE(func == NULL);
962 clear_bit(irq_type, &li->pending_irqs);
963 continue;
964 }
965 rc = func(vcpu);
966 }
967 }
968
969 set_intercept_indicators(vcpu);
970
971 return rc;
972 }
973
974 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
975 {
976 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
977
978 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
979 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
980 irq->u.pgm.code, 0);
981
982 li->irq.pgm = irq->u.pgm;
983 set_bit(IRQ_PEND_PROG, &li->pending_irqs);
984 return 0;
985 }
986
987 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
988 {
989 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
990 struct kvm_s390_irq irq;
991
992 spin_lock(&li->lock);
993 irq.u.pgm.code = code;
994 __inject_prog(vcpu, &irq);
995 BUG_ON(waitqueue_active(li->wq));
996 spin_unlock(&li->lock);
997 return 0;
998 }
999
1000 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
1001 struct kvm_s390_pgm_info *pgm_info)
1002 {
1003 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1004 struct kvm_s390_irq irq;
1005 int rc;
1006
1007 spin_lock(&li->lock);
1008 irq.u.pgm = *pgm_info;
1009 rc = __inject_prog(vcpu, &irq);
1010 BUG_ON(waitqueue_active(li->wq));
1011 spin_unlock(&li->lock);
1012 return rc;
1013 }
1014
1015 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1016 {
1017 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1018
1019 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1020 irq->u.ext.ext_params2);
1021 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1022 irq->u.ext.ext_params,
1023 irq->u.ext.ext_params2);
1024
1025 li->irq.ext = irq->u.ext;
1026 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1027 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1028 return 0;
1029 }
1030
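/* queue an external call via the SIGP interpretation facility (sigp_ctrl byte in the SCA) */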
1031 static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
1032 {
1033 unsigned char new_val, old_val;
1034 uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
1035
1036 new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
1037 old_val = *sigp_ctrl & ~SIGP_CTRL_C;
1038 if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
1039 /* another external call is pending */
1040 return -EBUSY;
1041 }
1042 atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
1043 return 0;
1044 }
1045
1046 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1047 {
1048 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1049 struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1050 uint16_t src_id = irq->u.extcall.code;
1051
1052 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1053 src_id);
1054 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1055 src_id, 0);
1056
1057 /* sending vcpu invalid */
1058 if (src_id >= KVM_MAX_VCPUS ||
1059 kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
1060 return -EINVAL;
1061
1062 if (sclp.has_sigpif)
1063 return __inject_extcall_sigpif(vcpu, src_id);
1064
1065 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1066 return -EBUSY;
1067 *extcall = irq->u.extcall;
1068 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1069 return 0;
1070 }
1071
1072 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1073 {
1074 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1075 struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1076
1077 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1078 irq->u.prefix.address);
1079 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1080 irq->u.prefix.address, 0);
1081
1082 if (!is_vcpu_stopped(vcpu))
1083 return -EBUSY;
1084
1085 *prefix = irq->u.prefix;
1086 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1087 return 0;
1088 }
1089
1090 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1091 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1092 {
1093 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1094 struct kvm_s390_stop_info *stop = &li->irq.stop;
1095 int rc = 0;
1096
1097 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1098
1099 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1100 return -EINVAL;
1101
1102 if (is_vcpu_stopped(vcpu)) {
1103 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1104 rc = kvm_s390_store_status_unloaded(vcpu,
1105 KVM_S390_STORE_STATUS_NOADDR);
1106 return rc;
1107 }
1108
1109 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1110 return -EBUSY;
1111 stop->flags = irq->u.stop.flags;
1112 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
1113 return 0;
1114 }
1115
1116 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
1117 struct kvm_s390_irq *irq)
1118 {
1119 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1120
1121 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1122 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1123
1124 set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1125 return 0;
1126 }
1127
1128 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1129 struct kvm_s390_irq *irq)
1130 {
1131 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1132
1133 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1134 irq->u.emerg.code);
1135 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1136 irq->u.emerg.code, 0);
1137
1138 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1139 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1140 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1141 return 0;
1142 }
1143
1144 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1145 {
1146 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1147 struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1148
1149 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1150 irq->u.mchk.mcic);
1151 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1152 irq->u.mchk.mcic);
1153
1154 /*
1155 * Because repressible machine checks can be indicated along with
1156 * exigent machine checks (PoP, Chapter 11, Interruption action)
1157 * we need to combine cr14, mcic and external damage code.
1158 * Failing storage address and the logout area should not be OR'ed
1159 * together; we just indicate the last occurrence of the corresponding
1160 * machine check.
1161 */
1162 mchk->cr14 |= irq->u.mchk.cr14;
1163 mchk->mcic |= irq->u.mchk.mcic;
1164 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1165 mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1166 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1167 sizeof(mchk->fixed_logout));
1168 if (mchk->mcic & MCHK_EX_MASK)
1169 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1170 else if (mchk->mcic & MCHK_REP_MASK)
1171 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
1172 return 0;
1173 }
1174
1175 static int __inject_ckc(struct kvm_vcpu *vcpu)
1176 {
1177 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1178
1179 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1180 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1181 0, 0);
1182
1183 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1184 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1185 return 0;
1186 }
1187
1188 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1189 {
1190 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1191
1192 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1193 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1194 0, 0);
1195
1196 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1197 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1198 return 0;
1199 }
1200
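/*
 * Dequeue the first pending I/O interrupt for the given ISC,
 * optionally restricted to a specific subchannel (schid != 0).
 */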
1201 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1202 int isc, u32 schid)
1203 {
1204 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1205 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1206 struct kvm_s390_interrupt_info *iter;
1207 u16 id = (schid & 0xffff0000U) >> 16;
1208 u16 nr = schid & 0x0000ffffU;
1209
1210 spin_lock(&fi->lock);
1211 list_for_each_entry(iter, isc_list, list) {
1212 if (schid && (id != iter->io.subchannel_id ||
1213 nr != iter->io.subchannel_nr))
1214 continue;
1215 /* found an appropriate entry */
1216 list_del_init(&iter->list);
1217 fi->counters[FIRQ_CNTR_IO] -= 1;
1218 if (list_empty(isc_list))
1219 clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
1220 spin_unlock(&fi->lock);
1221 return iter;
1222 }
1223 spin_unlock(&fi->lock);
1224 return NULL;
1225 }
1226
1227 /*
1228 * Dequeue and return an I/O interrupt matching any of the interruption
1229 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1230 */
1231 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1232 u64 isc_mask, u32 schid)
1233 {
1234 struct kvm_s390_interrupt_info *inti = NULL;
1235 int isc;
1236
1237 for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1238 if (isc_mask & isc_to_isc_bits(isc))
1239 inti = get_io_int(kvm, isc, schid);
1240 }
1241 return inti;
1242 }
1243
1244 #define SCCB_MASK 0xFFFFFFF8
1245 #define SCCB_EVENT_PENDING 0x3
1246
1247 static int __inject_service(struct kvm *kvm,
1248 struct kvm_s390_interrupt_info *inti)
1249 {
1250 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1251
1252 spin_lock(&fi->lock);
1253 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1254 /*
1255 * Early versions of the QEMU s390 bios will inject several
1256 * service interrupts one after another without handling the
1257 * condition code indicating busy.
1258 * We will silently ignore those superfluous sccb values.
1259 * A future version of QEMU will take care of serialization
1260 * of servc requests.
1261 */
1262 if (fi->srv_signal.ext_params & SCCB_MASK)
1263 goto out;
1264 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1265 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1266 out:
1267 spin_unlock(&fi->lock);
1268 kfree(inti);
1269 return 0;
1270 }
1271
1272 static int __inject_virtio(struct kvm *kvm,
1273 struct kvm_s390_interrupt_info *inti)
1274 {
1275 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1276
1277 spin_lock(&fi->lock);
1278 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1279 spin_unlock(&fi->lock);
1280 return -EBUSY;
1281 }
1282 fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1283 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1284 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1285 spin_unlock(&fi->lock);
1286 return 0;
1287 }
1288
1289 static int __inject_pfault_done(struct kvm *kvm,
1290 struct kvm_s390_interrupt_info *inti)
1291 {
1292 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1293
1294 spin_lock(&fi->lock);
1295 if (fi->counters[FIRQ_CNTR_PFAULT] >=
1296 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1297 spin_unlock(&fi->lock);
1298 return -EBUSY;
1299 }
1300 fi->counters[FIRQ_CNTR_PFAULT] += 1;
1301 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1302 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1303 spin_unlock(&fi->lock);
1304 return 0;
1305 }
1306
1307 #define CR_PENDING_SUBCLASS 28
1308 static int __inject_float_mchk(struct kvm *kvm,
1309 struct kvm_s390_interrupt_info *inti)
1310 {
1311 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1312
1313 spin_lock(&fi->lock);
1314 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1315 fi->mchk.mcic |= inti->mchk.mcic;
1316 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1317 spin_unlock(&fi->lock);
1318 kfree(inti);
1319 return 0;
1320 }
1321
1322 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1323 {
1324 struct kvm_s390_float_interrupt *fi;
1325 struct list_head *list;
1326 int isc;
1327
1328 fi = &kvm->arch.float_int;
1329 spin_lock(&fi->lock);
1330 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1331 spin_unlock(&fi->lock);
1332 return -EBUSY;
1333 }
1334 fi->counters[FIRQ_CNTR_IO] += 1;
1335
1336 isc = int_word_to_isc(inti->io.io_int_word);
1337 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1338 list_add_tail(&inti->list, list);
1339 set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
1340 spin_unlock(&fi->lock);
1341 return 0;
1342 }
1343
1344 /*
1345 * Find a destination VCPU for a floating irq and kick it.
1346 */
1347 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1348 {
1349 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1350 struct kvm_s390_local_interrupt *li;
1351 struct kvm_vcpu *dst_vcpu;
1352 int sigcpu, online_vcpus, nr_tries = 0;
1353
1354 online_vcpus = atomic_read(&kvm->online_vcpus);
1355 if (!online_vcpus)
1356 return;
1357
1358 /* find idle VCPUs first, then round robin */
1359 sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1360 if (sigcpu == online_vcpus) {
1361 do {
1362 sigcpu = fi->next_rr_cpu;
1363 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1364 /* avoid endless loops if all vcpus are stopped */
1365 if (nr_tries++ >= online_vcpus)
1366 return;
1367 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1368 }
1369 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1370
1371 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1372 li = &dst_vcpu->arch.local_int;
1373 spin_lock(&li->lock);
1374 switch (type) {
1375 case KVM_S390_MCHK:
1376 atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
1377 break;
1378 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1379 atomic_or(CPUSTAT_IO_INT, li->cpuflags);
1380 break;
1381 default:
1382 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1383 break;
1384 }
1385 spin_unlock(&li->lock);
1386 kvm_s390_vcpu_wakeup(dst_vcpu);
1387 }
1388
1389 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1390 {
1391 u64 type = READ_ONCE(inti->type);
1392 int rc;
1393
1394 switch (type) {
1395 case KVM_S390_MCHK:
1396 rc = __inject_float_mchk(kvm, inti);
1397 break;
1398 case KVM_S390_INT_VIRTIO:
1399 rc = __inject_virtio(kvm, inti);
1400 break;
1401 case KVM_S390_INT_SERVICE:
1402 rc = __inject_service(kvm, inti);
1403 break;
1404 case KVM_S390_INT_PFAULT_DONE:
1405 rc = __inject_pfault_done(kvm, inti);
1406 break;
1407 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1408 rc = __inject_io(kvm, inti);
1409 break;
1410 default:
1411 rc = -EINVAL;
1412 }
1413 if (rc)
1414 return rc;
1415
1416 __floating_irq_kick(kvm, type);
1417 return 0;
1418 }
1419
1420 int kvm_s390_inject_vm(struct kvm *kvm,
1421 struct kvm_s390_interrupt *s390int)
1422 {
1423 struct kvm_s390_interrupt_info *inti;
1424 int rc;
1425
1426 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1427 if (!inti)
1428 return -ENOMEM;
1429
1430 inti->type = s390int->type;
1431 switch (inti->type) {
1432 case KVM_S390_INT_VIRTIO:
1433 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1434 s390int->parm, s390int->parm64);
1435 inti->ext.ext_params = s390int->parm;
1436 inti->ext.ext_params2 = s390int->parm64;
1437 break;
1438 case KVM_S390_INT_SERVICE:
1439 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1440 inti->ext.ext_params = s390int->parm;
1441 break;
1442 case KVM_S390_INT_PFAULT_DONE:
1443 inti->ext.ext_params2 = s390int->parm64;
1444 break;
1445 case KVM_S390_MCHK:
1446 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1447 s390int->parm64);
1448 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1449 inti->mchk.mcic = s390int->parm64;
1450 break;
1451 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1452 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1453 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1454 else
1455 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1456 s390int->type & IOINT_CSSID_MASK,
1457 s390int->type & IOINT_SSID_MASK,
1458 s390int->type & IOINT_SCHID_MASK);
1459 inti->io.subchannel_id = s390int->parm >> 16;
1460 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1461 inti->io.io_int_parm = s390int->parm64 >> 32;
1462 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1463 break;
1464 default:
1465 kfree(inti);
1466 return -EINVAL;
1467 }
1468 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1469 2);
1470
1471 rc = __inject_vm(kvm, inti);
1472 if (rc)
1473 kfree(inti);
1474 return rc;
1475 }
1476
1477 int kvm_s390_reinject_io_int(struct kvm *kvm,
1478 struct kvm_s390_interrupt_info *inti)
1479 {
1480 return __inject_vm(kvm, inti);
1481 }
1482
1483 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1484 struct kvm_s390_irq *irq)
1485 {
1486 irq->type = s390int->type;
1487 switch (irq->type) {
1488 case KVM_S390_PROGRAM_INT:
1489 if (s390int->parm & 0xffff0000)
1490 return -EINVAL;
1491 irq->u.pgm.code = s390int->parm;
1492 break;
1493 case KVM_S390_SIGP_SET_PREFIX:
1494 irq->u.prefix.address = s390int->parm;
1495 break;
1496 case KVM_S390_SIGP_STOP:
1497 irq->u.stop.flags = s390int->parm;
1498 break;
1499 case KVM_S390_INT_EXTERNAL_CALL:
1500 if (s390int->parm & 0xffff0000)
1501 return -EINVAL;
1502 irq->u.extcall.code = s390int->parm;
1503 break;
1504 case KVM_S390_INT_EMERGENCY:
1505 if (s390int->parm & 0xffff0000)
1506 return -EINVAL;
1507 irq->u.emerg.code = s390int->parm;
1508 break;
1509 case KVM_S390_MCHK:
1510 irq->u.mchk.mcic = s390int->parm64;
1511 break;
1512 }
1513 return 0;
1514 }
1515
1516 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1517 {
1518 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1519
1520 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1521 }
1522
1523 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1524 {
1525 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1526
1527 spin_lock(&li->lock);
1528 li->irq.stop.flags = 0;
1529 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1530 spin_unlock(&li->lock);
1531 }
1532
1533 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1534 {
1535 int rc;
1536
1537 switch (irq->type) {
1538 case KVM_S390_PROGRAM_INT:
1539 rc = __inject_prog(vcpu, irq);
1540 break;
1541 case KVM_S390_SIGP_SET_PREFIX:
1542 rc = __inject_set_prefix(vcpu, irq);
1543 break;
1544 case KVM_S390_SIGP_STOP:
1545 rc = __inject_sigp_stop(vcpu, irq);
1546 break;
1547 case KVM_S390_RESTART:
1548 rc = __inject_sigp_restart(vcpu, irq);
1549 break;
1550 case KVM_S390_INT_CLOCK_COMP:
1551 rc = __inject_ckc(vcpu);
1552 break;
1553 case KVM_S390_INT_CPU_TIMER:
1554 rc = __inject_cpu_timer(vcpu);
1555 break;
1556 case KVM_S390_INT_EXTERNAL_CALL:
1557 rc = __inject_extcall(vcpu, irq);
1558 break;
1559 case KVM_S390_INT_EMERGENCY:
1560 rc = __inject_sigp_emergency(vcpu, irq);
1561 break;
1562 case KVM_S390_MCHK:
1563 rc = __inject_mchk(vcpu, irq);
1564 break;
1565 case KVM_S390_INT_PFAULT_INIT:
1566 rc = __inject_pfault_init(vcpu, irq);
1567 break;
1568 case KVM_S390_INT_VIRTIO:
1569 case KVM_S390_INT_SERVICE:
1570 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1571 default:
1572 rc = -EINVAL;
1573 }
1574
1575 return rc;
1576 }
1577
1578 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1579 {
1580 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1581 int rc;
1582
1583 spin_lock(&li->lock);
1584 rc = do_inject_vcpu(vcpu, irq);
1585 spin_unlock(&li->lock);
1586 if (!rc)
1587 kvm_s390_vcpu_wakeup(vcpu);
1588 return rc;
1589 }
1590
1591 static inline void clear_irq_list(struct list_head *_list)
1592 {
1593 struct kvm_s390_interrupt_info *inti, *n;
1594
1595 list_for_each_entry_safe(inti, n, _list, list) {
1596 list_del(&inti->list);
1597 kfree(inti);
1598 }
1599 }
1600
1601 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1602 struct kvm_s390_irq *irq)
1603 {
1604 irq->type = inti->type;
1605 switch (inti->type) {
1606 case KVM_S390_INT_PFAULT_INIT:
1607 case KVM_S390_INT_PFAULT_DONE:
1608 case KVM_S390_INT_VIRTIO:
1609 irq->u.ext = inti->ext;
1610 break;
1611 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1612 irq->u.io = inti->io;
1613 break;
1614 }
1615 }
1616
1617 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1618 {
1619 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1620 int i;
1621
1622 spin_lock(&fi->lock);
1623 fi->pending_irqs = 0;
1624 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1625 memset(&fi->mchk, 0, sizeof(fi->mchk));
1626 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1627 clear_irq_list(&fi->lists[i]);
1628 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1629 fi->counters[i] = 0;
1630 spin_unlock(&fi->lock);
1631 }
1632
1633 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1634 {
1635 struct kvm_s390_interrupt_info *inti;
1636 struct kvm_s390_float_interrupt *fi;
1637 struct kvm_s390_irq *buf;
1638 struct kvm_s390_irq *irq;
1639 int max_irqs;
1640 int ret = 0;
1641 int n = 0;
1642 int i;
1643
1644 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1645 return -EINVAL;
1646
1647 /*
1648 * We are already using -ENOMEM to signal userspace that it
1649 * may retry with a bigger buffer, so we need to use something
1650 * else for this case.
1651 */
1652 buf = vzalloc(len);
1653 if (!buf)
1654 return -ENOBUFS;
1655
1656 max_irqs = len / sizeof(struct kvm_s390_irq);
1657
1658 fi = &kvm->arch.float_int;
1659 spin_lock(&fi->lock);
1660 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1661 list_for_each_entry(inti, &fi->lists[i], list) {
1662 if (n == max_irqs) {
1663 /* signal userspace to try again */
1664 ret = -ENOMEM;
1665 goto out;
1666 }
1667 inti_to_irq(inti, &buf[n]);
1668 n++;
1669 }
1670 }
1671 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
1672 if (n == max_irqs) {
1673 /* signal userspace to try again */
1674 ret = -ENOMEM;
1675 goto out;
1676 }
1677 irq = (struct kvm_s390_irq *) &buf[n];
1678 irq->type = KVM_S390_INT_SERVICE;
1679 irq->u.ext = fi->srv_signal;
1680 n++;
1681 }
1682 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1683 if (n == max_irqs) {
1684 /* signal userspace to try again */
1685 ret = -ENOMEM;
1686 goto out;
1687 }
1688 irq = (struct kvm_s390_irq *) &buf[n];
1689 irq->type = KVM_S390_MCHK;
1690 irq->u.mchk = fi->mchk;
1691 n++;
1692 }
1693
1694 out:
1695 spin_unlock(&fi->lock);
1696 if (!ret && n > 0) {
1697 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1698 ret = -EFAULT;
1699 }
1700 vfree(buf);
1701
1702 return ret < 0 ? ret : n;
1703 }
1704
1705 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1706 {
1707 int r;
1708
1709 switch (attr->group) {
1710 case KVM_DEV_FLIC_GET_ALL_IRQS:
1711 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
1712 attr->attr);
1713 break;
1714 default:
1715 r = -EINVAL;
1716 }
1717
1718 return r;
1719 }
1720
1721 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1722 u64 addr)
1723 {
1724 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1725 void *target = NULL;
1726 void __user *source;
1727 u64 size;
1728
1729 if (get_user(inti->type, (u64 __user *)addr))
1730 return -EFAULT;
1731
1732 switch (inti->type) {
1733 case KVM_S390_INT_PFAULT_INIT:
1734 case KVM_S390_INT_PFAULT_DONE:
1735 case KVM_S390_INT_VIRTIO:
1736 case KVM_S390_INT_SERVICE:
1737 target = (void *) &inti->ext;
1738 source = &uptr->u.ext;
1739 size = sizeof(inti->ext);
1740 break;
1741 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1742 target = (void *) &inti->io;
1743 source = &uptr->u.io;
1744 size = sizeof(inti->io);
1745 break;
1746 case KVM_S390_MCHK:
1747 target = (void *) &inti->mchk;
1748 source = &uptr->u.mchk;
1749 size = sizeof(inti->mchk);
1750 break;
1751 default:
1752 return -EINVAL;
1753 }
1754
1755 if (copy_from_user(target, source, size))
1756 return -EFAULT;
1757
1758 return 0;
1759 }
1760
1761 static int enqueue_floating_irq(struct kvm_device *dev,
1762 struct kvm_device_attr *attr)
1763 {
1764 struct kvm_s390_interrupt_info *inti = NULL;
1765 int r = 0;
1766 int len = attr->attr;
1767
1768 if (len % sizeof(struct kvm_s390_irq) != 0)
1769 return -EINVAL;
1770 else if (len > KVM_S390_FLIC_MAX_BUFFER)
1771 return -EINVAL;
1772
1773 while (len >= sizeof(struct kvm_s390_irq)) {
1774 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1775 if (!inti)
1776 return -ENOMEM;
1777
1778 r = copy_irq_from_user(inti, attr->addr);
1779 if (r) {
1780 kfree(inti);
1781 return r;
1782 }
1783 r = __inject_vm(dev->kvm, inti);
1784 if (r) {
1785 kfree(inti);
1786 return r;
1787 }
1788 len -= sizeof(struct kvm_s390_irq);
1789 attr->addr += sizeof(struct kvm_s390_irq);
1790 }
1791
1792 return r;
1793 }
1794
1795 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1796 {
1797 if (id >= MAX_S390_IO_ADAPTERS)
1798 return NULL;
1799 return kvm->arch.adapters[id];
1800 }
1801
1802 static int register_io_adapter(struct kvm_device *dev,
1803 struct kvm_device_attr *attr)
1804 {
1805 struct s390_io_adapter *adapter;
1806 struct kvm_s390_io_adapter adapter_info;
1807
1808 if (copy_from_user(&adapter_info,
1809 (void __user *)attr->addr, sizeof(adapter_info)))
1810 return -EFAULT;
1811
1812 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1813 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1814 return -EINVAL;
1815
1816 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1817 if (!adapter)
1818 return -ENOMEM;
1819
1820 INIT_LIST_HEAD(&adapter->maps);
1821 init_rwsem(&adapter->maps_lock);
1822 atomic_set(&adapter->nr_maps, 0);
1823 adapter->id = adapter_info.id;
1824 adapter->isc = adapter_info.isc;
1825 adapter->maskable = adapter_info.maskable;
1826 adapter->masked = false;
1827 adapter->swap = adapter_info.swap;
1828 dev->kvm->arch.adapters[adapter->id] = adapter;
1829
1830 return 0;
1831 }
1832
1833 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1834 {
1835 int ret;
1836 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1837
1838 if (!adapter || !adapter->maskable)
1839 return -EINVAL;
1840 ret = adapter->masked;
1841 adapter->masked = masked;
1842 return ret;
1843 }
1844
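/* pin the guest page backing @addr and record it in the adapter's mapping list */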
1845 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1846 {
1847 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1848 struct s390_map_info *map;
1849 int ret;
1850
1851 if (!adapter || !addr)
1852 return -EINVAL;
1853
1854 map = kzalloc(sizeof(*map), GFP_KERNEL);
1855 if (!map) {
1856 ret = -ENOMEM;
1857 goto out;
1858 }
1859 INIT_LIST_HEAD(&map->list);
1860 map->guest_addr = addr;
1861 map->addr = gmap_translate(kvm->arch.gmap, addr);
1862 if (map->addr == -EFAULT) {
1863 ret = -EFAULT;
1864 goto out;
1865 }
1866 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1867 if (ret < 0)
1868 goto out;
1869 BUG_ON(ret != 1);
1870 down_write(&adapter->maps_lock);
1871 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1872 list_add_tail(&map->list, &adapter->maps);
1873 ret = 0;
1874 } else {
1875 put_page(map->page);
1876 ret = -EINVAL;
1877 }
1878 up_write(&adapter->maps_lock);
1879 out:
1880 if (ret)
1881 kfree(map);
1882 return ret;
1883 }
1884
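/*
 * Remove the mapping for a guest indicator page, unpinning the backing
 * page.  Returns -EINVAL if no mapping exists for the given address.
 */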
1885 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1886 {
1887 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1888 struct s390_map_info *map, *tmp;
1889 int found = 0;
1890
1891 if (!adapter || !addr)
1892 return -EINVAL;
1893
1894 down_write(&adapter->maps_lock);
1895 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1896 if (map->guest_addr == addr) {
1897 found = 1;
1898 atomic_dec(&adapter->nr_maps);
1899 list_del(&map->list);
1900 put_page(map->page);
1901 kfree(map);
1902 break;
1903 }
1904 }
1905 up_write(&adapter->maps_lock);
1906
1907 return found ? 0 : -EINVAL;
1908 }
1909
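/*
 * Tear down all registered adapters on VM destruction, releasing every
 * still-pinned indicator page.
 */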
1910 void kvm_s390_destroy_adapters(struct kvm *kvm)
1911 {
1912 int i;
1913 struct s390_map_info *map, *tmp;
1914
1915 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1916 if (!kvm->arch.adapters[i])
1917 continue;
1918 list_for_each_entry_safe(map, tmp,
1919 &kvm->arch.adapters[i]->maps, list) {
1920 list_del(&map->list);
1921 put_page(map->page);
1922 kfree(map);
1923 }
1924 kfree(kvm->arch.adapters[i]);
1925 }
1926 }
1927
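/*
 * Handler for KVM_DEV_FLIC_ADAPTER_MODIFY: dispatch a struct
 * kvm_s390_io_adapter_req from userspace to the mask/map/unmap helpers
 * above, keyed on req.type.
 */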
1928 static int modify_io_adapter(struct kvm_device *dev,
1929 struct kvm_device_attr *attr)
1930 {
1931 struct kvm_s390_io_adapter_req req;
1932 struct s390_io_adapter *adapter;
1933 int ret;
1934
1935 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1936 return -EFAULT;
1937
1938 adapter = get_io_adapter(dev->kvm, req.id);
1939 if (!adapter)
1940 return -EINVAL;
1941 switch (req.type) {
1942 case KVM_S390_IO_ADAPTER_MASK:
1943 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1944 if (ret > 0)
1945 ret = 0;
1946 break;
1947 case KVM_S390_IO_ADAPTER_MAP:
1948 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1949 break;
1950 case KVM_S390_IO_ADAPTER_UNMAP:
1951 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1952 break;
1953 default:
1954 ret = -EINVAL;
1955 }
1956
1957 return ret;
1958 }
1959
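/*
 * set_attr dispatcher of the FLIC device: enqueue floating interrupts,
 * clear all pending floating interrupts, enable or disable pfault (async
 * page fault) handling, and register or modify I/O adapters.
 */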
1960 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1961 {
1962 int r = 0;
1963 unsigned int i;
1964 struct kvm_vcpu *vcpu;
1965
1966 switch (attr->group) {
1967 case KVM_DEV_FLIC_ENQUEUE:
1968 r = enqueue_floating_irq(dev, attr);
1969 break;
1970 case KVM_DEV_FLIC_CLEAR_IRQS:
1971 kvm_s390_clear_float_irqs(dev->kvm);
1972 break;
1973 case KVM_DEV_FLIC_APF_ENABLE:
1974 dev->kvm->arch.gmap->pfault_enabled = 1;
1975 break;
1976 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1977 dev->kvm->arch.gmap->pfault_enabled = 0;
1978 /*
1979 * Make sure no async faults are in transition when
1980 * clearing the queues, so we don't need to worry
1981 * about late-coming workers.
1982 */
1983 synchronize_srcu(&dev->kvm->srcu);
1984 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1985 kvm_clear_async_pf_completion_queue(vcpu);
1986 break;
1987 case KVM_DEV_FLIC_ADAPTER_REGISTER:
1988 r = register_io_adapter(dev, attr);
1989 break;
1990 case KVM_DEV_FLIC_ADAPTER_MODIFY:
1991 r = modify_io_adapter(dev, attr);
1992 break;
1993 default:
1994 r = -EINVAL;
1995 }
1996
1997 return r;
1998 }
1999
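/* Only one FLIC device may exist per VM; a second create fails with -EINVAL. */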
2000 static int flic_create(struct kvm_device *dev, u32 type)
2001 {
2002 if (!dev)
2003 return -EINVAL;
2004 if (dev->kvm->arch.flic)
2005 return -EINVAL;
2006 dev->kvm->arch.flic = dev;
2007 return 0;
2008 }
2009
2010 static void flic_destroy(struct kvm_device *dev)
2011 {
2012 dev->kvm->arch.flic = NULL;
2013 kfree(dev);
2014 }
2015
2016 /* s390 floating irq controller (flic) */
2017 struct kvm_device_ops kvm_flic_ops = {
2018 .name = "kvm-flic",
2019 .get_attr = flic_get_attr,
2020 .set_attr = flic_set_attr,
2021 .create = flic_create,
2022 .destroy = flic_destroy,
2023 };
2024
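/*
 * Convert an (address, bit offset) pair into a bit number for the Linux
 * bitops on the mapped indicator page.  If the adapter was registered with
 * the swap flag, the bit is mirrored within a 64-bit word
 * (bit ^ (BITS_PER_LONG - 1)); this presumably bridges the architectural
 * MSB-first bit numbering and the bitops numbering.
 */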
2025 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2026 {
2027 unsigned long bit;
2028
2029 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2030
2031 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2032 }
2033
2034 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2035 u64 addr)
2036 {
2037 struct s390_map_info *map;
2038
2039 if (!adapter)
2040 return NULL;
2041
2042 list_for_each_entry(map, &adapter->maps, list) {
2043 if (map->guest_addr == addr)
2044 return map;
2045 }
2046 return NULL;
2047 }
2048
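/*
 * Set the adapter-local indicator bit and the summary indicator bit,
 * marking both backing pages dirty.  Returns 1 if the summary bit was
 * newly set (an interrupt should be injected), 0 if it was already set
 * (the interrupt is coalesced), and -1 if one of the indicator pages has
 * not been mapped.
 */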
2049 static int adapter_indicators_set(struct kvm *kvm,
2050 struct s390_io_adapter *adapter,
2051 struct kvm_s390_adapter_int *adapter_int)
2052 {
2053 unsigned long bit;
2054 int summary_set, idx;
2055 struct s390_map_info *info;
2056 void *map;
2057
2058 info = get_map_info(adapter, adapter_int->ind_addr);
2059 if (!info)
2060 return -1;
2061 map = page_address(info->page);
2062 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2063 set_bit(bit, map);
2064 idx = srcu_read_lock(&kvm->srcu);
2065 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2066 set_page_dirty_lock(info->page);
2067 info = get_map_info(adapter, adapter_int->summary_addr);
2068 if (!info) {
2069 srcu_read_unlock(&kvm->srcu, idx);
2070 return -1;
2071 }
2072 map = page_address(info->page);
2073 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2074 adapter->swap);
2075 summary_set = test_and_set_bit(bit, map);
2076 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2077 set_page_dirty_lock(info->page);
2078 srcu_read_unlock(&kvm->srcu, idx);
2079 return summary_set ? 0 : 1;
2080 }
2081
2082 /*
2083 * < 0 - not injected due to error
2084 * = 0 - coalesced, summary indicator already active
2085 * > 0 - injected interrupt
2086 */
2087 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2088 struct kvm *kvm, int irq_source_id, int level,
2089 bool line_status)
2090 {
2091 int ret;
2092 struct s390_io_adapter *adapter;
2093
2094 /* We're only interested in the 0->1 transition. */
2095 if (!level)
2096 return 0;
2097 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2098 if (!adapter)
2099 return -1;
2100 down_read(&adapter->maps_lock);
2101 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2102 up_read(&adapter->maps_lock);
2103 if ((ret > 0) && !adapter->masked) {
2104 struct kvm_s390_interrupt s390int = {
2105 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2106 .parm = 0,
2107 .parm64 = (adapter->isc << 27) | 0x80000000,
2108 };
2109 ret = kvm_s390_inject_vm(kvm, &s390int);
2110 if (ret == 0)
2111 ret = 1;
2112 }
2113 return ret;
2114 }
2115
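/*
 * Translate a userspace irq routing entry into its in-kernel form.  Only
 * KVM_IRQ_ROUTING_S390_ADAPTER entries are supported; they are delivered
 * through set_adapter_int() above.
 */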
2116 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
2117 const struct kvm_irq_routing_entry *ue)
2118 {
2119 int ret;
2120
2121 switch (ue->type) {
2122 case KVM_IRQ_ROUTING_S390_ADAPTER:
2123 e->set = set_adapter_int;
2124 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2125 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2126 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2127 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2128 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2129 ret = 0;
2130 break;
2131 default:
2132 ret = -EINVAL;
2133 }
2134
2135 return ret;
2136 }
2137
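/* MSI routing entries are not supported on s390. */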
2138 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2139 int irq_source_id, int level, bool line_status)
2140 {
2141 return -EINVAL;
2142 }
2143
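/*
 * Restore a vcpu's local interrupt state from a userspace buffer of
 * struct kvm_s390_irq, typically on the destination side of migration.
 * Refused with -EBUSY if the vcpu already has local interrupts pending.
 */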
2144 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2145 {
2146 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2147 struct kvm_s390_irq *buf;
2148 int r = 0;
2149 int n;
2150
2151 buf = vmalloc(len);
2152 if (!buf)
2153 return -ENOMEM;
2154
2155 if (copy_from_user((void *) buf, irqstate, len)) {
2156 r = -EFAULT;
2157 goto out_free;
2158 }
2159
2160 /*
2161 * Don't allow setting the interrupt state
2162 * when there are already interrupts pending
2163 */
2164 spin_lock(&li->lock);
2165 if (li->pending_irqs) {
2166 r = -EBUSY;
2167 goto out_unlock;
2168 }
2169
2170 for (n = 0; n < len / sizeof(*buf); n++) {
2171 r = do_inject_vcpu(vcpu, &buf[n]);
2172 if (r)
2173 break;
2174 }
2175
2176 out_unlock:
2177 spin_unlock(&li->lock);
2178 out_free:
2179 vfree(buf);
2180
2181 return r;
2182 }
2183
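/*
 * Translate one pending local interrupt, identified by its IRQ_PEND_* bit,
 * back into the struct kvm_s390_irq representation of the userspace ABI.
 */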
2184 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2185 struct kvm_s390_irq *irq,
2186 unsigned long irq_type)
2187 {
2188 switch (irq_type) {
2189 case IRQ_PEND_MCHK_EX:
2190 case IRQ_PEND_MCHK_REP:
2191 irq->type = KVM_S390_MCHK;
2192 irq->u.mchk = li->irq.mchk;
2193 break;
2194 case IRQ_PEND_PROG:
2195 irq->type = KVM_S390_PROGRAM_INT;
2196 irq->u.pgm = li->irq.pgm;
2197 break;
2198 case IRQ_PEND_PFAULT_INIT:
2199 irq->type = KVM_S390_INT_PFAULT_INIT;
2200 irq->u.ext = li->irq.ext;
2201 break;
2202 case IRQ_PEND_EXT_EXTERNAL:
2203 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2204 irq->u.extcall = li->irq.extcall;
2205 break;
2206 case IRQ_PEND_EXT_CLOCK_COMP:
2207 irq->type = KVM_S390_INT_CLOCK_COMP;
2208 break;
2209 case IRQ_PEND_EXT_CPU_TIMER:
2210 irq->type = KVM_S390_INT_CPU_TIMER;
2211 break;
2212 case IRQ_PEND_SIGP_STOP:
2213 irq->type = KVM_S390_SIGP_STOP;
2214 irq->u.stop = li->irq.stop;
2215 break;
2216 case IRQ_PEND_RESTART:
2217 irq->type = KVM_S390_RESTART;
2218 break;
2219 case IRQ_PEND_SET_PREFIX:
2220 irq->type = KVM_S390_SIGP_SET_PREFIX;
2221 irq->u.prefix = li->irq.prefix;
2222 break;
2223 }
2224 }
2225
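/*
 * Counterpart of kvm_s390_set_irq_state(): snapshot the vcpu's pending
 * local interrupts into a userspace buffer.  Pending SIGP emergency
 * signals are expanded into one entry per signalling cpu address, and a
 * pending external call recorded in the SCA is appended as well.  Returns
 * the number of bytes written, -ENOBUFS if the buffer is too small, or
 * -EFAULT on copy errors.
 */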
2226 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2227 {
2228 uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
2229 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2230 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2231 unsigned long pending_irqs;
2232 struct kvm_s390_irq irq;
2233 unsigned long irq_type;
2234 int cpuaddr;
2235 int n = 0;
2236
2237 spin_lock(&li->lock);
2238 pending_irqs = li->pending_irqs;
2239 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2240 sizeof(sigp_emerg_pending));
2241 spin_unlock(&li->lock);
2242
2243 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2244 memset(&irq, 0, sizeof(irq));
2245 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2246 continue;
2247 if (n + sizeof(irq) > len)
2248 return -ENOBUFS;
2249 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2250 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2251 return -EFAULT;
2252 n += sizeof(irq);
2253 }
2254
2255 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2256 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2257 memset(&irq, 0, sizeof(irq));
2258 if (n + sizeof(irq) > len)
2259 return -ENOBUFS;
2260 irq.type = KVM_S390_INT_EMERGENCY;
2261 irq.u.emerg.code = cpuaddr;
2262 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2263 return -EFAULT;
2264 n += sizeof(irq);
2265 }
2266 }
2267
2268 if ((sigp_ctrl & SIGP_CTRL_C) &&
2269 (atomic_read(&vcpu->arch.sie_block->cpuflags) &
2270 CPUSTAT_ECALL_PEND)) {
2271 if (n + sizeof(irq) > len)
2272 return -ENOBUFS;
2273 memset(&irq, 0, sizeof(irq));
2274 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2275 irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
2276 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2277 return -EFAULT;
2278 n += sizeof(irq);
2279 }
2280
2281 return n;
2282 }