KVM: s390: add bitmap for handling cpu-local interrupts
[deliverable/linux.git] / arch / s390 / kvm / sigp.c
CommitLineData
5288fbf0 1/*
a53c8fab 2 * handling interprocessor communication
5288fbf0 3 *
b13d3580 4 * Copyright IBM Corp. 2008, 2013
5288fbf0
CB
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
9ace903d 12 * Christian Ehrhardt <ehrhardt@de.ibm.com>
5288fbf0
CB
13 */
14
15#include <linux/kvm.h>
16#include <linux/kvm_host.h>
5a0e3ad6 17#include <linux/slab.h>
a9ae32c3 18#include <asm/sigp.h>
5288fbf0
CB
19#include "gaccess.h"
20#include "kvm-s390.h"
5786fffa 21#include "trace.h"
5288fbf0 22
/*
 * SIGP SENSE: report the current status of the destination VCPU.
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED when the destination has neither
 * an external call pending nor is stopped.  Otherwise the matching
 * status bits (SIGP_STATUS_EXT_CALL_PENDING, SIGP_STATUS_STOPPED) are
 * stored into the low 32 bits of *reg and SIGP_CC_STATUS_STORED is
 * returned.
 */
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;

	li = &dst_vcpu->arch.local_int;

	/* Take one snapshot of the flags so both tests see the same value. */
	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		/* Status goes in the low word; the high word is preserved. */
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}
48
07b03035
DH
/*
 * Inject an EMERGENCY SIGNAL external interrupt from vcpu into dst_vcpu.
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED on success, or the negative error
 * code returned by kvm_s390_inject_vcpu() on failure.
 */
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = vcpu->vcpu_id,	/* the signalling CPU's address */
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
65
07b03035
DH
/* SIGP EMERGENCY SIGNAL: the plain, unconditional variant. */
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	int rc = __inject_sigp_emergency(vcpu, dst_vcpu);

	return rc;
}
70
3d95c7d2
DH
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: inject the emergency signal only
 * when the destination CPU's state satisfies the architected condition;
 * otherwise store SIGP_STATUS_INCORRECT_STATE into *reg.
 *
 * asn is the address space number supplied with the order; it is
 * compared against the destination's primary ASN (low halfword of CR4)
 * and secondary ASN (low halfword of CR3).
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
97
3d95c7d2
DH
/*
 * SIGP EXTERNAL CALL: inject an external-call interrupt into dst_vcpu.
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED on success, or the negative error
 * code returned by kvm_s390_inject_vcpu() on failure.
 */
static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.parm = vcpu->vcpu_id,	/* the calling CPU's address */
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
114
/*
 * Queue a SIGP STOP interrupt for dst_vcpu and record the requested
 * action bits (ACTION_STOP_ON_STOP, optionally ACTION_STORE_ON_STOP).
 *
 * Returns:
 *   SIGP_CC_ORDER_CODE_ACCEPTED - stop queued, or CPU already stopped
 *                                 and no store requested
 *   SIGP_CC_BUSY                - a previous SIGP STOP is still pending
 *   -ESHUTDOWN                  - CPU already stopped but a status store
 *                                 was requested; the caller must do it
 *   -ENOMEM                     - no memory for the interrupt record
 */
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	/* GFP_ATOMIC: allocated ahead of taking li->lock below */
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		kfree(inti);
		rc = SIGP_CC_BUSY;
		goto out;
	}
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* already stopped: nothing to queue */
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
	spin_unlock(&li->lock);

	return rc;
}
149
a6cc3108 150static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
9ace903d 151{
9ace903d
CE
152 int rc;
153
a6cc3108 154 rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
3d95c7d2 155 VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
e879892c 156
a6cc3108
DH
157 return rc;
158}
159
/*
 * SIGP STOP AND STORE STATUS: stop the destination VCPU and store its
 * status.  When the CPU was already stopped, __inject_sigp_stop()
 * signals that with -ESHUTDOWN and the store is performed here instead.
 *
 * NOTE(review): the reg parameter is currently unused in this function;
 * it appears to exist for signature symmetry with the other handlers —
 * confirm before relying on it.
 */
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	int rc;

	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
					  ACTION_STORE_ON_STOP);
	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
		   dst_vcpu->vcpu_id);

	if (rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}
180
/*
 * SIGP SET ARCHITECTURE: handle an architecture-mode change request.
 *
 * Mode 0 is reported as not operational.  For modes 1 and 2 every
 * VCPU's pfault token is invalidated and its pending async page fault
 * completions are dropped, then the order is accepted.  Any other mode
 * is left to user space (-EOPNOTSUPP).
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	/* only the low byte of the parameter selects the mode */
	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}
205
3d95c7d2
DH
/*
 * SIGP SET PREFIX: queue an interrupt that sets the destination CPU's
 * prefix register to 'address'.
 *
 * Stores SIGP_STATUS_INVALID_PARAMETER in *reg if the new prefix area
 * is not backed by guest memory, and SIGP_STATUS_INCORRECT_STATE if
 * the destination CPU is not in the stopped state; in both cases
 * SIGP_CC_STATUS_STORED is returned.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	/* allocate before taking li->lock; report busy when out of memory */
	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
		   address);
out_li:
	spin_unlock(&li->lock);
	return rc;
}
255
3d95c7d2
DH
/*
 * SIGP STORE STATUS AT ADDRESS: store the destination CPU's status at
 * the given guest address (masked to a 512-byte aligned 31-bit value).
 * Only permitted while the destination CPU is stopped; otherwise
 * SIGP_STATUS_INCORRECT_STATE is stored in *reg.
 */
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	/* read the flags under the destination's local-interrupt lock */
	spin_lock(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		/* inaccessible address: report it as an invalid parameter */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}
281
3d95c7d2
DH
/*
 * SIGP SENSE RUNNING: report whether the destination VCPU is currently
 * running.  If it is not, SIGP_STATUS_NOT_RUNNING is stored in the low
 * word of *reg and SIGP_CC_STATUS_STORED is returned.
 */
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}
304
b8983830
DH
/*
 * SIGP START / RESTART are completed in user space (-EOPNOTSUPP),
 * unless a stop of the destination CPU is still pending, in which case
 * SIGP_CC_BUSY is returned instead.
 */
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}
319
b8983830
DH
320static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
321 struct kvm_vcpu *dst_vcpu, u8 order_code)
322{
323 /* handle (INITIAL) CPU RESET in user space */
324 return -EOPNOTSUPP;
325}
326
327static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
328 struct kvm_vcpu *dst_vcpu)
329{
330 /* handle unknown orders in user space */
331 return -EOPNOTSUPP;
332}
333
3526a66b
DH
/*
 * Dispatch a SIGP order that addresses a single destination CPU.
 *
 * Returns the SIGP condition code to report to the guest, a negative
 * error code, or -EOPNOTSUPP when the order must be completed in user
 * space.  An out-of-range or unassigned cpu_addr yields
 * SIGP_CC_NOT_OPERATIONAL.
 */
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
414
/*
 * Intercept handler for the SIGP instruction.
 *
 * Decodes the register operands (r1 pair: parameter/status register,
 * r3: destination CPU address), dispatches set-architecture orders
 * globally and all other orders via handle_sigp_dst(), then sets the
 * guest's PSW condition code.  Returns 0 on success or a negative
 * error code (e.g. -EOPNOTSUPP to exit to user space).
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* the parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		/* status, if any, is stored back into gprs[r1] */
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
4953919f
DH
453
/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occurr after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 *
 * Returns 0 when the external call was completed here, -EOPNOTSUPP for
 * any other order so that the full SIGP handler runs instead.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		/* hardware already validated the target, so it must exist */
		BUG_ON(dest_vcpu == NULL);

		/* indicators are already set: just wake the target */
		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}
This page took 0.453334 seconds and 5 git commands to generate.