/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
23 static int __sigp_sense(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
26 struct kvm_s390_local_interrupt
*li
;
27 struct kvm_vcpu
*dst_vcpu
= NULL
;
31 if (cpu_addr
>= KVM_MAX_VCPUS
)
32 return SIGP_CC_NOT_OPERATIONAL
;
34 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
36 return SIGP_CC_NOT_OPERATIONAL
;
37 li
= &dst_vcpu
->arch
.local_int
;
39 cpuflags
= atomic_read(li
->cpuflags
);
40 if (!(cpuflags
& (CPUSTAT_ECALL_PEND
| CPUSTAT_STOPPED
)))
41 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
43 *reg
&= 0xffffffff00000000UL
;
44 if (cpuflags
& CPUSTAT_ECALL_PEND
)
45 *reg
|= SIGP_STATUS_EXT_CALL_PENDING
;
46 if (cpuflags
& CPUSTAT_STOPPED
)
47 *reg
|= SIGP_STATUS_STOPPED
;
48 rc
= SIGP_CC_STATUS_STORED
;
51 VCPU_EVENT(vcpu
, 4, "sensed status of cpu %x rc %x", cpu_addr
, rc
);
55 static int __sigp_emergency(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
57 struct kvm_s390_interrupt s390int
= {
58 .type
= KVM_S390_INT_EMERGENCY
,
59 .parm
= vcpu
->vcpu_id
,
61 struct kvm_vcpu
*dst_vcpu
= NULL
;
64 if (cpu_addr
< KVM_MAX_VCPUS
)
65 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
67 return SIGP_CC_NOT_OPERATIONAL
;
69 rc
= kvm_s390_inject_vcpu(dst_vcpu
, &s390int
);
71 VCPU_EVENT(vcpu
, 4, "sent sigp emerg to cpu %x", cpu_addr
);
73 return rc
? rc
: SIGP_CC_ORDER_CODE_ACCEPTED
;
76 static int __sigp_conditional_emergency(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
79 struct kvm_vcpu
*dst_vcpu
= NULL
;
80 const u64 psw_int_mask
= PSW_MASK_IO
| PSW_MASK_EXT
;
85 if (cpu_addr
< KVM_MAX_VCPUS
)
86 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
88 return SIGP_CC_NOT_OPERATIONAL
;
89 flags
= atomic_read(&dst_vcpu
->arch
.sie_block
->cpuflags
);
90 psw
= &dst_vcpu
->arch
.sie_block
->gpsw
;
91 p_asn
= dst_vcpu
->arch
.sie_block
->gcr
[4] & 0xffff; /* Primary ASN */
92 s_asn
= dst_vcpu
->arch
.sie_block
->gcr
[3] & 0xffff; /* Secondary ASN */
94 /* Deliver the emergency signal? */
95 if (!(flags
& CPUSTAT_STOPPED
)
96 || (psw
->mask
& psw_int_mask
) != psw_int_mask
97 || ((flags
& CPUSTAT_WAIT
) && psw
->addr
!= 0)
98 || (!(flags
& CPUSTAT_WAIT
) && (asn
== p_asn
|| asn
== s_asn
))) {
99 return __sigp_emergency(vcpu
, cpu_addr
);
101 *reg
&= 0xffffffff00000000UL
;
102 *reg
|= SIGP_STATUS_INCORRECT_STATE
;
103 return SIGP_CC_STATUS_STORED
;
107 static int __sigp_external_call(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
109 struct kvm_s390_interrupt s390int
= {
110 .type
= KVM_S390_INT_EXTERNAL_CALL
,
111 .parm
= vcpu
->vcpu_id
,
113 struct kvm_vcpu
*dst_vcpu
= NULL
;
116 if (cpu_addr
< KVM_MAX_VCPUS
)
117 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
119 return SIGP_CC_NOT_OPERATIONAL
;
121 rc
= kvm_s390_inject_vcpu(dst_vcpu
, &s390int
);
123 VCPU_EVENT(vcpu
, 4, "sent sigp ext call to cpu %x", cpu_addr
);
125 return rc
? rc
: SIGP_CC_ORDER_CODE_ACCEPTED
;
128 static int __inject_sigp_stop(struct kvm_s390_local_interrupt
*li
, int action
)
130 struct kvm_s390_interrupt_info
*inti
;
131 int rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
133 inti
= kzalloc(sizeof(*inti
), GFP_ATOMIC
);
136 inti
->type
= KVM_S390_SIGP_STOP
;
138 spin_lock_bh(&li
->lock
);
139 if ((atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
)) {
141 if ((action
& ACTION_STORE_ON_STOP
) != 0)
145 list_add_tail(&inti
->list
, &li
->list
);
146 atomic_set(&li
->active
, 1);
147 atomic_set_mask(CPUSTAT_STOP_INT
, li
->cpuflags
);
148 li
->action_bits
|= action
;
149 if (waitqueue_active(li
->wq
))
150 wake_up_interruptible(li
->wq
);
152 spin_unlock_bh(&li
->lock
);
157 static int __sigp_stop(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, int action
)
159 struct kvm_s390_local_interrupt
*li
;
160 struct kvm_vcpu
*dst_vcpu
= NULL
;
163 if (cpu_addr
>= KVM_MAX_VCPUS
)
164 return SIGP_CC_NOT_OPERATIONAL
;
166 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
168 return SIGP_CC_NOT_OPERATIONAL
;
169 li
= &dst_vcpu
->arch
.local_int
;
171 rc
= __inject_sigp_stop(li
, action
);
173 VCPU_EVENT(vcpu
, 4, "sent sigp stop to cpu %x", cpu_addr
);
175 if ((action
& ACTION_STORE_ON_STOP
) != 0 && rc
== -ESHUTDOWN
) {
176 /* If the CPU has already been stopped, we still have
177 * to save the status when doing stop-and-store. This
178 * has to be done after unlocking all spinlocks. */
179 rc
= kvm_s390_store_status_unloaded(dst_vcpu
,
180 KVM_S390_STORE_STATUS_NOADDR
);
186 static int __sigp_set_arch(struct kvm_vcpu
*vcpu
, u32 parameter
)
192 switch (parameter
& 0xff) {
194 rc
= SIGP_CC_NOT_OPERATIONAL
;
198 kvm_for_each_vcpu(i
, v
, vcpu
->kvm
) {
199 v
->arch
.pfault_token
= KVM_S390_PFAULT_TOKEN_INVALID
;
200 kvm_clear_async_pf_completion_queue(v
);
203 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
211 static int __sigp_set_prefix(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, u32 address
,
214 struct kvm_s390_local_interrupt
*li
;
215 struct kvm_vcpu
*dst_vcpu
= NULL
;
216 struct kvm_s390_interrupt_info
*inti
;
219 if (cpu_addr
< KVM_MAX_VCPUS
)
220 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
222 return SIGP_CC_NOT_OPERATIONAL
;
223 li
= &dst_vcpu
->arch
.local_int
;
226 * Make sure the new value is valid memory. We only need to check the
227 * first page, since address is 8k aligned and memory pieces are always
228 * at least 1MB aligned and have at least a size of 1MB.
230 address
&= 0x7fffe000u
;
231 if (kvm_is_error_gpa(vcpu
->kvm
, address
)) {
232 *reg
&= 0xffffffff00000000UL
;
233 *reg
|= SIGP_STATUS_INVALID_PARAMETER
;
234 return SIGP_CC_STATUS_STORED
;
237 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
241 spin_lock_bh(&li
->lock
);
242 /* cpu must be in stopped state */
243 if (!(atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
)) {
244 *reg
&= 0xffffffff00000000UL
;
245 *reg
|= SIGP_STATUS_INCORRECT_STATE
;
246 rc
= SIGP_CC_STATUS_STORED
;
251 inti
->type
= KVM_S390_SIGP_SET_PREFIX
;
252 inti
->prefix
.address
= address
;
254 list_add_tail(&inti
->list
, &li
->list
);
255 atomic_set(&li
->active
, 1);
256 if (waitqueue_active(li
->wq
))
257 wake_up_interruptible(li
->wq
);
258 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
260 VCPU_EVENT(vcpu
, 4, "set prefix of cpu %02x to %x", cpu_addr
, address
);
262 spin_unlock_bh(&li
->lock
);
266 static int __sigp_store_status_at_addr(struct kvm_vcpu
*vcpu
, u16 cpu_id
,
269 struct kvm_vcpu
*dst_vcpu
= NULL
;
273 if (cpu_id
< KVM_MAX_VCPUS
)
274 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_id
);
276 return SIGP_CC_NOT_OPERATIONAL
;
278 spin_lock_bh(&dst_vcpu
->arch
.local_int
.lock
);
279 flags
= atomic_read(dst_vcpu
->arch
.local_int
.cpuflags
);
280 spin_unlock_bh(&dst_vcpu
->arch
.local_int
.lock
);
281 if (!(flags
& CPUSTAT_STOPPED
)) {
282 *reg
&= 0xffffffff00000000UL
;
283 *reg
|= SIGP_STATUS_INCORRECT_STATE
;
284 return SIGP_CC_STATUS_STORED
;
288 rc
= kvm_s390_store_status_unloaded(dst_vcpu
, addr
);
290 *reg
&= 0xffffffff00000000UL
;
291 *reg
|= SIGP_STATUS_INVALID_PARAMETER
;
292 rc
= SIGP_CC_STATUS_STORED
;
297 static int __sigp_sense_running(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
300 struct kvm_s390_local_interrupt
*li
;
301 struct kvm_vcpu
*dst_vcpu
= NULL
;
304 if (cpu_addr
>= KVM_MAX_VCPUS
)
305 return SIGP_CC_NOT_OPERATIONAL
;
307 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
309 return SIGP_CC_NOT_OPERATIONAL
;
310 li
= &dst_vcpu
->arch
.local_int
;
311 if (atomic_read(li
->cpuflags
) & CPUSTAT_RUNNING
) {
313 rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
316 *reg
&= 0xffffffff00000000UL
;
317 *reg
|= SIGP_STATUS_NOT_RUNNING
;
318 rc
= SIGP_CC_STATUS_STORED
;
321 VCPU_EVENT(vcpu
, 4, "sensed running status of cpu %x rc %x", cpu_addr
,
327 /* Test whether the destination CPU is available and not busy */
328 static int sigp_check_callable(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
330 struct kvm_s390_local_interrupt
*li
;
331 int rc
= SIGP_CC_ORDER_CODE_ACCEPTED
;
332 struct kvm_vcpu
*dst_vcpu
= NULL
;
334 if (cpu_addr
>= KVM_MAX_VCPUS
)
335 return SIGP_CC_NOT_OPERATIONAL
;
337 dst_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
339 return SIGP_CC_NOT_OPERATIONAL
;
340 li
= &dst_vcpu
->arch
.local_int
;
341 spin_lock_bh(&li
->lock
);
342 if (li
->action_bits
& ACTION_STOP_ON_STOP
)
344 spin_unlock_bh(&li
->lock
);
349 int kvm_s390_handle_sigp(struct kvm_vcpu
*vcpu
)
351 int r1
= (vcpu
->arch
.sie_block
->ipa
& 0x00f0) >> 4;
352 int r3
= vcpu
->arch
.sie_block
->ipa
& 0x000f;
354 u16 cpu_addr
= vcpu
->run
->s
.regs
.gprs
[r3
];
358 /* sigp in userspace can exit */
359 if (vcpu
->arch
.sie_block
->gpsw
.mask
& PSW_MASK_PSTATE
)
360 return kvm_s390_inject_program_int(vcpu
, PGM_PRIVILEGED_OP
);
362 order_code
= kvm_s390_get_base_disp_rs(vcpu
);
365 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
];
367 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
+ 1];
369 trace_kvm_s390_handle_sigp(vcpu
, order_code
, cpu_addr
, parameter
);
370 switch (order_code
) {
372 vcpu
->stat
.instruction_sigp_sense
++;
373 rc
= __sigp_sense(vcpu
, cpu_addr
,
374 &vcpu
->run
->s
.regs
.gprs
[r1
]);
376 case SIGP_EXTERNAL_CALL
:
377 vcpu
->stat
.instruction_sigp_external_call
++;
378 rc
= __sigp_external_call(vcpu
, cpu_addr
);
380 case SIGP_EMERGENCY_SIGNAL
:
381 vcpu
->stat
.instruction_sigp_emergency
++;
382 rc
= __sigp_emergency(vcpu
, cpu_addr
);
385 vcpu
->stat
.instruction_sigp_stop
++;
386 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STOP_ON_STOP
);
388 case SIGP_STOP_AND_STORE_STATUS
:
389 vcpu
->stat
.instruction_sigp_stop
++;
390 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STORE_ON_STOP
|
391 ACTION_STOP_ON_STOP
);
393 case SIGP_STORE_STATUS_AT_ADDRESS
:
394 rc
= __sigp_store_status_at_addr(vcpu
, cpu_addr
, parameter
,
395 &vcpu
->run
->s
.regs
.gprs
[r1
]);
397 case SIGP_SET_ARCHITECTURE
:
398 vcpu
->stat
.instruction_sigp_arch
++;
399 rc
= __sigp_set_arch(vcpu
, parameter
);
401 case SIGP_SET_PREFIX
:
402 vcpu
->stat
.instruction_sigp_prefix
++;
403 rc
= __sigp_set_prefix(vcpu
, cpu_addr
, parameter
,
404 &vcpu
->run
->s
.regs
.gprs
[r1
]);
406 case SIGP_COND_EMERGENCY_SIGNAL
:
407 rc
= __sigp_conditional_emergency(vcpu
, cpu_addr
, parameter
,
408 &vcpu
->run
->s
.regs
.gprs
[r1
]);
410 case SIGP_SENSE_RUNNING
:
411 vcpu
->stat
.instruction_sigp_sense_running
++;
412 rc
= __sigp_sense_running(vcpu
, cpu_addr
,
413 &vcpu
->run
->s
.regs
.gprs
[r1
]);
416 rc
= sigp_check_callable(vcpu
, cpu_addr
);
417 if (rc
== SIGP_CC_ORDER_CODE_ACCEPTED
)
418 rc
= -EOPNOTSUPP
; /* Handle START in user space */
421 vcpu
->stat
.instruction_sigp_restart
++;
422 rc
= sigp_check_callable(vcpu
, cpu_addr
);
423 if (rc
== SIGP_CC_ORDER_CODE_ACCEPTED
) {
425 "sigp restart %x to handle userspace",
427 /* user space must know about restart */
438 kvm_s390_set_psw_cc(vcpu
, rc
);
443 * Handle SIGP partial execution interception.
445 * This interception will occur at the source cpu when a source cpu sends an
446 * external call to a target cpu and the target cpu has the WAIT bit set in
447 * its cpuflags. Interception will occurr after the interrupt indicator bits at
448 * the target cpu have been set. All error cases will lead to instruction
449 * interception, therefore nothing is to be checked or prepared.
451 int kvm_s390_handle_sigp_pei(struct kvm_vcpu
*vcpu
)
453 int r3
= vcpu
->arch
.sie_block
->ipa
& 0x000f;
454 u16 cpu_addr
= vcpu
->run
->s
.regs
.gprs
[r3
];
455 struct kvm_vcpu
*dest_vcpu
;
456 u8 order_code
= kvm_s390_get_base_disp_rs(vcpu
);
458 trace_kvm_s390_handle_sigp_pei(vcpu
, order_code
, cpu_addr
);
460 if (order_code
== SIGP_EXTERNAL_CALL
) {
461 dest_vcpu
= kvm_get_vcpu(vcpu
->kvm
, cpu_addr
);
462 BUG_ON(dest_vcpu
== NULL
);
464 spin_lock_bh(&dest_vcpu
->arch
.local_int
.lock
);
465 if (waitqueue_active(&dest_vcpu
->wq
))
466 wake_up_interruptible(&dest_vcpu
->wq
);
467 dest_vcpu
->preempted
= true;
468 spin_unlock_bh(&dest_vcpu
->arch
.local_int
.lock
);
470 kvm_s390_set_psw_cc(vcpu
, SIGP_CC_ORDER_CODE_ACCEPTED
);