Commit | Line | Data |
---|---|---|
5288fbf0 | 1 | /* |
a53c8fab | 2 | * handling interprocessor communication |
5288fbf0 | 3 | * |
b13d3580 | 4 | * Copyright IBM Corp. 2008, 2013 |
5288fbf0 CB |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License (version 2 only) | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | |
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | |
9ace903d | 12 | * Christian Ehrhardt <ehrhardt@de.ibm.com> |
5288fbf0 CB |
13 | */ |
14 | ||
15 | #include <linux/kvm.h> | |
16 | #include <linux/kvm_host.h> | |
5a0e3ad6 | 17 | #include <linux/slab.h> |
a9ae32c3 | 18 | #include <asm/sigp.h> |
5288fbf0 CB |
19 | #include "gaccess.h" |
20 | #include "kvm-s390.h" | |
5786fffa | 21 | #include "trace.h" |
5288fbf0 | 22 | |
0096369d | 23 | static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, |
5a32c1af | 24 | u64 *reg) |
5288fbf0 | 25 | { |
1ee0bc55 JF |
26 | struct kvm_s390_local_interrupt *li; |
27 | struct kvm_vcpu *dst_vcpu = NULL; | |
28 | int cpuflags; | |
5288fbf0 CB |
29 | int rc; |
30 | ||
31 | if (cpu_addr >= KVM_MAX_VCPUS) | |
ea1918dd | 32 | return SIGP_CC_NOT_OPERATIONAL; |
5288fbf0 | 33 | |
1ee0bc55 JF |
34 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
35 | if (!dst_vcpu) | |
36 | return SIGP_CC_NOT_OPERATIONAL; | |
37 | li = &dst_vcpu->arch.local_int; | |
38 | ||
39 | cpuflags = atomic_read(li->cpuflags); | |
40 | if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) | |
21b26c08 CH |
41 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
42 | else { | |
5288fbf0 | 43 | *reg &= 0xffffffff00000000UL; |
1ee0bc55 | 44 | if (cpuflags & CPUSTAT_ECALL_PEND) |
21b26c08 | 45 | *reg |= SIGP_STATUS_EXT_CALL_PENDING; |
1ee0bc55 | 46 | if (cpuflags & CPUSTAT_STOPPED) |
21b26c08 | 47 | *reg |= SIGP_STATUS_STOPPED; |
ea1918dd | 48 | rc = SIGP_CC_STATUS_STORED; |
5288fbf0 | 49 | } |
5288fbf0 CB |
50 | |
51 | VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); | |
52 | return rc; | |
53 | } | |
54 | ||
/*
 * SIGP EMERGENCY SIGNAL: queue an emergency-signal external interrupt
 * for the addressed CPU.
 *
 * Allocates a KVM_S390_INT_EMERGENCY interrupt info carrying the sending
 * vcpu's id, appends it to the target's local interrupt list under the
 * local-interrupt lock, flags CPUSTAT_EXT_INT and wakes the target if it
 * is waiting.
 *
 * Returns SIGP_CC_NOT_OPERATIONAL if cpu_addr names no vcpu, -ENOMEM on
 * allocation failure, SIGP_CC_ORDER_CODE_ACCEPTED otherwise.
 */
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	/* bounds-check before indexing the vcpu array */
	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	/* the emergency code identifies the sending CPU */
	inti->emerg.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	/* publish the interrupt and wake the target atomically w.r.t. li */
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}
85 | ||
b13d3580 TH |
86 | static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, |
87 | u16 asn, u64 *reg) | |
88 | { | |
89 | struct kvm_vcpu *dst_vcpu = NULL; | |
90 | const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; | |
91 | u16 p_asn, s_asn; | |
92 | psw_t *psw; | |
93 | u32 flags; | |
94 | ||
95 | if (cpu_addr < KVM_MAX_VCPUS) | |
96 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | |
97 | if (!dst_vcpu) | |
98 | return SIGP_CC_NOT_OPERATIONAL; | |
99 | flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags); | |
100 | psw = &dst_vcpu->arch.sie_block->gpsw; | |
101 | p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ | |
102 | s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ | |
103 | ||
104 | /* Deliver the emergency signal? */ | |
105 | if (!(flags & CPUSTAT_STOPPED) | |
106 | || (psw->mask & psw_int_mask) != psw_int_mask | |
107 | || ((flags & CPUSTAT_WAIT) && psw->addr != 0) | |
108 | || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { | |
109 | return __sigp_emergency(vcpu, cpu_addr); | |
110 | } else { | |
111 | *reg &= 0xffffffff00000000UL; | |
112 | *reg |= SIGP_STATUS_INCORRECT_STATE; | |
113 | return SIGP_CC_STATUS_STORED; | |
114 | } | |
115 | } | |
116 | ||
/*
 * SIGP EXTERNAL CALL: queue an external-call interrupt for the addressed
 * CPU.
 *
 * Mirrors __sigp_emergency() except that the queued interrupt type is
 * KVM_S390_INT_EXTERNAL_CALL.  The external-call code carries the sending
 * vcpu's id.
 *
 * Returns SIGP_CC_NOT_OPERATIONAL if cpu_addr names no vcpu, -ENOMEM on
 * allocation failure, SIGP_CC_ORDER_CODE_ACCEPTED otherwise.
 */
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	/* bounds-check before indexing the vcpu array */
	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	/* the external-call code identifies the sending CPU */
	inti->extcall.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	/* publish the interrupt and wake the target atomically w.r.t. li */
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}
147 | ||
/*
 * Queue a stop interrupt on the given local-interrupt structure and
 * record the requested stop action bits.
 *
 * Uses GFP_ATOMIC since callers may not be allowed to sleep here.
 * If the target is already stopped the allocation is discarded: for a
 * plain stop nothing further is needed, but for stop-and-store the
 * caller must still store the status, which is signalled via -ESHUTDOWN
 * (the store cannot be done here because the lock is held).
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED, -ENOMEM, or -ESHUTDOWN as above.
 */
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	/* already stopped: drop the interrupt, maybe ask caller to store */
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}
176 | ||
177 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | |
178 | { | |
9ace903d | 179 | struct kvm_s390_local_interrupt *li; |
1ee0bc55 | 180 | struct kvm_vcpu *dst_vcpu = NULL; |
9ace903d CE |
181 | int rc; |
182 | ||
183 | if (cpu_addr >= KVM_MAX_VCPUS) | |
ea1918dd | 184 | return SIGP_CC_NOT_OPERATIONAL; |
9ace903d | 185 | |
1ee0bc55 JF |
186 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
187 | if (!dst_vcpu) | |
188 | return SIGP_CC_NOT_OPERATIONAL; | |
189 | li = &dst_vcpu->arch.local_int; | |
9ace903d CE |
190 | |
191 | rc = __inject_sigp_stop(li, action); | |
192 | ||
5288fbf0 | 193 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); |
e879892c TH |
194 | |
195 | if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { | |
196 | /* If the CPU has already been stopped, we still have | |
197 | * to save the status when doing stop-and-store. This | |
198 | * has to be done after unlocking all spinlocks. */ | |
e879892c TH |
199 | rc = kvm_s390_store_status_unloaded(dst_vcpu, |
200 | KVM_S390_STORE_STATUS_NOADDR); | |
201 | } | |
202 | ||
5288fbf0 CB |
203 | return rc; |
204 | } | |
205 | ||
206 | static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) | |
207 | { | |
208 | int rc; | |
3c038e6b DD |
209 | unsigned int i; |
210 | struct kvm_vcpu *v; | |
5288fbf0 CB |
211 | |
212 | switch (parameter & 0xff) { | |
213 | case 0: | |
ea1918dd | 214 | rc = SIGP_CC_NOT_OPERATIONAL; |
5288fbf0 CB |
215 | break; |
216 | case 1: | |
217 | case 2: | |
3c038e6b DD |
218 | kvm_for_each_vcpu(i, v, vcpu->kvm) { |
219 | v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | |
220 | kvm_clear_async_pf_completion_queue(v); | |
221 | } | |
222 | ||
ea1918dd | 223 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
5288fbf0 CB |
224 | break; |
225 | default: | |
b8e660b8 | 226 | rc = -EOPNOTSUPP; |
5288fbf0 CB |
227 | } |
228 | return rc; | |
229 | } | |
230 | ||
/*
 * SIGP SET PREFIX: queue a set-prefix request for the addressed CPU.
 *
 * The new prefix (8k aligned, taken from the parameter) must point to
 * valid guest memory and the target must be stopped; otherwise the
 * appropriate status (invalid parameter / incorrect state) is stored in
 * the low word of *reg and SIGP_CC_STATUS_STORED is returned.
 *
 * Returns SIGP_CC_NOT_OPERATIONAL for an invalid cpu_addr, SIGP_CC_BUSY
 * on allocation failure, else one of the condition codes above.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	/* bounds-check before indexing the vcpu array */
	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		/* free the unused interrupt info before bailing out */
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}
285 | ||
00e9e435 TH |
286 | static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, |
287 | u32 addr, u64 *reg) | |
288 | { | |
289 | struct kvm_vcpu *dst_vcpu = NULL; | |
290 | int flags; | |
291 | int rc; | |
292 | ||
293 | if (cpu_id < KVM_MAX_VCPUS) | |
294 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id); | |
295 | if (!dst_vcpu) | |
296 | return SIGP_CC_NOT_OPERATIONAL; | |
297 | ||
298 | spin_lock_bh(&dst_vcpu->arch.local_int.lock); | |
299 | flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); | |
300 | spin_unlock_bh(&dst_vcpu->arch.local_int.lock); | |
301 | if (!(flags & CPUSTAT_STOPPED)) { | |
302 | *reg &= 0xffffffff00000000UL; | |
303 | *reg |= SIGP_STATUS_INCORRECT_STATE; | |
304 | return SIGP_CC_STATUS_STORED; | |
305 | } | |
306 | ||
307 | addr &= 0x7ffffe00; | |
308 | rc = kvm_s390_store_status_unloaded(dst_vcpu, addr); | |
309 | if (rc == -EFAULT) { | |
310 | *reg &= 0xffffffff00000000UL; | |
311 | *reg |= SIGP_STATUS_INVALID_PARAMETER; | |
312 | rc = SIGP_CC_STATUS_STORED; | |
313 | } | |
314 | return rc; | |
315 | } | |
316 | ||
bd59d3a4 | 317 | static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, |
5a32c1af | 318 | u64 *reg) |
bd59d3a4 | 319 | { |
1ee0bc55 JF |
320 | struct kvm_s390_local_interrupt *li; |
321 | struct kvm_vcpu *dst_vcpu = NULL; | |
bd59d3a4 | 322 | int rc; |
bd59d3a4 CH |
323 | |
324 | if (cpu_addr >= KVM_MAX_VCPUS) | |
ea1918dd | 325 | return SIGP_CC_NOT_OPERATIONAL; |
bd59d3a4 | 326 | |
1ee0bc55 JF |
327 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
328 | if (!dst_vcpu) | |
329 | return SIGP_CC_NOT_OPERATIONAL; | |
330 | li = &dst_vcpu->arch.local_int; | |
331 | if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { | |
332 | /* running */ | |
333 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | |
334 | } else { | |
335 | /* not running */ | |
336 | *reg &= 0xffffffff00000000UL; | |
337 | *reg |= SIGP_STATUS_NOT_RUNNING; | |
338 | rc = SIGP_CC_STATUS_STORED; | |
bd59d3a4 | 339 | } |
bd59d3a4 CH |
340 | |
341 | VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr, | |
342 | rc); | |
343 | ||
344 | return rc; | |
345 | } | |
346 | ||
cc92d6de TH |
347 | /* Test whether the destination CPU is available and not busy */ |
348 | static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) | |
151104a7 | 349 | { |
151104a7 | 350 | struct kvm_s390_local_interrupt *li; |
ea1918dd | 351 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
1ee0bc55 | 352 | struct kvm_vcpu *dst_vcpu = NULL; |
151104a7 JF |
353 | |
354 | if (cpu_addr >= KVM_MAX_VCPUS) | |
ea1918dd | 355 | return SIGP_CC_NOT_OPERATIONAL; |
151104a7 | 356 | |
1ee0bc55 JF |
357 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
358 | if (!dst_vcpu) | |
359 | return SIGP_CC_NOT_OPERATIONAL; | |
360 | li = &dst_vcpu->arch.local_int; | |
151104a7 JF |
361 | spin_lock_bh(&li->lock); |
362 | if (li->action_bits & ACTION_STOP_ON_STOP) | |
ea1918dd | 363 | rc = SIGP_CC_BUSY; |
151104a7 | 364 | spin_unlock_bh(&li->lock); |
1ee0bc55 | 365 | |
151104a7 JF |
366 | return rc; |
367 | } | |
368 | ||
/*
 * Intercept handler for the SIGNAL PROCESSOR instruction.
 *
 * Decodes the register fields from the instruction, dispatches to the
 * per-order helper, and sets the guest condition code from the helper's
 * result.  Negative return values propagate the error to the caller
 * (e.g. -EOPNOTSUPP to let userspace handle the order).
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* r1 and r3 register numbers are encoded in the instruction's ipa */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* the parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* a non-negative rc is the condition code to report to the guest */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
4953919f DH |
461 | |
462 | /* | |
463 | * Handle SIGP partial execution interception. | |
464 | * | |
465 | * This interception will occur at the source cpu when a source cpu sends an | |
466 | * external call to a target cpu and the target cpu has the WAIT bit set in | |
467 | * its cpuflags. Interception will occurr after the interrupt indicator bits at | |
468 | * the target cpu have been set. All error cases will lead to instruction | |
469 | * interception, therefore nothing is to be checked or prepared. | |
470 | */ | |
471 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |
472 | { | |
473 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; | |
474 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; | |
475 | struct kvm_vcpu *dest_vcpu; | |
476 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu); | |
477 | ||
478 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | |
479 | ||
480 | if (order_code == SIGP_EXTERNAL_CALL) { | |
481 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | |
482 | BUG_ON(dest_vcpu == NULL); | |
483 | ||
484 | spin_lock_bh(&dest_vcpu->arch.local_int.lock); | |
485 | if (waitqueue_active(&dest_vcpu->wq)) | |
486 | wake_up_interruptible(&dest_vcpu->wq); | |
487 | dest_vcpu->preempted = true; | |
488 | spin_unlock_bh(&dest_vcpu->arch.local_int.lock); | |
489 | ||
490 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); | |
491 | return 0; | |
492 | } | |
493 | ||
494 | return -EOPNOTSUPP; | |
495 | } |