/*
 * sigp.c - handling interprocessor communication
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include "gaccess.h"
#include "kvm-s390.h"

/* sigp order codes */
#define SIGP_SENSE		0x01
#define SIGP_EXTERNAL_CALL	0x02
#define SIGP_EMERGENCY		0x03
#define SIGP_START		0x04
#define SIGP_STOP		0x05
#define SIGP_RESTART		0x06
#define SIGP_STOP_STORE_STATUS	0x09
#define SIGP_INITIAL_CPU_RESET	0x0b
#define SIGP_CPU_RESET		0x0c
#define SIGP_SET_PREFIX		0x0d
#define SIGP_STORE_STATUS_ADDR	0x0e
#define SIGP_SET_ARCH		0x12

/* cpu status bits */
#define SIGP_STAT_EQUIPMENT_CHECK	0x80000000UL
#define SIGP_STAT_INCORRECT_STATE	0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER	0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING	0x00000080UL
#define SIGP_STAT_STOPPED		0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV	0x00000020UL
#define SIGP_STAT_CHECK_STOP		0x00000010UL
#define SIGP_STAT_INOPERATIVE		0x00000004UL
#define SIGP_STAT_INVALID_ORDER		0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK	0x00000001UL

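/*
 * Each __sigp_* helper below returns the SIGP condition code that
 * kvm_s390_handle_sigp() stores into the guest PSW: 0 (order accepted),
 * 1 (status stored), 2 (busy) or 3 (not operational). A negative value
 * is an error that is propagated back to the caller instead.
 */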
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			unsigned long *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		 & CPUSTAT_RUNNING) {
		*reg &= 0xffffffff00000000UL;
		rc = 1; /* status stored */
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_STOPPED;
		rc = 1; /* status stored */
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

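/* Queue an emergency-signal external interrupt for the addressed vcpu. */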
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

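/* Queue an external-call interrupt for the addressed vcpu. */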
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

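/*
 * Queue a stop interrupt for @li and record the requested action
 * (ACTION_STOP_ON_STOP and/or ACTION_STORE_ON_STOP) in its action_bits.
 */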
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);

	return 0; /* order accepted */
}

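/* Resolve the addressed vcpu and inject a stop request into it. */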
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}

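/*
 * Inject a stop request with the given action directly into @vcpu's own
 * local interrupt list.
 */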
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	return __inject_sigp_stop(li, action);
}

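/*
 * SIGP set-architecture: mode codes 1 and 2 are accepted, mode 0 is
 * reported as not operational, and any other mode is rejected with
 * -EOPNOTSUPP.
 */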
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = 3; /* not operational */
		break;
	case 1:
	case 2:
		rc = 0; /* order accepted */
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

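/*
 * SIGP set-prefix: check that the new prefix area is backed by guest
 * memory and that the target cpu is stopped, then queue a set-prefix
 * interrupt for it.
 */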
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     unsigned long *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg |= SIGP_STAT_INVALID_PARAMETER;
		return 1; /* invalid parameter */
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return 2; /* busy */

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		rc = 1; /* incorrect state */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		rc = 1; /* incorrect state */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = 0; /* order accepted */

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}

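/*
 * Intercept handler for the SIGP instruction: decode the order code and
 * parameter from the instruction and the guest registers, dispatch to the
 * handler for that order and store the resulting condition code in the
 * guest PSW. Orders the kernel does not handle (e.g. restart) are passed
 * to userspace via -EOPNOTSUPP.
 */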
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->arch.guest_gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	order_code = disp2;
	if (base2)
		order_code += vcpu->arch.guest_gprs[base2];

	if (r1 % 2)
		parameter = vcpu->arch.guest_gprs[r1];
	else
		parameter = vcpu->arch.guest_gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->arch.guest_gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		/* stop and store status also has to stop the cpu */
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP |
						 ACTION_STORE_ON_STOP);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->arch.guest_gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		/* user space must know about restart */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}