arch/s390/kvm/intercept.c
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

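/*
 * Dispatch table for intercepted instructions, indexed by the first
 * opcode byte (bits 0-7 of the IPA field in the SIE block).
 */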
static const intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x82] = kvm_s390_handle_lpsw,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb6] = kvm_s390_handle_stctl,
	[0xb7] = kvm_s390_handle_lctl,
	[0xb9] = kvm_s390_handle_b9,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = kvm_s390_handle_eb,
};

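/*
 * Return the length (in bytes) of the instruction that caused the current
 * intercept, or 0 if no instruction length is available for this
 * intercept code.
 */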
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}

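/*
 * Intercepts that need no in-kernel handling: account the exit
 * (external request) and let the guest continue.
 */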
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

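/*
 * Handle a stop-request intercept: if a stop irq is really pending,
 * optionally store the cpu status, mark the vcpu stopped (unless
 * userspace controls the cpu state) and return -EOPNOTSUPP so the
 * exit is forwarded to userspace.
 */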
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}

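/*
 * Validity intercepts signal an inconsistent SIE state that is not
 * handled in the kernel: warn once and let userspace deal with it.
 */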
static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
	return -EOPNOTSUPP;
}

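/*
 * Dispatch an instruction intercept to the handler registered for its
 * first opcode byte, or fall through to userspace if there is none.
 */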
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

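/*
 * Translate a program-interruption intercept into a program irq for the
 * guest: copy the exception code and the per-exception auxiliary fields
 * (translation-exception code, access ids, PER data, ...) from the SIE
 * block into the kvm_s390_pgm_info that gets injected.
 */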
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

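/*
 * Handle a program-interruption intercept: let guest debugging filter
 * PER events first, guard against endless specification-exception loops,
 * restore the transaction diagnostic block and re-inject the program
 * interrupt into the guest.
 */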
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		kvm_s390_handle_per_event(vcpu);
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW does not have external interrupts disabled. In the first case,
 * we've got to deliver the interrupt manually, and in the second case, we
 * drop to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/* We can not handle clock comparator or timer interrupt with bad PSW */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Make sure that the source is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
				     reg2, &srcaddr, GACC_FETCH);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Make sure that the destination is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
				     reg1, &dstaddr, GACC_STORE);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	kvm_s390_retry_instr(vcpu);

	return 0;
}

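/*
 * Partial-execution intercepts are only handled here for MVPG and SIGP;
 * anything else is passed on to userspace.
 */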
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

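/*
 * Top-level intercept dispatcher, called after SIE exits: route the
 * intercept code from the SIE block to the matching handler above.
 * ucontrol guests are always handled in userspace.
 */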
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case 0x10:
	case 0x18:
		return handle_noop(vcpu);
	case 0x04:
		return handle_instruction(vcpu);
	case 0x08:
		return handle_prog(vcpu);
	case 0x14:
		return handle_external_interrupt(vcpu);
	case 0x1c:
		return kvm_s390_handle_wait(vcpu);
	case 0x20:
		return handle_validity(vcpu);
	case 0x28:
		return handle_stop(vcpu);
	case 0x38:
		return handle_partial_execution(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}