KVM: s390: Add support for machine checks.
arch/s390/kvm/priv.c
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

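/*
 * SET PREFIX: the prefix register relocates the guest's lowcore. The new
 * prefix must be 8K-aligned and below 2G (the masking with 0x7fffe000 below),
 * and both 4K pages of the prefix area must be backed by accessible guest
 * memory, which is why the handler probes one byte of each page.
 */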
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        /* get the value */
        if (get_guest_u32(vcpu, operand2, &address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest_u32(vcpu, operand2, address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;
        int rc;

        vcpu->stat.instruction_stap++;

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
        return 0;
}

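/*
 * The storage key instructions 0xb229 (ISKE), 0xb22a (RRBE) and 0xb22b (SSKE)
 * are all dispatched here. Rewinding the PSW by the 4-byte instruction length
 * makes the guest re-execute the instruction once it re-enters SIE.
 */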
static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;
        vcpu->arch.sie_block->gpsw.addr -= 4;
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

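/*
 * The guest's condition code lives in bits 44-45 of the SIE block's
 * gpsw.mask, hence the "3ul << 44" manipulation below. STSCH and CHSC are
 * simply answered with condition code 3, i.e. the guest sees no channel
 * subsystem support here.
 */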
static int handle_stsch(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_stsch++;
        VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
        return 0;
}

static int handle_chsc(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_chsc++;
        VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
        return 0;
}

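/*
 * STORE FACILITY LIST writes the facility bits to their fixed location in
 * the guest lowcore. Only facilities that this KVM implementation can
 * actually handle are passed through (the 0xff00fff3 mask below).
 */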
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        unsigned int facility_list;
        int rc;

        vcpu->stat.instruction_stfl++;
        /* only pass the facility bits, which we can handle */
        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           &facility_list, sizeof(facility_list));
        if (rc == -EFAULT)
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        else {
                VCPU_EVENT(vcpu, 5, "store facility list value %x",
                           facility_list);
                trace_kvm_s390_handle_stfl(vcpu, facility_list);
        }
        return 0;
}

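/*
 * Called whenever the guest loads a new PSW (LPSW/LPSWE below): if the new
 * mask enables machine checks, any pending machine checks can now be
 * delivered to the guest.
 */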
static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x00000000000fffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

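/*
 * LPSW loads an ESA/390-format (8-byte) PSW, LPSWE a z/Architecture
 * (16-byte) PSW. After installing the new PSW, both handlers reject masks
 * with unassigned bits set and addresses that do not fit the selected
 * addressing mode, injecting a specification exception instead.
 */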
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_compat_t new_psw;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        if (!(new_psw.mask & PSW32_MASK_BASE)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask =
                (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_t new_psw;

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
              PSW_MASK_BA) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
        return 0;
}

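/*
 * Build a SYSIB 3.2.2 for the guest: take whatever the host's own STSI
 * reports about surrounding level-3 hypervisors and prepend an entry
 * describing this KVM guest (its online CPU count, a nominal capability
 * adjustment factor of 1000, and the "KVMguest"/"KVM/Linux" names,
 * converted to EBCDIC).
 */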
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

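/*
 * STORE SYSTEM INFORMATION. Function code 0 only asks for the current
 * configuration level, which is reported as 3 (running under a hypervisor)
 * in GR0. Function codes 1 and 2 are satisfied with the host's own STSI
 * data, SYSIB 3.2.2 is synthesized above, and everything else sets
 * condition code 3.
 */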
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        u64 operand2;
        unsigned long mem;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff && fc > 0)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 0:
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                return 0;
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_mem;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_fail;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        default:
                goto out_fail;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_mem;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_mem:
        free_page(mem);
out_fail:
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
        return 0;
}

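/*
 * Dispatch table indexed by the low byte of the 0xb2xx opcode. Entries that
 * stay NULL are not handled in the kernel and end up in userspace via the
 * -EOPNOTSUPP return below.
 */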
static const intercept_handler_t priv_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x34] = handle_stsch,
        [0x5f] = handle_chsc,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. We first check for
         * the privileged ones that we can handle in the kernel. If the
         * kernel can handle this instruction, we check for the problem
         * state bit and (a) handle the instruction or (b) send a code 2
         * program check.
         * Anything else goes to userspace.
         */
        handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                        return kvm_s390_inject_program_int(vcpu,
                                                PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

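/*
 * EXTRACT PSW is not a privileged instruction, which is why
 * kvm_s390_handle_b9() below skips the problem-state check for it. The
 * register numbers come from the R1/R2 nibbles of the intercepted
 * instruction as saved in the SIE block's ipb field.
 */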
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
        }
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if ((handler != handle_epsw) &&
                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
                        return kvm_s390_inject_program_int(vcpu,
                                                PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

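/*
 * TEST PROTECTION condition codes as set below: 0 means fetching and storing
 * are permitted, 1 means only fetching is permitted, 2 means neither is
 * permitted. Requests with a nonzero access key or with guest DAT enabled
 * are passed to userspace instead.
 */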
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        /* we must resolve the address without holding the mmap semaphore.
         * This is ok since the userspace hypervisor is not supposed to change
         * the mapping while the guest queries the memory. Otherwise the guest
         * might crash or get wrong info anyway. */
        user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, user_address);
        if (!vma) {
                up_read(&current->mm->mmap_sem);
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

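/*
 * SET CLOCK PROGRAMMABLE FIELD (opcode 0x0107): privileged, and bits 32-47
 * of GR0 must be zero or a specification exception is injected. The low 16
 * bits of GR0 become the guest's TOD programmable register (todpr).
 */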
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}