/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	u64 op2;
	int i;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (get_guest(vcpu, val, (u64 __user *) op2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
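
/* Handle SPX (SET PREFIX) interception */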
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
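
/* Handle STPX (STORE PREFIX) interception */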
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}
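
/* Handle STAP (STORE CPU ADDRESS) interception */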
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}
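
/* Enable storage-key handling lazily and stop intercepting key instructions */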
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}
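
/* Handle ISKE, SSKE and RRBE (storage key) interceptions by retrying them */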
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
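
/* Wait until the IPTE lock is free, then retry the intercepted instruction */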
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
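
/* Handle TB (TEST BLOCK) interception: clear the addressed page */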
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
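
/* Handle TPI (TEST PENDING INTERRUPTION) interception */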
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}
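
/* Handle TSCH (TEST SUBCHANNEL) interception: hand off to userspace */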
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
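
/* Handle the remaining channel I/O instructions */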
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}
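
/* Handle STFL (STORE FACILITY LIST) interception */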
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}
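
/* Handle LPSW (LOAD PSW) interception: load a short-format (compat) PSW */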
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
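
/* Handle LPSWE (LOAD PSW EXTENDED) interception: load a 16-byte PSW */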
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
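
/* Handle STIDP (STORE CPU ID) interception */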
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}
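
/* Insert this KVM guest's own VM entry into the STSI 3.2.2 block */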
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
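
/* Handle STSI (STORE SYSTEM INFORMATION) interception */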
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}
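
/* Dispatch table for the privileged B2 instructions handled in the kernel */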
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}
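
/* Handle EPSW (EXTRACT PSW) interception */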
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
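
/* Bit definitions for the PFMF first-operand register (r1) */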
#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL
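
/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */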
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
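
/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) for CMMA page hinting */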
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
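
/* Dispatch table for the B9 instructions handled in the kernel */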
48a3e950 703static const intercept_handler_t b9_handlers[256] = {
8a242234 704 [0x8a] = handle_ipte_interlock,
48a3e950 705 [0x8d] = handle_epsw,
8a242234
HC
706 [0x8e] = handle_ipte_interlock,
707 [0x8f] = handle_ipte_interlock,
b31288fa 708 [0xab] = handle_essa,
69d0d3a3 709 [0xaf] = handle_pfmf,
48a3e950
CH
710};
711
712int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
713{
714 intercept_handler_t handler;
715
716 /* This is handled just as for the B2 instructions. */
717 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
5087dfa6
TH
718 if (handler)
719 return handler(vcpu);
720
48a3e950
CH
721 return -EOPNOTSUPP;
722}
723
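
/* Handle LCTL (LOAD CONTROL) interception */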
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
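
/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */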
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}
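
/* Dispatch table for the EB instructions handled in the kernel */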
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
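
/* Handle TPROT (TEST PROTECTION) interception */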
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}
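
/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */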
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}