KVM: PPC: Pass EA to updating emulation ops
[deliverable/linux.git] / arch / powerpc / kvm / emulate.c
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21 #include <linux/jiffies.h>
22 #include <linux/hrtimer.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/kvm_host.h>
26
27 #include <asm/reg.h>
28 #include <asm/time.h>
29 #include <asm/byteorder.h>
30 #include <asm/kvm_ppc.h>
31 #include <asm/disassemble.h>
32 #include "timing.h"
33 #include "trace.h"
34
35 #define OP_TRAP 3
36 #define OP_TRAP_64 2
37
38 #define OP_31_XOP_LWZX 23
39 #define OP_31_XOP_LBZX 87
40 #define OP_31_XOP_STWX 151
41 #define OP_31_XOP_STBX 215
42 #define OP_31_XOP_LBZUX 119
43 #define OP_31_XOP_STBUX 247
44 #define OP_31_XOP_LHZX 279
45 #define OP_31_XOP_LHZUX 311
46 #define OP_31_XOP_MFSPR 339
47 #define OP_31_XOP_LHAX 343
48 #define OP_31_XOP_STHX 407
49 #define OP_31_XOP_STHUX 439
50 #define OP_31_XOP_MTSPR 467
51 #define OP_31_XOP_DCBI 470
52 #define OP_31_XOP_LWBRX 534
53 #define OP_31_XOP_TLBSYNC 566
54 #define OP_31_XOP_STWBRX 662
55 #define OP_31_XOP_LHBRX 790
56 #define OP_31_XOP_STHBRX 918
57
58 #define OP_LWZ 32
59 #define OP_LWZU 33
60 #define OP_LBZ 34
61 #define OP_LBZU 35
62 #define OP_STW 36
63 #define OP_STWU 37
64 #define OP_STB 38
65 #define OP_STBU 39
66 #define OP_LHZ 40
67 #define OP_LHZU 41
68 #define OP_LHA 42
69 #define OP_LHAU 43
70 #define OP_STH 44
71 #define OP_STHU 45
72
/*
 * Emulate a guest write to the DEC (decrementer) SPR: re-arm the host
 * hrtimer that models the guest decrementer so it fires when the guest
 * DEC value would reach zero.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/* DEC ticks * 1000 / (ticks per usec) = total nanoseconds. */
	dec_time *= 1000;
	do_div(dec_time, tb_ticks_per_usec);
	/*
	 * do_div leaves the quotient (whole seconds) in dec_time and
	 * returns the remainder (leftover nanoseconds) for ktime_set.
	 */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/* Remember the timebase at programming time for kvmppc_get_dec(). */
	vcpu->arch.dec_jiffies = get_tb();
}
112
113 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
114 {
115 u64 jd = tb - vcpu->arch.dec_jiffies;
116
117 #ifdef CONFIG_BOOKE
118 if (vcpu->arch.dec < jd)
119 return 0;
120 #endif
121
122 return vcpu->arch.dec - jd;
123 }
124
125 /* XXX to do:
126 * lhax
127 * lhaux
128 * lswx
129 * lswi
130 * stswx
131 * stswi
132 * lha
133 * lhau
134 * lmw
135 * stmw
136 *
137 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
138 */
139 /* XXX Should probably auto-generate instruction decoding for a particular core
140 * from opcode tables in the future. */
141 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
142 {
143 u32 inst = kvmppc_get_last_inst(vcpu);
144 int ra;
145 int rb;
146 int rs;
147 int rt;
148 int sprn;
149 enum emulation_result emulated = EMULATE_DONE;
150 int advance = 1;
151
152 /* this default type might be overwritten by subcategories */
153 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
154
155 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
156
157 switch (get_op(inst)) {
158 case OP_TRAP:
159 #ifdef CONFIG_PPC_BOOK3S
160 case OP_TRAP_64:
161 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
162 #else
163 kvmppc_core_queue_program(vcpu,
164 vcpu->arch.shared->esr | ESR_PTR);
165 #endif
166 advance = 0;
167 break;
168
169 case 31:
170 switch (get_xop(inst)) {
171
172 case OP_31_XOP_LWZX:
173 rt = get_rt(inst);
174 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
175 break;
176
177 case OP_31_XOP_LBZX:
178 rt = get_rt(inst);
179 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
180 break;
181
182 case OP_31_XOP_LBZUX:
183 rt = get_rt(inst);
184 ra = get_ra(inst);
185 rb = get_rb(inst);
186
187 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
188 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
189 break;
190
191 case OP_31_XOP_STWX:
192 rs = get_rs(inst);
193 emulated = kvmppc_handle_store(run, vcpu,
194 kvmppc_get_gpr(vcpu, rs),
195 4, 1);
196 break;
197
198 case OP_31_XOP_STBX:
199 rs = get_rs(inst);
200 emulated = kvmppc_handle_store(run, vcpu,
201 kvmppc_get_gpr(vcpu, rs),
202 1, 1);
203 break;
204
205 case OP_31_XOP_STBUX:
206 rs = get_rs(inst);
207 ra = get_ra(inst);
208 rb = get_rb(inst);
209
210 emulated = kvmppc_handle_store(run, vcpu,
211 kvmppc_get_gpr(vcpu, rs),
212 1, 1);
213 kvmppc_set_gpr(vcpu, rs, vcpu->arch.vaddr_accessed);
214 break;
215
216 case OP_31_XOP_LHAX:
217 rt = get_rt(inst);
218 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
219 break;
220
221 case OP_31_XOP_LHZX:
222 rt = get_rt(inst);
223 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
224 break;
225
226 case OP_31_XOP_LHZUX:
227 rt = get_rt(inst);
228 ra = get_ra(inst);
229 rb = get_rb(inst);
230
231 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
232 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
233 break;
234
235 case OP_31_XOP_MFSPR:
236 sprn = get_sprn(inst);
237 rt = get_rt(inst);
238
239 switch (sprn) {
240 case SPRN_SRR0:
241 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
242 break;
243 case SPRN_SRR1:
244 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
245 break;
246 case SPRN_PVR:
247 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
248 case SPRN_PIR:
249 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
250 case SPRN_MSSSR0:
251 kvmppc_set_gpr(vcpu, rt, 0); break;
252
253 /* Note: mftb and TBRL/TBWL are user-accessible, so
254 * the guest can always access the real TB anyways.
255 * In fact, we probably will never see these traps. */
256 case SPRN_TBWL:
257 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
258 case SPRN_TBWU:
259 kvmppc_set_gpr(vcpu, rt, get_tb()); break;
260
261 case SPRN_SPRG0:
262 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
263 break;
264 case SPRN_SPRG1:
265 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
266 break;
267 case SPRN_SPRG2:
268 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
269 break;
270 case SPRN_SPRG3:
271 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
272 break;
273 /* Note: SPRG4-7 are user-readable, so we don't get
274 * a trap. */
275
276 case SPRN_DEC:
277 {
278 kvmppc_set_gpr(vcpu, rt,
279 kvmppc_get_dec(vcpu, get_tb()));
280 break;
281 }
282 default:
283 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
284 if (emulated == EMULATE_FAIL) {
285 printk("mfspr: unknown spr %x\n", sprn);
286 kvmppc_set_gpr(vcpu, rt, 0);
287 }
288 break;
289 }
290 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
291 break;
292
293 case OP_31_XOP_STHX:
294 rs = get_rs(inst);
295 ra = get_ra(inst);
296 rb = get_rb(inst);
297
298 emulated = kvmppc_handle_store(run, vcpu,
299 kvmppc_get_gpr(vcpu, rs),
300 2, 1);
301 break;
302
303 case OP_31_XOP_STHUX:
304 rs = get_rs(inst);
305 ra = get_ra(inst);
306 rb = get_rb(inst);
307
308 emulated = kvmppc_handle_store(run, vcpu,
309 kvmppc_get_gpr(vcpu, rs),
310 2, 1);
311 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
312 break;
313
314 case OP_31_XOP_MTSPR:
315 sprn = get_sprn(inst);
316 rs = get_rs(inst);
317 switch (sprn) {
318 case SPRN_SRR0:
319 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
320 break;
321 case SPRN_SRR1:
322 vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
323 break;
324
325 /* XXX We need to context-switch the timebase for
326 * watchdog and FIT. */
327 case SPRN_TBWL: break;
328 case SPRN_TBWU: break;
329
330 case SPRN_MSSSR0: break;
331
332 case SPRN_DEC:
333 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
334 kvmppc_emulate_dec(vcpu);
335 break;
336
337 case SPRN_SPRG0:
338 vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
339 break;
340 case SPRN_SPRG1:
341 vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
342 break;
343 case SPRN_SPRG2:
344 vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
345 break;
346 case SPRN_SPRG3:
347 vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
348 break;
349
350 default:
351 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
352 if (emulated == EMULATE_FAIL)
353 printk("mtspr: unknown spr %x\n", sprn);
354 break;
355 }
356 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
357 break;
358
359 case OP_31_XOP_DCBI:
360 /* Do nothing. The guest is performing dcbi because
361 * hardware DMA is not snooped by the dcache, but
362 * emulated DMA either goes through the dcache as
363 * normal writes, or the host kernel has handled dcache
364 * coherence. */
365 break;
366
367 case OP_31_XOP_LWBRX:
368 rt = get_rt(inst);
369 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
370 break;
371
372 case OP_31_XOP_TLBSYNC:
373 break;
374
375 case OP_31_XOP_STWBRX:
376 rs = get_rs(inst);
377 ra = get_ra(inst);
378 rb = get_rb(inst);
379
380 emulated = kvmppc_handle_store(run, vcpu,
381 kvmppc_get_gpr(vcpu, rs),
382 4, 0);
383 break;
384
385 case OP_31_XOP_LHBRX:
386 rt = get_rt(inst);
387 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
388 break;
389
390 case OP_31_XOP_STHBRX:
391 rs = get_rs(inst);
392 ra = get_ra(inst);
393 rb = get_rb(inst);
394
395 emulated = kvmppc_handle_store(run, vcpu,
396 kvmppc_get_gpr(vcpu, rs),
397 2, 0);
398 break;
399
400 default:
401 /* Attempt core-specific emulation below. */
402 emulated = EMULATE_FAIL;
403 }
404 break;
405
406 case OP_LWZ:
407 rt = get_rt(inst);
408 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
409 break;
410
411 case OP_LWZU:
412 ra = get_ra(inst);
413 rt = get_rt(inst);
414 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
415 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
416 break;
417
418 case OP_LBZ:
419 rt = get_rt(inst);
420 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
421 break;
422
423 case OP_LBZU:
424 ra = get_ra(inst);
425 rt = get_rt(inst);
426 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
427 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
428 break;
429
430 case OP_STW:
431 rs = get_rs(inst);
432 emulated = kvmppc_handle_store(run, vcpu,
433 kvmppc_get_gpr(vcpu, rs),
434 4, 1);
435 break;
436
437 case OP_STWU:
438 ra = get_ra(inst);
439 rs = get_rs(inst);
440 emulated = kvmppc_handle_store(run, vcpu,
441 kvmppc_get_gpr(vcpu, rs),
442 4, 1);
443 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
444 break;
445
446 case OP_STB:
447 rs = get_rs(inst);
448 emulated = kvmppc_handle_store(run, vcpu,
449 kvmppc_get_gpr(vcpu, rs),
450 1, 1);
451 break;
452
453 case OP_STBU:
454 ra = get_ra(inst);
455 rs = get_rs(inst);
456 emulated = kvmppc_handle_store(run, vcpu,
457 kvmppc_get_gpr(vcpu, rs),
458 1, 1);
459 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
460 break;
461
462 case OP_LHZ:
463 rt = get_rt(inst);
464 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
465 break;
466
467 case OP_LHZU:
468 ra = get_ra(inst);
469 rt = get_rt(inst);
470 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
471 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
472 break;
473
474 case OP_LHA:
475 rt = get_rt(inst);
476 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
477 break;
478
479 case OP_LHAU:
480 ra = get_ra(inst);
481 rt = get_rt(inst);
482 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
483 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
484 break;
485
486 case OP_STH:
487 rs = get_rs(inst);
488 emulated = kvmppc_handle_store(run, vcpu,
489 kvmppc_get_gpr(vcpu, rs),
490 2, 1);
491 break;
492
493 case OP_STHU:
494 ra = get_ra(inst);
495 rs = get_rs(inst);
496 emulated = kvmppc_handle_store(run, vcpu,
497 kvmppc_get_gpr(vcpu, rs),
498 2, 1);
499 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
500 break;
501
502 default:
503 emulated = EMULATE_FAIL;
504 }
505
506 if (emulated == EMULATE_FAIL) {
507 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
508 if (emulated == EMULATE_AGAIN) {
509 advance = 0;
510 } else if (emulated == EMULATE_FAIL) {
511 advance = 0;
512 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
513 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
514 kvmppc_core_queue_program(vcpu, 0);
515 }
516 }
517
518 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
519
520 /* Advance past emulated instruction. */
521 if (advance)
522 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
523
524 return emulated;
525 }
This page took 0.058594 seconds and 5 git commands to generate.