a27d4dc3b4a3bd0b06b1a36f4fe5c160f7eb414f
[deliverable/linux.git] / arch / powerpc / kvm / emulate.c
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21 #include <linux/jiffies.h>
22 #include <linux/hrtimer.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/kvm_host.h>
26 #include <linux/clockchips.h>
27
28 #include <asm/reg.h>
29 #include <asm/time.h>
30 #include <asm/byteorder.h>
31 #include <asm/kvm_ppc.h>
32 #include <asm/disassemble.h>
33 #include "timing.h"
34 #include "trace.h"
35
36 #define OP_TRAP 3
37 #define OP_TRAP_64 2
38
39 #define OP_31_XOP_TRAP 4
40 #define OP_31_XOP_LWZX 23
41 #define OP_31_XOP_TRAP_64 68
42 #define OP_31_XOP_LBZX 87
43 #define OP_31_XOP_STWX 151
44 #define OP_31_XOP_STBX 215
45 #define OP_31_XOP_LBZUX 119
46 #define OP_31_XOP_STBUX 247
47 #define OP_31_XOP_LHZX 279
48 #define OP_31_XOP_LHZUX 311
49 #define OP_31_XOP_MFSPR 339
50 #define OP_31_XOP_LHAX 343
51 #define OP_31_XOP_STHX 407
52 #define OP_31_XOP_STHUX 439
53 #define OP_31_XOP_MTSPR 467
54 #define OP_31_XOP_DCBI 470
55 #define OP_31_XOP_LWBRX 534
56 #define OP_31_XOP_TLBSYNC 566
57 #define OP_31_XOP_STWBRX 662
58 #define OP_31_XOP_LHBRX 790
59 #define OP_31_XOP_STHBRX 918
60
61 #define OP_LWZ 32
62 #define OP_LWZU 33
63 #define OP_LBZ 34
64 #define OP_LBZU 35
65 #define OP_STW 36
66 #define OP_STWU 37
67 #define OP_STB 38
68 #define OP_STBU 39
69 #define OP_LHZ 40
70 #define OP_LHZU 41
71 #define OP_LHA 42
72 #define OP_LHAU 43
73 #define OP_STH 44
74 #define OP_STHU 45
75
/*
 * Emulate a guest write to the DEC (decrementer) SPR.
 *
 * Cancels any pending emulated-decrementer timer, applies the
 * core-family-specific interrupt semantics, and (if the new value will
 * count down to zero in the future) arms an hrtimer to fire when the
 * emulated decrementer would expire.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	/* Drop any timer armed for the previous DEC value. */
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	/*
	 * Invert the clockevent mult/shift scaling to turn timebase ticks
	 * into nanoseconds: ns = ticks << shift / mult.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	/* Split into whole seconds (quotient) and leftover ns (remainder). */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/* Remember the timebase at which this DEC value was loaded. */
	vcpu->arch.dec_jiffies = get_tb();
}
119
120 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
121 {
122 u64 jd = tb - vcpu->arch.dec_jiffies;
123
124 #ifdef CONFIG_BOOKE
125 if (vcpu->arch.dec < jd)
126 return 0;
127 #endif
128
129 return vcpu->arch.dec - jd;
130 }
131
132 /* XXX to do:
133 * lhax
134 * lhaux
135 * lswx
136 * lswi
137 * stswx
138 * stswi
139 * lha
140 * lhau
141 * lmw
142 * stmw
143 *
144 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
145 */
146 /* XXX Should probably auto-generate instruction decoding for a particular core
147 * from opcode tables in the future. */
148 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
149 {
150 u32 inst = kvmppc_get_last_inst(vcpu);
151 int ra;
152 int rb;
153 int rs;
154 int rt;
155 int sprn;
156 enum emulation_result emulated = EMULATE_DONE;
157 int advance = 1;
158
159 /* this default type might be overwritten by subcategories */
160 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
161
162 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
163
164 switch (get_op(inst)) {
165 case OP_TRAP:
166 #ifdef CONFIG_PPC_BOOK3S
167 case OP_TRAP_64:
168 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
169 #else
170 kvmppc_core_queue_program(vcpu,
171 vcpu->arch.shared->esr | ESR_PTR);
172 #endif
173 advance = 0;
174 break;
175
176 case 31:
177 switch (get_xop(inst)) {
178
179 case OP_31_XOP_TRAP:
180 #ifdef CONFIG_64BIT
181 case OP_31_XOP_TRAP_64:
182 #endif
183 #ifdef CONFIG_PPC_BOOK3S
184 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
185 #else
186 kvmppc_core_queue_program(vcpu,
187 vcpu->arch.shared->esr | ESR_PTR);
188 #endif
189 advance = 0;
190 break;
191 case OP_31_XOP_LWZX:
192 rt = get_rt(inst);
193 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
194 break;
195
196 case OP_31_XOP_LBZX:
197 rt = get_rt(inst);
198 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
199 break;
200
201 case OP_31_XOP_LBZUX:
202 rt = get_rt(inst);
203 ra = get_ra(inst);
204 rb = get_rb(inst);
205
206 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
207 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
208 break;
209
210 case OP_31_XOP_STWX:
211 rs = get_rs(inst);
212 emulated = kvmppc_handle_store(run, vcpu,
213 kvmppc_get_gpr(vcpu, rs),
214 4, 1);
215 break;
216
217 case OP_31_XOP_STBX:
218 rs = get_rs(inst);
219 emulated = kvmppc_handle_store(run, vcpu,
220 kvmppc_get_gpr(vcpu, rs),
221 1, 1);
222 break;
223
224 case OP_31_XOP_STBUX:
225 rs = get_rs(inst);
226 ra = get_ra(inst);
227 rb = get_rb(inst);
228
229 emulated = kvmppc_handle_store(run, vcpu,
230 kvmppc_get_gpr(vcpu, rs),
231 1, 1);
232 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
233 break;
234
235 case OP_31_XOP_LHAX:
236 rt = get_rt(inst);
237 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
238 break;
239
240 case OP_31_XOP_LHZX:
241 rt = get_rt(inst);
242 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
243 break;
244
245 case OP_31_XOP_LHZUX:
246 rt = get_rt(inst);
247 ra = get_ra(inst);
248 rb = get_rb(inst);
249
250 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
251 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
252 break;
253
254 case OP_31_XOP_MFSPR:
255 sprn = get_sprn(inst);
256 rt = get_rt(inst);
257
258 switch (sprn) {
259 case SPRN_SRR0:
260 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
261 break;
262 case SPRN_SRR1:
263 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
264 break;
265 case SPRN_PVR:
266 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
267 case SPRN_PIR:
268 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
269 case SPRN_MSSSR0:
270 kvmppc_set_gpr(vcpu, rt, 0); break;
271
272 /* Note: mftb and TBRL/TBWL are user-accessible, so
273 * the guest can always access the real TB anyways.
274 * In fact, we probably will never see these traps. */
275 case SPRN_TBWL:
276 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
277 case SPRN_TBWU:
278 kvmppc_set_gpr(vcpu, rt, get_tb()); break;
279
280 case SPRN_SPRG0:
281 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
282 break;
283 case SPRN_SPRG1:
284 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
285 break;
286 case SPRN_SPRG2:
287 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
288 break;
289 case SPRN_SPRG3:
290 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
291 break;
292 /* Note: SPRG4-7 are user-readable, so we don't get
293 * a trap. */
294
295 case SPRN_DEC:
296 {
297 kvmppc_set_gpr(vcpu, rt,
298 kvmppc_get_dec(vcpu, get_tb()));
299 break;
300 }
301 default:
302 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
303 if (emulated == EMULATE_FAIL) {
304 printk("mfspr: unknown spr %x\n", sprn);
305 kvmppc_set_gpr(vcpu, rt, 0);
306 }
307 break;
308 }
309 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
310 break;
311
312 case OP_31_XOP_STHX:
313 rs = get_rs(inst);
314 ra = get_ra(inst);
315 rb = get_rb(inst);
316
317 emulated = kvmppc_handle_store(run, vcpu,
318 kvmppc_get_gpr(vcpu, rs),
319 2, 1);
320 break;
321
322 case OP_31_XOP_STHUX:
323 rs = get_rs(inst);
324 ra = get_ra(inst);
325 rb = get_rb(inst);
326
327 emulated = kvmppc_handle_store(run, vcpu,
328 kvmppc_get_gpr(vcpu, rs),
329 2, 1);
330 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
331 break;
332
333 case OP_31_XOP_MTSPR:
334 sprn = get_sprn(inst);
335 rs = get_rs(inst);
336 switch (sprn) {
337 case SPRN_SRR0:
338 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
339 break;
340 case SPRN_SRR1:
341 vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
342 break;
343
344 /* XXX We need to context-switch the timebase for
345 * watchdog and FIT. */
346 case SPRN_TBWL: break;
347 case SPRN_TBWU: break;
348
349 case SPRN_MSSSR0: break;
350
351 case SPRN_DEC:
352 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
353 kvmppc_emulate_dec(vcpu);
354 break;
355
356 case SPRN_SPRG0:
357 vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
358 break;
359 case SPRN_SPRG1:
360 vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
361 break;
362 case SPRN_SPRG2:
363 vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
364 break;
365 case SPRN_SPRG3:
366 vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
367 break;
368
369 default:
370 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
371 if (emulated == EMULATE_FAIL)
372 printk("mtspr: unknown spr %x\n", sprn);
373 break;
374 }
375 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
376 break;
377
378 case OP_31_XOP_DCBI:
379 /* Do nothing. The guest is performing dcbi because
380 * hardware DMA is not snooped by the dcache, but
381 * emulated DMA either goes through the dcache as
382 * normal writes, or the host kernel has handled dcache
383 * coherence. */
384 break;
385
386 case OP_31_XOP_LWBRX:
387 rt = get_rt(inst);
388 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
389 break;
390
391 case OP_31_XOP_TLBSYNC:
392 break;
393
394 case OP_31_XOP_STWBRX:
395 rs = get_rs(inst);
396 ra = get_ra(inst);
397 rb = get_rb(inst);
398
399 emulated = kvmppc_handle_store(run, vcpu,
400 kvmppc_get_gpr(vcpu, rs),
401 4, 0);
402 break;
403
404 case OP_31_XOP_LHBRX:
405 rt = get_rt(inst);
406 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
407 break;
408
409 case OP_31_XOP_STHBRX:
410 rs = get_rs(inst);
411 ra = get_ra(inst);
412 rb = get_rb(inst);
413
414 emulated = kvmppc_handle_store(run, vcpu,
415 kvmppc_get_gpr(vcpu, rs),
416 2, 0);
417 break;
418
419 default:
420 /* Attempt core-specific emulation below. */
421 emulated = EMULATE_FAIL;
422 }
423 break;
424
425 case OP_LWZ:
426 rt = get_rt(inst);
427 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
428 break;
429
430 case OP_LWZU:
431 ra = get_ra(inst);
432 rt = get_rt(inst);
433 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
434 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
435 break;
436
437 case OP_LBZ:
438 rt = get_rt(inst);
439 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
440 break;
441
442 case OP_LBZU:
443 ra = get_ra(inst);
444 rt = get_rt(inst);
445 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
446 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
447 break;
448
449 case OP_STW:
450 rs = get_rs(inst);
451 emulated = kvmppc_handle_store(run, vcpu,
452 kvmppc_get_gpr(vcpu, rs),
453 4, 1);
454 break;
455
456 case OP_STWU:
457 ra = get_ra(inst);
458 rs = get_rs(inst);
459 emulated = kvmppc_handle_store(run, vcpu,
460 kvmppc_get_gpr(vcpu, rs),
461 4, 1);
462 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
463 break;
464
465 case OP_STB:
466 rs = get_rs(inst);
467 emulated = kvmppc_handle_store(run, vcpu,
468 kvmppc_get_gpr(vcpu, rs),
469 1, 1);
470 break;
471
472 case OP_STBU:
473 ra = get_ra(inst);
474 rs = get_rs(inst);
475 emulated = kvmppc_handle_store(run, vcpu,
476 kvmppc_get_gpr(vcpu, rs),
477 1, 1);
478 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
479 break;
480
481 case OP_LHZ:
482 rt = get_rt(inst);
483 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
484 break;
485
486 case OP_LHZU:
487 ra = get_ra(inst);
488 rt = get_rt(inst);
489 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
490 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
491 break;
492
493 case OP_LHA:
494 rt = get_rt(inst);
495 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
496 break;
497
498 case OP_LHAU:
499 ra = get_ra(inst);
500 rt = get_rt(inst);
501 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
502 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
503 break;
504
505 case OP_STH:
506 rs = get_rs(inst);
507 emulated = kvmppc_handle_store(run, vcpu,
508 kvmppc_get_gpr(vcpu, rs),
509 2, 1);
510 break;
511
512 case OP_STHU:
513 ra = get_ra(inst);
514 rs = get_rs(inst);
515 emulated = kvmppc_handle_store(run, vcpu,
516 kvmppc_get_gpr(vcpu, rs),
517 2, 1);
518 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
519 break;
520
521 default:
522 emulated = EMULATE_FAIL;
523 }
524
525 if (emulated == EMULATE_FAIL) {
526 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
527 if (emulated == EMULATE_AGAIN) {
528 advance = 0;
529 } else if (emulated == EMULATE_FAIL) {
530 advance = 0;
531 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
532 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
533 kvmppc_core_queue_program(vcpu, 0);
534 }
535 }
536
537 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
538
539 /* Advance past emulated instruction. */
540 if (advance)
541 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
542
543 return emulated;
544 }
This page took 0.043441 seconds and 4 git commands to generate.