KVM: PPC: Pass EA to updating emulation ops
arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

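/*
 * Decrementer emulation: writing DEC arms a relative hrtimer that fires
 * after the programmed number of timebase ticks has elapsed; reads are
 * reconstructed from the timebase delta in kvmppc_get_dec() below.
 */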
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

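	/*
	 * Illustrative numbers only: with tb_ticks_per_usec == 512 (a
	 * 512 MHz timebase) and a guest DEC of 0x2000000 (33554432 ticks),
	 * dec_time becomes 33554432 * 1000 / 512 = 65536000 ns, which the
	 * second do_div() splits into 0 s + 65536000 ns for ktime_set().
	 */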
	dec_time = vcpu->arch.dec;
	dec_time *= 1000;
	do_div(dec_time, tb_ticks_per_usec);
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}

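/*
 * Reconstruct the guest's current DEC value from the timebase ticks that
 * have elapsed since it was last written.  On BookE the decrementer stops
 * at zero rather than wrapping, so a fully elapsed DEC reads back as 0.
 */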
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

/* XXX to do:
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
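/*
 * Generic emulation entry point: decode the primary opcode, then (for
 * opcode 31) the extended opcode, and handle common loads/stores and SPR
 * accesses here.  Anything unrecognized is handed to the core-specific
 * (Book3S/BookE) emulation hooks at the bottom of this function.
 */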
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

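	/* The primary opcode sits in bits 0:5 of the instruction, the
	 * extended opcode for primary opcode 31 in bits 21:30. */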
	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

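		/*
		 * "Update" forms (lbzux, lhzux, stbux, ...) also write the
		 * effective address back into rA.  The EA is not recomputed
		 * here; the MMIO path records it in vcpu->arch.vaddr_accessed
		 * before emulation is invoked.
		 */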
		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

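		/*
		 * mfspr: a handful of SPRs are satisfied generically below;
		 * everything else is deferred to kvmppc_core_emulate_mfspr(),
		 * and an SPR nobody recognizes reads back as 0.
		 */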
		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
				break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
				break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb()); break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
				break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
				break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
				break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				kvmppc_set_gpr(vcpu, rt,
					       kvmppc_get_dec(vcpu, get_tb()));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

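		/*
		 * The byte-reversed forms pass is_bigendian = 0, so the
		 * load/store handlers swap the data relative to the guest's
		 * normal big-endian byte order.
		 */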
		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

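	/*
	 * Give the core-specific (Book3S/BookE) code a shot at anything the
	 * generic decoder did not handle.  EMULATE_AGAIN leaves the PC alone
	 * so the instruction can be retried; a final EMULATE_FAIL injects a
	 * program interrupt into the guest.
	 */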
	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}