/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"
unsigned long kvmppc_booke_handlers;
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
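/* Exit counters exported through debugfs: each entry pairs a file name with
 * the offset of the corresponding field in the vcpu (or VM) stats struct. */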
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}
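/* The queue_* helpers below only mark an exception pending in the vcpu's
 * priority bitmap (stashing DEAR/ESR values where needed); the actual
 * injection into guest state happens later in kvmppc_booke_irqprio_deliver(). */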
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}
static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}
static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}
static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
}
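/* A lower bit number in pending_exceptions means a higher delivery priority:
 * kvmppc_core_deliver_interrupts() scans the bitmap from bit 0 upward. */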
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        unsigned long msr_mask;
        bool update_esr = false, update_dear = false;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
                allowed = vcpu->arch.msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                allowed = vcpu->arch.msr & MSR_EE;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }

        if (allowed) {
                vcpu->arch.srr0 = vcpu->arch.pc;
                vcpu->arch.srr1 = vcpu->arch.msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear == true)
                        vcpu->arch.dear = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

                clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

        return allowed;
}
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }
}
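/* Exit codes handed back to the run loop: RESUME_GUEST re-enters the guest,
 * RESUME_HOST drops back to the host/userspace, and RESUME_FLAG_NV asks the
 * exit path to reload the guest's non-volatile registers. */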
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.msr & MSR_PR) {
                        /* Program traps generated by user-level software must
                         * be handled by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* don't overwrite subtypes, just account kvm_stats */
                        kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                }
                break;
        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }
        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }
        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }
        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }
        kvmppc_core_deliver_interrupts(vcpu);

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pc = 0;
        vcpu->arch.msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

        vcpu->arch.shadow_pid = 1;

        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;

        kvmppc_init_timing_stats(vcpu);

        return kvmppc_core_vcpu_setup(vcpu);
}
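/* Copy guest register state to and from the user-visible struct kvm_regs,
 * backing the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls. */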
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
        regs->sprg2 = vcpu->arch.sprg2;
        regs->sprg3 = vcpu->arch.sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.srr0 = regs->srr0;
        vcpu->arch.srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
        vcpu->arch.sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}
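/* Module init: allocate the handler region that IVPR will point at and copy
 * the KVM exception handlers in at the host's existing IVOR offsets. */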
int __init kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR
         * supplies only the upper address bits, so the handlers must live in
         * a naturally aligned 64KB block; hence the 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return 0;
}
void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}