KVM: PPC: Book3E HV: call RECONCILE_IRQ_STATE to sync the software state
arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					      vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

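/*
 * On non-HV targets the CPU does not run guest code with the guest-visible
 * MSR but with shadow_msr, so every "full" MSR write above is funnelled
 * through the sync helpers to keep the SPE, FP and debug bits of shadow_msr
 * in step with what the guest believes its MSR to be.
 */
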
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

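/*
 * All of the queue/dequeue helpers above reduce to bit operations on
 * vcpu->arch.pending_exceptions, indexed by BOOKE_IRQPRIO_* priority;
 * nothing is delivered at queueing time.  Delivery happens later, when
 * kvmppc_core_prepare_to_enter() walks the pending bitmap and calls
 * kvmppc_booke_irqprio_deliver() for the highest-priority bit that the
 * guest's current MSR allows.
 */
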
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

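/*
 * The "crit" check above implements the magic-page critical section of the
 * KVM paravirtual interface (see Documentation/virtual/kvm/ppc-pv.txt): a
 * guest that has stored its r1 into shared->critical is assumed to be in a
 * critical section, and maskable interrupts are held back until it leaves.
 */
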
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

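/*
 * Worked example (illustrative): if the period is such that wdt_tb is
 * 1 << 20, the watchdog fires each time timebase bit 20 flips from 0 to 1.
 * If that bit is currently 1, a full wdt_tb period is added first (the
 * "if (tb & wdt_tb)" branch), then the distance to the next bit-20 carry,
 * wdt_tb - (tb & (wdt_tb - 1)), is added, and the resulting tick count is
 * rounded up to whole jiffies.
 */
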
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

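/*
 * The cmpxchg() loop above advances the watchdog state machine atomically
 * against concurrent TSR updates: the first expiry sets TSR[ENW], the
 * second sets TSR[WIS], and only the third ("final") expiry with a
 * non-zero TCR[WRC] action escalates to userspace via KVM_REQ_WATCHDOG.
 */
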
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

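/*
 * TCR holds the guest's timer interrupt-enable bits (TCR[DIE], TCR[WIE])
 * and TSR the corresponding status bits (TSR[DIS], TSR[WIS]); a timer
 * interrupt is queued only while both halves of a pair are set, and is
 * dequeued again as soon as either one is cleared.
 */
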
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct thread_struct thread;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	/* Switch to guest debug context */
	thread.debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&thread);
	thread.debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&thread);
	current->thread.debug = thread.debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

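/*
 * The entry sequence above is: disable host interrupts, deliver any pending
 * guest exceptions (kvmppc_prepare_to_enter), hand the FPU and debug
 * register state over to the guest, then drop into the low-level switch
 * (__kvmppc_vcpu_run); the unwinding on the way out runs with interrupts
 * already re-enabled by the exit path.
 */
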
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

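/*
 * Capture just enough of the current host context (stack pointer, LR, MSR
 * and a PC sampled with a local branch-and-link) to fake the pt_regs that
 * host exception handlers such as do_IRQ() and timer_interrupt() expect.
 */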
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * Interrupts that need to be handled by host handlers are re-raised here
 * by calling the corresponding host handler, in much the same way (though
 * not exactly) as the low-level exception code would invoke them (such as
 * from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

1259
bbf45ba5
HB
1260int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1261{
1262 int i;
1263
1264 regs->pc = vcpu->arch.pc;
992b5b29 1265 regs->cr = kvmppc_get_cr(vcpu);
bbf45ba5
HB
1266 regs->ctr = vcpu->arch.ctr;
1267 regs->lr = vcpu->arch.lr;
992b5b29 1268 regs->xer = kvmppc_get_xer(vcpu);
666e7252 1269 regs->msr = vcpu->arch.shared->msr;
de7906c3
AG
1270 regs->srr0 = vcpu->arch.shared->srr0;
1271 regs->srr1 = vcpu->arch.shared->srr1;
bbf45ba5 1272 regs->pid = vcpu->arch.pid;
a73a9599
AG
1273 regs->sprg0 = vcpu->arch.shared->sprg0;
1274 regs->sprg1 = vcpu->arch.shared->sprg1;
1275 regs->sprg2 = vcpu->arch.shared->sprg2;
1276 regs->sprg3 = vcpu->arch.shared->sprg3;
b5904972
SW
1277 regs->sprg4 = vcpu->arch.shared->sprg4;
1278 regs->sprg5 = vcpu->arch.shared->sprg5;
1279 regs->sprg6 = vcpu->arch.shared->sprg6;
1280 regs->sprg7 = vcpu->arch.shared->sprg7;
bbf45ba5
HB
1281
1282 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
8e5b26b5 1283 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
bbf45ba5
HB
1284
1285 return 0;
1286}
1287
1288int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1289{
1290 int i;
1291
1292 vcpu->arch.pc = regs->pc;
992b5b29 1293 kvmppc_set_cr(vcpu, regs->cr);
bbf45ba5
HB
1294 vcpu->arch.ctr = regs->ctr;
1295 vcpu->arch.lr = regs->lr;
992b5b29 1296 kvmppc_set_xer(vcpu, regs->xer);
b8fd68ac 1297 kvmppc_set_msr(vcpu, regs->msr);
de7906c3
AG
1298 vcpu->arch.shared->srr0 = regs->srr0;
1299 vcpu->arch.shared->srr1 = regs->srr1;
5ce941ee 1300 kvmppc_set_pid(vcpu, regs->pid);
a73a9599
AG
1301 vcpu->arch.shared->sprg0 = regs->sprg0;
1302 vcpu->arch.shared->sprg1 = regs->sprg1;
1303 vcpu->arch.shared->sprg2 = regs->sprg2;
1304 vcpu->arch.shared->sprg3 = regs->sprg3;
b5904972
SW
1305 vcpu->arch.shared->sprg4 = regs->sprg4;
1306 vcpu->arch.shared->sprg5 = regs->sprg5;
1307 vcpu->arch.shared->sprg6 = regs->sprg6;
1308 vcpu->arch.shared->sprg7 = regs->sprg7;
bbf45ba5 1309
8e5b26b5
AG
1310 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1311 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
bbf45ba5
HB
1312
1313 return 0;
1314}
1315
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = get_guest_epr(vcpu);
		val = get_reg_val(reg->id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		val = get_reg_val(reg->id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		val = get_reg_val(reg->id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		val = get_reg_val(reg->id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
		break;
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}

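/*
 * Both one-reg ioctls follow the generic KVM ONE_REG convention: userspace
 * passes a register id plus a pointer to the value.  A sketch of setting
 * TSR from userspace (illustrative only; assumes a vcpu fd obtained via
 * KVM_CREATE_VCPU):
 *
 *	__u32 tsr_val = ...;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TSR,
 *		.addr = (__u64)(unsigned long)&tsr_val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */
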
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
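
/*
 * Both helpers above only fill in the shadow debug-register image; the
 * common DBCR0_IDM bit arms internal debug mode so that the programmed
 * events actually raise debug interrupts once the shadow state is loaded
 * on guest entry.
 */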

void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
	/* Set DBCR0_EDM in guest visible DBCR0 register. */
	vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}