KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",        VCPU_STAT(mmio_exits) },
        { "dcr",         VCPU_STAT(dcr_exits) },
        { "sig",         VCPU_STAT(signal_exits) },
        { "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",        VCPU_STAT(syscall_exits) },
        { "isi",         VCPU_STAT(isi_exits) },
        { "dsi",         VCPU_STAT(dsi_exits) },
        { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

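/*
 * Interrupts are queued for delivery by setting a bit in the vcpu's
 * pending_exceptions bitmap; the bit index is the BOOKE_IRQPRIO_* delivery
 * priority.  Actual injection happens in kvmppc_core_deliver_interrupts().
 */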
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask;
        bool update_esr = false, update_dear = false;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
                allowed = vcpu->arch.msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                allowed = vcpu->arch.msr & MSR_EE;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }

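        /* Deliver: save the old context in SRR0/SRR1, vector to the guest's
         * IVPR|IVOR handler, and clear every MSR bit not in msr_mask so the
         * guest handler runs with the right interrupt classes disabled. */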
        if (allowed) {
                vcpu->arch.srr0 = vcpu->arch.pc;
                vcpu->arch.srr1 = vcpu->arch.msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear)
                        vcpu->arch.dear = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

                clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

        return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

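        /* Scan from the lowest-numbered (highest-priority) pending bit
         * upward, stopping at the first interrupt the guest can take. */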
        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.msr & MSR_PR) {
                        /* Program traps generated by user-level software must
                         * be handled by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* don't overwrite subtypes, just account kvm_stats */
                        kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                default:
                        BUG();
                }
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

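        /* A debug event was taken while the guest was running; report it to
         * userspace rather than reflecting it into the guest. */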
        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* Acknowledge the IAC events in the DBSR; its bits are
                 * write-one-to-clear. */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        local_irq_disable();

        kvmppc_core_deliver_interrupts(vcpu);

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
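                        /* Pack -EINTR into the errcode field (bits 2 and up)
                         * of the return value; see the encoding described
                         * above kvmppc_handle_exit(). */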
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pc = 0;
        vcpu->arch.msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

        vcpu->arch.shadow_pid = 1;

        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;

        kvmppc_init_timing_stats(vcpu);

        return kvmppc_core_vcpu_setup(vcpu);
}

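/*
 * Backends for the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls.  vcpu_load()
 * pins the vcpu to this CPU and brings any lazily-switched state up to
 * date before the register file is copied; vcpu_put() releases it again.
 */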
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
        regs->sprg2 = vcpu->arch.sprg2;
        regs->sprg3 = vcpu->arch.sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.srr0 = regs->srr0;
        vcpu->arch.srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
        vcpu->arch.sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        vcpu_put(vcpu);

        return 0;
}

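/* The sregs and FPU ioctls are not implemented on BookE yet. */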
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        vcpu_load(vcpu);
        r = kvmppc_core_vcpu_translate(vcpu, tr);
        vcpu_put(vcpu);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR holds
         * only the top 16 address bits, so the handlers must live in a 64KB
         * aligned block; hence the 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}