arch/ia64/kvm/kvm-ia64.c
1/*
2 * kvm_ia64.c: Basic KVM support on Itanium series processors
3 *
4 *
5 * Copyright (C) 2007, Intel Corporation.
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/percpu.h>
26#include <linux/gfp.h>
27#include <linux/fs.h>
28#include <linux/smp.h>
29#include <linux/kvm_host.h>
30#include <linux/kvm.h>
31#include <linux/bitops.h>
32#include <linux/hrtimer.h>
33#include <linux/uaccess.h>
34
35#include <asm/pgtable.h>
36#include <asm/gcc_intrin.h>
37#include <asm/pal.h>
38#include <asm/cacheflush.h>
39#include <asm/div64.h>
40#include <asm/tlb.h>
41#include <asm/elf.h>
42
43#include "misc.h"
44#include "vti.h"
45#include "iodev.h"
46#include "ioapic.h"
47#include "lapic.h"
48
49static unsigned long kvm_vmm_base;
50static unsigned long kvm_vsa_base;
51static unsigned long kvm_vm_buffer;
52static unsigned long kvm_vm_buffer_size;
53unsigned long kvm_vmm_gp;
54
55static long vp_env_info;
56
57static struct kvm_vmm_info *kvm_vmm_info;
58
59static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
60
61struct kvm_stats_debugfs_item debugfs_entries[] = {
62 { NULL }
63};
64
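/*
 * Flush the instruction cache for [start, start + len).  ia64_fc()
 * flushes one cache line at a time; a 32-byte stride is assumed here.
 * The trailing sync.i/srlz.i make the flushed lines visible to the
 * instruction stream before the relocated VMM text is executed.
 */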
65static void kvm_flush_icache(unsigned long start, unsigned long len)
66{
67 int l;
68
69 for (l = 0; l < (len + 32); l += 32)
70 ia64_fc(start + l);
71
72 ia64_sync_i();
73 ia64_srlz_i();
74}
75
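/*
 * Purge the entire local TLB.  The ptc.e loop parameters (base address,
 * counts and strides) come from PAL via local_cpu_data, per the usual
 * ia64 convention; interrupts are disabled across the purge loop.
 */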
76static void kvm_flush_tlb_all(void)
77{
78 unsigned long i, j, count0, count1, stride0, stride1, addr;
79 long flags;
80
81 addr = local_cpu_data->ptce_base;
82 count0 = local_cpu_data->ptce_count[0];
83 count1 = local_cpu_data->ptce_count[1];
84 stride0 = local_cpu_data->ptce_stride[0];
85 stride1 = local_cpu_data->ptce_stride[1];
86
87 local_irq_save(flags);
88 for (i = 0; i < count0; ++i) {
89 for (j = 0; j < count1; ++j) {
90 ia64_ptce(addr);
91 addr += stride1;
92 }
93 addr += stride0;
94 }
95 local_irq_restore(flags);
96 ia64_srlz_i(); /* srlz.i implies srlz.d */
97}
98
99long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
100{
101 struct ia64_pal_retval iprv;
102
103 PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
104 (u64)opt_handler);
105
106 return iprv.status;
107}
108
109static DEFINE_SPINLOCK(vp_lock);
110
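/*
 * Per-CPU hardware enable: temporarily pin the VMM area with a
 * translation register entry, then call PAL_VP_INIT_ENV to switch on
 * hardware virtualization.  The first CPU to get here initializes the
 * environment and records the returned base in kvm_vsa_base; later
 * CPUs only join the already-initialized environment.
 */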
111void kvm_arch_hardware_enable(void *garbage)
112{
113 long status;
114 long tmp_base;
115 unsigned long pte;
116 unsigned long saved_psr;
117 int slot;
118
119 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
120 PAGE_KERNEL));
121 local_irq_save(saved_psr);
122 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
123 local_irq_restore(saved_psr);
124 if (slot < 0)
125 return;
126
127 spin_lock(&vp_lock);
128 status = ia64_pal_vp_init_env(kvm_vsa_base ?
129 VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
130 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
131 if (status != 0) {
132 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
133 return;
134 }
135
136 if (!kvm_vsa_base) {
137 kvm_vsa_base = tmp_base;
138 printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
139 }
140 spin_unlock(&vp_lock);
141 ia64_ptr_entry(0x3, slot);
142}
143
144void kvm_arch_hardware_disable(void *garbage)
145{
146
147 long status;
148 int slot;
149 unsigned long pte;
150 unsigned long saved_psr;
151 unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
152
153 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
154 PAGE_KERNEL));
155
156 local_irq_save(saved_psr);
157 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
158 local_irq_restore(saved_psr);
159 if (slot < 0)
160 return;
161
162 status = ia64_pal_vp_exit_env(host_iva);
163 if (status)
164 printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
165 status);
166 ia64_ptr_entry(0x3, slot);
167}
168
169void kvm_arch_check_processor_compat(void *rtn)
170{
171 *(int *)rtn = 0;
172}
173
174int kvm_dev_ioctl_check_extension(long ext)
175{
176
177 int r;
178
179 switch (ext) {
180 case KVM_CAP_IRQCHIP:
181 case KVM_CAP_USER_MEMORY:
182 case KVM_CAP_MP_STATE:
183
184 r = 1;
185 break;
186 case KVM_CAP_COALESCED_MMIO:
187 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
188 break;
189 default:
190 r = 0;
191 }
192 return r;
193
194}
195
196static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
197 gpa_t addr, int len, int is_write)
198{
199 struct kvm_io_device *dev;
200
201 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
202
203 return dev;
204}
205
206static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
207{
208 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
209 kvm_run->hw.hardware_exit_reason = 1;
210 return 0;
211}
212
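/*
 * MMIO exit handling: the VMM leaves an I/O request descriptor in the
 * vcpu's ioreq area.  Accesses to the virtual IOAPIC page are completed
 * in the kernel through the mmio bus; anything else is forwarded to
 * userspace as a KVM_EXIT_MMIO exit and completed on the next KVM_RUN
 * (see kvm_set_mmio_data() below).
 */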
213static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
214{
215 struct kvm_mmio_req *p;
216 struct kvm_io_device *mmio_dev;
217
218 p = kvm_get_vcpu_ioreq(vcpu);
219
220 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
221 goto mmio;
222 vcpu->mmio_needed = 1;
223 vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
224 vcpu->mmio_size = kvm_run->mmio.len = p->size;
225 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
226
227 if (vcpu->mmio_is_write)
228 memcpy(vcpu->mmio_data, &p->data, p->size);
229 memcpy(kvm_run->mmio.data, &p->data, p->size);
230 kvm_run->exit_reason = KVM_EXIT_MMIO;
231 return 0;
232mmio:
233 mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
234 if (mmio_dev) {
235 if (!p->dir)
236 kvm_iodevice_write(mmio_dev, p->addr, p->size,
237 &p->data);
238 else
239 kvm_iodevice_read(mmio_dev, p->addr, p->size,
240 &p->data);
241
242 } else
243 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
244 p->state = STATE_IORESP_READY;
245
246 return 1;
247}
248
249static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
250{
251 struct exit_ctl_data *p;
252
253 p = kvm_get_exit_data(vcpu);
254
255 if (p->exit_reason == EXIT_REASON_PAL_CALL)
256 return kvm_pal_emul(vcpu, kvm_run);
257 else {
258 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
259 kvm_run->hw.hardware_exit_reason = 2;
260 return 0;
261 }
262}
263
264static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
265{
266 struct exit_ctl_data *p;
267
268 p = kvm_get_exit_data(vcpu);
269
270 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
271 kvm_sal_emul(vcpu);
272 return 1;
273 } else {
274 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
275 kvm_run->hw.hardware_exit_reason = 3;
276 return 0;
277 }
278
279}
280
281/*
282 * offset: address offset to IPI space.
283 * value: deliver value.
284 */
285static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
286 uint64_t vector)
287{
288 switch (dm) {
289 case SAPIC_FIXED:
290 kvm_apic_set_irq(vcpu, vector, 0);
291 break;
292 case SAPIC_NMI:
293 kvm_apic_set_irq(vcpu, 2, 0);
294 break;
295 case SAPIC_EXTINT:
296 kvm_apic_set_irq(vcpu, 0, 0);
297 break;
298 case SAPIC_INIT:
299 case SAPIC_PMI:
300 default:
301 printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
302 break;
303 }
304}
305
306static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
307 unsigned long eid)
308{
309 union ia64_lid lid;
310 int i;
311
312 for (i = 0; i < KVM_MAX_VCPUS; i++) {
313 if (kvm->vcpus[i]) {
314 lid.val = VCPU_LID(kvm->vcpus[i]);
315 if (lid.id == id && lid.eid == eid)
316 return kvm->vcpus[i];
317 }
318 }
319
320 return NULL;
321}
322
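/*
 * IPI exit handling: look up the destination vcpu by its (id, eid)
 * local ID.  If the target has not been launched yet, the IPI is
 * treated as a startup: its boot IP/GP are taken from the rendezvous
 * SAL data and it is made runnable.  Otherwise the vector is delivered
 * through the local APIC emulation and the target is kicked.
 */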
323static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
324{
325 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
326 struct kvm_vcpu *target_vcpu;
327 struct kvm_pt_regs *regs;
328 union ia64_ipi_a addr = p->u.ipi_data.addr;
329 union ia64_ipi_d data = p->u.ipi_data.data;
330
331 target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
332 if (!target_vcpu)
333 return handle_vm_error(vcpu, kvm_run);
334
335 if (!target_vcpu->arch.launched) {
336 regs = vcpu_regs(target_vcpu);
337
338 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
339 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
340
341 target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
342 if (waitqueue_active(&target_vcpu->wq))
343 wake_up_interruptible(&target_vcpu->wq);
344 } else {
345 vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
346 if (target_vcpu != vcpu)
347 kvm_vcpu_kick(target_vcpu);
348 }
349
350 return 1;
351}
352
353struct call_data {
354 struct kvm_ptc_g ptc_g_data;
355 struct kvm_vcpu *vcpu;
356};
357
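/*
 * A guest ptc.g (global TLB purge) has to be applied to every vcpu's
 * software TLB.  vcpu_global_purge() runs on the CPU a remote vcpu
 * last used and queues the purge parameters for it; if the per-vcpu
 * queue overflows, it falls back to requesting a full TLB flush.
 */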
358static void vcpu_global_purge(void *info)
359{
360 struct call_data *p = (struct call_data *)info;
361 struct kvm_vcpu *vcpu = p->vcpu;
362
363 if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
364 return;
365
366 set_bit(KVM_REQ_PTC_G, &vcpu->requests);
367 if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
368 vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
369 p->ptc_g_data;
370 } else {
371 clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
372 vcpu->arch.ptc_g_count = 0;
373 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
374 }
375}
376
377static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
378{
379 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
380 struct kvm *kvm = vcpu->kvm;
381 struct call_data call_data;
382 int i;
383 call_data.ptc_g_data = p->u.ptc_g_data;
384
385 for (i = 0; i < KVM_MAX_VCPUS; i++) {
386 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
387 KVM_MP_STATE_UNINITIALIZED ||
388 vcpu == kvm->vcpus[i])
389 continue;
390
391 if (waitqueue_active(&kvm->vcpus[i]->wq))
392 wake_up_interruptible(&kvm->vcpus[i]->wq);
393
394 if (kvm->vcpus[i]->cpu != -1) {
395 call_data.vcpu = kvm->vcpus[i];
396 smp_call_function_single(kvm->vcpus[i]->cpu,
397 vcpu_global_purge, &call_data, 1);
398 } else
399 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
400
401 }
402 return 1;
403}
404
405static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
406{
407 return 1;
408}
409
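/*
 * Halt emulation: if the guest's timer match (vpd->itm) is already in
 * the past, return immediately with timer_check set.  Otherwise arm
 * the per-vcpu hrtimer for the remaining interval (ITC cycles are
 * converted to microseconds, then nanoseconds) and block the vcpu
 * until an interrupt or the timer makes it runnable again.
 */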
410int kvm_emulate_halt(struct kvm_vcpu *vcpu)
411{
412
413 ktime_t kt;
414 long itc_diff;
415 unsigned long vcpu_now_itc;
416
417 unsigned long expires;
418 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
419 unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
420 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
421
422 vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
423
424 if (time_after(vcpu_now_itc, vpd->itm)) {
425 vcpu->arch.timer_check = 1;
426 return 1;
427 }
428 itc_diff = vpd->itm - vcpu_now_itc;
429 if (itc_diff < 0)
430 itc_diff = -itc_diff;
431
432 expires = div64_u64(itc_diff, cyc_per_usec);
433 kt = ktime_set(0, 1000 * expires);
434 vcpu->arch.ht_active = 1;
435 hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
436
437 if (irqchip_in_kernel(vcpu->kvm)) {
438 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
439 kvm_vcpu_block(vcpu);
440 hrtimer_cancel(p_ht);
441 vcpu->arch.ht_active = 0;
442
443 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
444 return -EINTR;
445 return 1;
446 } else {
447 printk(KERN_ERR"kvm: Unsupported userspace halt!");
448 return 0;
449 }
450}
451
452static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
453 struct kvm_run *kvm_run)
454{
455 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
456 return 0;
457}
458
459static int handle_external_interrupt(struct kvm_vcpu *vcpu,
460 struct kvm_run *kvm_run)
461{
462 return 1;
463}
464
465static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
466 struct kvm_run *kvm_run) = {
467 [EXIT_REASON_VM_PANIC] = handle_vm_error,
468 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
469 [EXIT_REASON_PAL_CALL] = handle_pal_call,
470 [EXIT_REASON_SAL_CALL] = handle_sal_call,
471 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
472 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
473 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
474 [EXIT_REASON_IPI] = handle_ipi,
475 [EXIT_REASON_PTC_G] = handle_global_purge,
476
477};
478
479static const int kvm_vti_max_exit_handlers =
480 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
481
482static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
483{
484}
485
486static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
487{
488 struct exit_ctl_data *p_exit_data;
489
490 p_exit_data = kvm_get_exit_data(vcpu);
491 return p_exit_data->exit_reason;
492}
493
494/*
495 * The guest has exited. See if we can fix it or if we need userspace
496 * assistance.
497 */
498static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
499{
500 u32 exit_reason = kvm_get_exit_reason(vcpu);
501 vcpu->arch.last_exit = exit_reason;
502
503 if (exit_reason < kvm_vti_max_exit_handlers
504 && kvm_vti_exit_handlers[exit_reason])
505 return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
506 else {
507 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
508 kvm_run->hw.hardware_exit_reason = exit_reason;
509 }
510 return 0;
511}
512
513static inline void vti_set_rr6(unsigned long rr6)
514{
515 ia64_set_rr(RR6, rr6);
516 ia64_srlz_i();
517}
518
519static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
520{
521 unsigned long pte;
522 struct kvm *kvm = vcpu->kvm;
523 int r;
524
525 /*Insert a pair of tr to map vmm*/
526 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
527 r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
528 if (r < 0)
529 goto out;
530 vcpu->arch.vmm_tr_slot = r;
531 /*Insert a pair of tr to map data of vm*/
532 pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
533 r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
534 pte, KVM_VM_DATA_SHIFT);
535 if (r < 0)
536 goto out;
537 vcpu->arch.vm_tr_slot = r;
538 r = 0;
539out:
540 return r;
541
542}
543
544static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
545{
546
547 ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
548 ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
549
550}
551
552static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
553{
554 int cpu = smp_processor_id();
555
556 if (vcpu->arch.last_run_cpu != cpu ||
557 per_cpu(last_vcpu, cpu) != vcpu) {
558 per_cpu(last_vcpu, cpu) = vcpu;
559 vcpu->arch.last_run_cpu = cpu;
560 kvm_flush_tlb_all();
561 }
562
563 vcpu->arch.host_rr6 = ia64_get_rr(RR6);
564 vti_set_rr6(vcpu->arch.vmm_rr);
565 return kvm_insert_vmm_mapping(vcpu);
566}
567static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
568{
569 kvm_purge_vmm_mapping(vcpu);
570 vti_set_rr6(vcpu->arch.host_rr6);
571}
572
573static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
574{
575 union context *host_ctx, *guest_ctx;
576 int r;
577
578 /*Get host and guest context with guest address space.*/
579 host_ctx = kvm_get_host_context(vcpu);
580 guest_ctx = kvm_get_guest_context(vcpu);
581
582 r = kvm_vcpu_pre_transition(vcpu);
583 if (r < 0)
584 goto out;
585 kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
586 kvm_vcpu_post_transition(vcpu);
587 r = 0;
588out:
589 return r;
590}
591
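/*
 * Inner run loop: with preemption and interrupts disabled, enter the
 * guest through vti_vcpu_run() and then dispatch the exit reason in
 * kvm_handle_exit().  A positive return value means the exit was
 * handled in the kernel and we re-enter the guest (rescheduling first
 * if needed); zero or negative returns back to the KVM_RUN ioctl.
 */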
592static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
593{
594 int r;
595
596again:
597 preempt_disable();
598
599 kvm_prepare_guest_switch(vcpu);
600 local_irq_disable();
601
602 if (signal_pending(current)) {
603 local_irq_enable();
604 preempt_enable();
605 r = -EINTR;
606 kvm_run->exit_reason = KVM_EXIT_INTR;
607 goto out;
608 }
609
610 vcpu->guest_mode = 1;
611 kvm_guest_enter();
612
613 r = vti_vcpu_run(vcpu, kvm_run);
614 if (r < 0) {
615 local_irq_enable();
616 preempt_enable();
617 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
618 goto out;
619 }
620
621 vcpu->arch.launched = 1;
622 vcpu->guest_mode = 0;
623 local_irq_enable();
624
625 /*
626 * We must have an instruction between local_irq_enable() and
627 * kvm_guest_exit(), so the timer interrupt isn't delayed by
628 * the interrupt shadow. The stat.exits increment will do nicely.
629 * But we need to prevent reordering, hence this barrier():
630 */
631 barrier();
632
633 kvm_guest_exit();
634
635 preempt_enable();
636
637 r = kvm_handle_exit(kvm_run, vcpu);
638
639 if (r > 0) {
640 if (!need_resched())
641 goto again;
642 }
643
644out:
645 if (r > 0) {
646 kvm_resched(vcpu);
647 goto again;
648 }
649
650 return r;
651}
652
653static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
654{
655 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
656
657 if (!vcpu->mmio_is_write)
658 memcpy(&p->data, vcpu->mmio_data, 8);
659 p->state = STATE_IORESP_READY;
660}
661
662int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
663{
664 int r;
665 sigset_t sigsaved;
666
667 vcpu_load(vcpu);
668
669 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
670 kvm_vcpu_block(vcpu);
671 vcpu_put(vcpu);
672 return -EAGAIN;
673 }
674
675 if (vcpu->sigset_active)
676 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
677
678 if (vcpu->mmio_needed) {
679 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
680 kvm_set_mmio_data(vcpu);
681 vcpu->mmio_read_completed = 1;
682 vcpu->mmio_needed = 0;
683 }
684 r = __vcpu_run(vcpu, kvm_run);
685
686 if (vcpu->sigset_active)
687 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
688
689 vcpu_put(vcpu);
690 return r;
691}
692
693/*
694 * Allocate 16M memory for every vm to hold its specific data.
695 * Its memory map is defined in kvm_host.h.
696 */
697static struct kvm *kvm_alloc_kvm(void)
698{
699
700 struct kvm *kvm;
701 uint64_t vm_base;
702
703 vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
704
705 if (!vm_base)
706 return ERR_PTR(-ENOMEM);
707 printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
708
709 /* Zero all pages before use! */
710 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
711
712 kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
713 kvm->arch.vm_base = vm_base;
714
715 return kvm;
716}
717
718struct kvm_io_range {
719 unsigned long start;
720 unsigned long size;
721 unsigned long type;
722};
723
724static const struct kvm_io_range io_ranges[] = {
725 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
726 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
727 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
728 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
729 {PIB_START, PIB_SIZE, GPFN_PIB},
730};
731
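/*
 * Mark the guest frames that correspond to emulated I/O ranges (VGA,
 * low MMIO, legacy I/O, IOSAPIC, PIB) in the per-VM P2M table, so the
 * VMM can tell I/O accesses apart from ordinary RAM accesses.
 */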
732static void kvm_build_io_pmt(struct kvm *kvm)
733{
734 unsigned long i, j;
735
736 /* Mark I/O ranges */
737 for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
738 i++) {
739 for (j = io_ranges[i].start;
740 j < io_ranges[i].start + io_ranges[i].size;
741 j += PAGE_SIZE)
742 kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
743 io_ranges[i].type, 0);
744 }
745
746}
747
748/*Use unused rids to virtualize guest rid.*/
749#define GUEST_PHYSICAL_RR0 0x1739
750#define GUEST_PHYSICAL_RR4 0x2739
751#define VMM_INIT_RR 0x1660
752
753static void kvm_init_vm(struct kvm *kvm)
754{
755 long vm_base;
756
757 BUG_ON(!kvm);
758
759 kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
760 kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
761 kvm->arch.vmm_init_rr = VMM_INIT_RR;
762
763 vm_base = kvm->arch.vm_base;
764 if (vm_base) {
765 kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
766 kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
767 kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
768 }
769
770 /*
771 *Fill P2M entries for MMIO/IO ranges
772 */
773 kvm_build_io_pmt(kvm);
774
775}
776
777struct kvm *kvm_arch_create_vm(void)
778{
779 struct kvm *kvm = kvm_alloc_kvm();
780
781 if (IS_ERR(kvm))
782 return ERR_PTR(-ENOMEM);
783 kvm_init_vm(kvm);
784
785 return kvm;
786
787}
788
789static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
790 struct kvm_irqchip *chip)
791{
792 int r;
793
794 r = 0;
795 switch (chip->chip_id) {
796 case KVM_IRQCHIP_IOAPIC:
797 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
798 sizeof(struct kvm_ioapic_state));
799 break;
800 default:
801 r = -EINVAL;
802 break;
803 }
804 return r;
805}
806
807static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
808{
809 int r;
810
811 r = 0;
812 switch (chip->chip_id) {
813 case KVM_IRQCHIP_IOAPIC:
814 memcpy(ioapic_irqchip(kvm),
815 &chip->chip.ioapic,
816 sizeof(struct kvm_ioapic_state));
817 break;
818 default:
819 r = -EINVAL;
820 break;
821 }
822 return r;
823}
824
825#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
826
827int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
828{
829 int i;
830 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
831 int r;
832
833 vcpu_load(vcpu);
834
835 for (i = 0; i < 16; i++) {
836 vpd->vgr[i] = regs->vpd.vgr[i];
837 vpd->vbgr[i] = regs->vpd.vbgr[i];
838 }
839 for (i = 0; i < 128; i++)
840 vpd->vcr[i] = regs->vpd.vcr[i];
841 vpd->vhpi = regs->vpd.vhpi;
842 vpd->vnat = regs->vpd.vnat;
843 vpd->vbnat = regs->vpd.vbnat;
844 vpd->vpsr = regs->vpd.vpsr;
845
846 vpd->vpr = regs->vpd.vpr;
847
848 r = -EFAULT;
849 r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
850 sizeof(union context));
851 if (r)
852 goto out;
853 r = copy_from_user(vcpu + 1, regs->saved_stack +
854 sizeof(struct kvm_vcpu),
855 IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
856 if (r)
857 goto out;
858 vcpu->arch.exit_data =
859 ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
860
861 RESTORE_REGS(mp_state);
862 RESTORE_REGS(vmm_rr);
863 memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
864 memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
865 RESTORE_REGS(itr_regions);
866 RESTORE_REGS(dtr_regions);
867 RESTORE_REGS(tc_regions);
868 RESTORE_REGS(irq_check);
869 RESTORE_REGS(itc_check);
870 RESTORE_REGS(timer_check);
871 RESTORE_REGS(timer_pending);
872 RESTORE_REGS(last_itc);
873 for (i = 0; i < 8; i++) {
874 vcpu->arch.vrr[i] = regs->vrr[i];
875 vcpu->arch.ibr[i] = regs->ibr[i];
876 vcpu->arch.dbr[i] = regs->dbr[i];
877 }
878 for (i = 0; i < 4; i++)
879 vcpu->arch.insvc[i] = regs->insvc[i];
880 RESTORE_REGS(xtp);
881 RESTORE_REGS(metaphysical_rr0);
882 RESTORE_REGS(metaphysical_rr4);
883 RESTORE_REGS(metaphysical_saved_rr0);
884 RESTORE_REGS(metaphysical_saved_rr4);
885 RESTORE_REGS(fp_psr);
886 RESTORE_REGS(saved_gp);
887
888 vcpu->arch.irq_new_pending = 1;
889 vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
890 set_bit(KVM_REQ_RESUME, &vcpu->requests);
891
892 vcpu_put(vcpu);
893 r = 0;
894out:
895 return r;
896}
897
898long kvm_arch_vm_ioctl(struct file *filp,
899 unsigned int ioctl, unsigned long arg)
900{
901 struct kvm *kvm = filp->private_data;
902 void __user *argp = (void __user *)arg;
903 int r = -EINVAL;
904
905 switch (ioctl) {
906 case KVM_SET_MEMORY_REGION: {
907 struct kvm_memory_region kvm_mem;
908 struct kvm_userspace_memory_region kvm_userspace_mem;
909
910 r = -EFAULT;
911 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
912 goto out;
913 kvm_userspace_mem.slot = kvm_mem.slot;
914 kvm_userspace_mem.flags = kvm_mem.flags;
915 kvm_userspace_mem.guest_phys_addr =
916 kvm_mem.guest_phys_addr;
917 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
918 r = kvm_vm_ioctl_set_memory_region(kvm,
919 &kvm_userspace_mem, 0);
920 if (r)
921 goto out;
922 break;
923 }
924 case KVM_CREATE_IRQCHIP:
925 r = -EFAULT;
926 r = kvm_ioapic_init(kvm);
927 if (r)
928 goto out;
929 break;
930 case KVM_IRQ_LINE: {
931 struct kvm_irq_level irq_event;
932
933 r = -EFAULT;
934 if (copy_from_user(&irq_event, argp, sizeof irq_event))
935 goto out;
936 if (irqchip_in_kernel(kvm)) {
937 mutex_lock(&kvm->lock);
938 kvm_ioapic_set_irq(kvm->arch.vioapic,
939 irq_event.irq,
940 irq_event.level);
941 mutex_unlock(&kvm->lock);
942 r = 0;
943 }
944 break;
945 }
946 case KVM_GET_IRQCHIP: {
947 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
948 struct kvm_irqchip chip;
949
950 r = -EFAULT;
951 if (copy_from_user(&chip, argp, sizeof chip))
952 goto out;
953 r = -ENXIO;
954 if (!irqchip_in_kernel(kvm))
955 goto out;
956 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
957 if (r)
958 goto out;
959 r = -EFAULT;
960 if (copy_to_user(argp, &chip, sizeof chip))
961 goto out;
962 r = 0;
963 break;
964 }
965 case KVM_SET_IRQCHIP: {
966 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
967 struct kvm_irqchip chip;
968
969 r = -EFAULT;
970 if (copy_from_user(&chip, argp, sizeof chip))
971 goto out;
972 r = -ENXIO;
973 if (!irqchip_in_kernel(kvm))
974 goto out;
975 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
976 if (r)
977 goto out;
978 r = 0;
979 break;
980 }
981 default:
982 ;
983 }
984out:
985 return r;
986}
987
988int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
989 struct kvm_sregs *sregs)
990{
991 return -EINVAL;
992}
993
994int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
995 struct kvm_sregs *sregs)
996{
997 return -EINVAL;
998
999}
1000int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1001 struct kvm_translation *tr)
1002{
1003
1004 return -EINVAL;
1005}
1006
1007static int kvm_alloc_vmm_area(void)
1008{
1009 if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
1010 kvm_vmm_base = __get_free_pages(GFP_KERNEL,
1011 get_order(KVM_VMM_SIZE));
1012 if (!kvm_vmm_base)
1013 return -ENOMEM;
1014
1015 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1016 kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
1017
1018 printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1019 kvm_vmm_base, kvm_vm_buffer);
1020 }
1021
1022 return 0;
1023}
1024
1025static void kvm_free_vmm_area(void)
1026{
1027 if (kvm_vmm_base) {
1028 /*Zero this area before free to avoid bits leak!!*/
1029 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1030 free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
1031 kvm_vmm_base = 0;
1032 kvm_vm_buffer = 0;
1033 kvm_vsa_base = 0;
1034 }
1035}
1036
1037static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1038{
1039}
1040
1041static int vti_init_vpd(struct kvm_vcpu *vcpu)
1042{
1043 int i;
1044 union cpuid3_t cpuid3;
1045 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1046
1047 if (IS_ERR(vpd))
1048 return PTR_ERR(vpd);
1049
1050 /* CPUID init */
1051 for (i = 0; i < 5; i++)
1052 vpd->vcpuid[i] = ia64_get_cpuid(i);
1053
1054 /* Limit the CPUID number to 5 */
1055 cpuid3.value = vpd->vcpuid[3];
1056 cpuid3.number = 4; /* 5 - 1 */
1057 vpd->vcpuid[3] = cpuid3.value;
1058
1059 /*Set vac and vdc fields*/
1060 vpd->vac.a_from_int_cr = 1;
1061 vpd->vac.a_to_int_cr = 1;
1062 vpd->vac.a_from_psr = 1;
1063 vpd->vac.a_from_cpuid = 1;
1064 vpd->vac.a_cover = 1;
1065 vpd->vac.a_bsw = 1;
1066 vpd->vac.a_int = 1;
1067 vpd->vdc.d_vmsw = 1;
1068
1069 /*Set virtual buffer*/
1070 vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
1071
1072 return 0;
1073}
1074
1075static int vti_create_vp(struct kvm_vcpu *vcpu)
1076{
1077 long ret;
1078 struct vpd *vpd = vcpu->arch.vpd;
1079 unsigned long vmm_ivt;
1080
1081 vmm_ivt = kvm_vmm_info->vmm_ivt;
1082
1083 printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
1084
1085 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
1086
1087 if (ret) {
1088 printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
1089 return -EINVAL;
1090 }
1091 return 0;
1092}
1093
1094static void init_ptce_info(struct kvm_vcpu *vcpu)
1095{
1096 ia64_ptce_info_t ptce = {0};
1097
1098 ia64_get_ptce(&ptce);
1099 vcpu->arch.ptce_base = ptce.base;
1100 vcpu->arch.ptce_count[0] = ptce.count[0];
1101 vcpu->arch.ptce_count[1] = ptce.count[1];
1102 vcpu->arch.ptce_stride[0] = ptce.stride[0];
1103 vcpu->arch.ptce_stride[1] = ptce.stride[1];
1104}
1105
1106static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
1107{
1108 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
1109
1110 if (hrtimer_cancel(p_ht))
1111 hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
1112}
1113
1114static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
1115{
1116 struct kvm_vcpu *vcpu;
1117 wait_queue_head_t *q;
1118
1119 vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
1120 if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
1121 goto out;
1122
1123 q = &vcpu->wq;
1124 if (waitqueue_active(q)) {
1125 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1126 wake_up_interruptible(q);
1127 }
1128out:
1129 vcpu->arch.timer_check = 1;
1130 return HRTIMER_NORESTART;
1131}
1132
1133#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
1134
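/*
 * Architecture-specific vcpu init.  vcpu 0 (the BSP) is made runnable
 * immediately, starts at PALE_RESET_ENTRY and establishes the shared
 * itc_offset for all vcpus; the others stay UNINITIALIZED until they
 * receive a startup IPI (see handle_ipi()).  The rest of the function
 * builds the VMM-side context (stack/thread pointers, region
 * registers, entry branch register) used by the transition trampoline.
 */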
1135int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1136{
1137 struct kvm_vcpu *v;
1138 int r;
1139 int i;
1140 long itc_offset;
1141 struct kvm *kvm = vcpu->kvm;
1142 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1143
1144 union context *p_ctx = &vcpu->arch.guest;
1145 struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
1146
1147 /*Init vcpu context for first run.*/
1148 if (IS_ERR(vmm_vcpu))
1149 return PTR_ERR(vmm_vcpu);
1150
1151 if (vcpu->vcpu_id == 0) {
1152 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1153
1154 /*Set entry address for first run.*/
1155 regs->cr_iip = PALE_RESET_ENTRY;
1156
1157 /*Initialize itc offset for vcpus*/
1158 itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
1159 for (i = 0; i < MAX_VCPU_NUM; i++) {
1160 v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
1161 v->arch.itc_offset = itc_offset;
1162 v->arch.last_itc = 0;
1163 }
1164 } else
1165 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
1166
1167 r = -ENOMEM;
1168 vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
1169 if (!vcpu->arch.apic)
1170 goto out;
1171 vcpu->arch.apic->vcpu = vcpu;
1172
1173 p_ctx->gr[1] = 0;
1174 p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
1175 p_ctx->gr[13] = (unsigned long)vmm_vcpu;
1176 p_ctx->psr = 0x1008522000UL;
1177 p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
1178 p_ctx->caller_unat = 0;
1179 p_ctx->pr = 0x0;
1180 p_ctx->ar[36] = 0x0; /*unat*/
1181 p_ctx->ar[19] = 0x0; /*rnat*/
1182 p_ctx->ar[18] = (unsigned long)vmm_vcpu +
1183 ((sizeof(struct kvm_vcpu)+15) & ~15);
1184 p_ctx->ar[64] = 0x0; /*pfs*/
1185 p_ctx->cr[0] = 0x7e04UL;
1186 p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
1187 p_ctx->cr[8] = 0x3c;
1188
1189 /*Initialize region registers*/
1190 p_ctx->rr[0] = 0x30;
1191 p_ctx->rr[1] = 0x30;
1192 p_ctx->rr[2] = 0x30;
1193 p_ctx->rr[3] = 0x30;
1194 p_ctx->rr[4] = 0x30;
1195 p_ctx->rr[5] = 0x30;
1196 p_ctx->rr[7] = 0x30;
1197
1198 /*Initialize branch register 0*/
1199 p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
1200
1201 vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
1202 vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
1203 vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
1204
1205 hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1206 vcpu->arch.hlt_timer.function = hlt_timer_fn;
1207
1208 vcpu->arch.last_run_cpu = -1;
1209 vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
1210 vcpu->arch.vsa_base = kvm_vsa_base;
1211 vcpu->arch.__gp = kvm_vmm_gp;
1212 vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
1213 vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
1214 vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
1215 init_ptce_info(vcpu);
1216
1217 r = 0;
1218out:
1219 return r;
1220}
1221
1222static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
1223{
1224 unsigned long psr;
1225 int r;
1226
1227 local_irq_save(psr);
1228 r = kvm_insert_vmm_mapping(vcpu);
1229 if (r)
1230 goto fail;
1231 r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
1232 if (r)
1233 goto fail;
1234
1235 r = vti_init_vpd(vcpu);
1236 if (r) {
1237 printk(KERN_DEBUG"kvm: vpd init error!!\n");
1238 goto uninit;
1239 }
1240
1241 r = vti_create_vp(vcpu);
1242 if (r)
1243 goto uninit;
1244
1245 kvm_purge_vmm_mapping(vcpu);
1246 local_irq_restore(psr);
1247
1248 return 0;
1249uninit:
1250 kvm_vcpu_uninit(vcpu);
1251fail:
1252 local_irq_restore(psr);
1253 return r;
1254}
1255
1256struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1257 unsigned int id)
1258{
1259 struct kvm_vcpu *vcpu;
1260 unsigned long vm_base = kvm->arch.vm_base;
1261 int r;
1262 int cpu;
1263
1264 r = -ENOMEM;
1265 if (!vm_base) {
1266 printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
1267 goto fail;
1268 }
1269 vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
1270 vcpu->kvm = kvm;
1271
1272 cpu = get_cpu();
1273 vti_vcpu_load(vcpu, cpu);
1274 r = vti_vcpu_setup(vcpu, id);
1275 put_cpu();
1276
1277 if (r) {
1278 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1279 goto fail;
1280 }
1281
1282 return vcpu;
1283fail:
1284 return ERR_PTR(r);
1285}
1286
1287int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1288{
1289 return 0;
1290}
1291
1292int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1293{
1294 return -EINVAL;
1295}
1296
1297int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1298{
1299 return -EINVAL;
1300}
1301
1302int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
1303 struct kvm_debug_guest *dbg)
1304{
1305 return -EINVAL;
1306}
1307
1308static void free_kvm(struct kvm *kvm)
1309{
1310 unsigned long vm_base = kvm->arch.vm_base;
1311
1312 if (vm_base) {
1313 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
1314 free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
1315 }
1316
1317}
1318
1319static void kvm_release_vm_pages(struct kvm *kvm)
1320{
1321 struct kvm_memory_slot *memslot;
1322 int i, j;
1323 unsigned long base_gfn;
1324
1325 for (i = 0; i < kvm->nmemslots; i++) {
1326 memslot = &kvm->memslots[i];
1327 base_gfn = memslot->base_gfn;
1328
1329 for (j = 0; j < memslot->npages; j++) {
1330 if (memslot->rmap[j])
1331 put_page((struct page *)memslot->rmap[j]);
1332 }
1333 }
1334}
1335
1336void kvm_arch_destroy_vm(struct kvm *kvm)
1337{
1338 kfree(kvm->arch.vioapic);
1339 kvm_release_vm_pages(kvm);
1340 kvm_free_physmem(kvm);
1341 free_kvm(kvm);
1342}
1343
1344void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1345{
1346}
1347
1348void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1349{
1350 if (cpu != vcpu->cpu) {
1351 vcpu->cpu = cpu;
1352 if (vcpu->arch.ht_active)
1353 kvm_migrate_hlt_timer(vcpu);
1354 }
1355}
1356
1357#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
1358
1359int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1360{
1361 int i;
1362 int r;
1363 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1364 vcpu_load(vcpu);
1365
1366 for (i = 0; i < 16; i++) {
1367 regs->vpd.vgr[i] = vpd->vgr[i];
1368 regs->vpd.vbgr[i] = vpd->vbgr[i];
1369 }
1370 for (i = 0; i < 128; i++)
1371 regs->vpd.vcr[i] = vpd->vcr[i];
1372 regs->vpd.vhpi = vpd->vhpi;
1373 regs->vpd.vnat = vpd->vnat;
1374 regs->vpd.vbnat = vpd->vbnat;
1375 regs->vpd.vpsr = vpd->vpsr;
1376 regs->vpd.vpr = vpd->vpr;
1377
1378 r = -EFAULT;
1379 r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
1380 sizeof(union context));
1381 if (r)
1382 goto out;
1383 r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
1384 if (r)
1385 goto out;
1386 SAVE_REGS(mp_state);
1387 SAVE_REGS(vmm_rr);
1388 memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
1389 memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
1390 SAVE_REGS(itr_regions);
1391 SAVE_REGS(dtr_regions);
1392 SAVE_REGS(tc_regions);
1393 SAVE_REGS(irq_check);
1394 SAVE_REGS(itc_check);
1395 SAVE_REGS(timer_check);
1396 SAVE_REGS(timer_pending);
1397 SAVE_REGS(last_itc);
1398 for (i = 0; i < 8; i++) {
1399 regs->vrr[i] = vcpu->arch.vrr[i];
1400 regs->ibr[i] = vcpu->arch.ibr[i];
1401 regs->dbr[i] = vcpu->arch.dbr[i];
1402 }
1403 for (i = 0; i < 4; i++)
1404 regs->insvc[i] = vcpu->arch.insvc[i];
1405 regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
1406 SAVE_REGS(xtp);
1407 SAVE_REGS(metaphysical_rr0);
1408 SAVE_REGS(metaphysical_rr4);
1409 SAVE_REGS(metaphysical_saved_rr0);
1410 SAVE_REGS(metaphysical_saved_rr4);
1411 SAVE_REGS(fp_psr);
1412 SAVE_REGS(saved_gp);
1413 vcpu_put(vcpu);
1414 r = 0;
1415out:
1416 return r;
1417}
1418
1419void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1420{
1421
1422 hrtimer_cancel(&vcpu->arch.hlt_timer);
1423 kfree(vcpu->arch.apic);
1424}
1425
1426
1427long kvm_arch_vcpu_ioctl(struct file *filp,
1428 unsigned int ioctl, unsigned long arg)
1429{
1430 return -EINVAL;
1431}
1432
1433int kvm_arch_set_memory_region(struct kvm *kvm,
1434 struct kvm_userspace_memory_region *mem,
1435 struct kvm_memory_slot old,
1436 int user_alloc)
1437{
1438 unsigned long i;
1439 struct page *page;
1440 int npages = mem->memory_size >> PAGE_SHIFT;
1441 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1442 unsigned long base_gfn = memslot->base_gfn;
1443
1444 for (i = 0; i < npages; i++) {
1445 page = gfn_to_page(kvm, base_gfn + i);
1446 kvm_set_pmt_entry(kvm, base_gfn + i,
1447 page_to_pfn(page) << PAGE_SHIFT,
1448 _PAGE_AR_RWX|_PAGE_MA_WB);
1449 memslot->rmap[i] = (unsigned long)page;
1450 }
1451
1452 return 0;
1453}
1454
1455void kvm_arch_flush_shadow(struct kvm *kvm)
1456{
1457}
1458
1459long kvm_arch_dev_ioctl(struct file *filp,
1460 unsigned int ioctl, unsigned long arg)
1461{
1462 return -EINVAL;
1463}
1464
1465void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1466{
1467 kvm_vcpu_uninit(vcpu);
1468}
1469
1470static int vti_cpu_has_kvm_support(void)
1471{
1472 long avail = 1, status = 1, control = 1;
1473 long ret;
1474
1475 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1476 if (ret)
1477 goto out;
1478
1479 if (!(avail & PAL_PROC_VM_BIT))
1480 goto out;
1481
1482 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1483
1484 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1485 if (ret)
1486 goto out;
1487 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1488
1489 if (!(vp_env_info & VP_OPCODE)) {
1490 printk(KERN_WARNING"kvm: No opcode ability on hardware, "
1491 "vm_env_info:0x%lx\n", vp_env_info);
1492 }
1493
1494 return 1;
1495out:
1496 return 0;
1497}
1498
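/*
 * Relocate the VMM module into the pinned kvm_vmm_base area: copy the
 * module text, flush the instruction cache, and rewrite the function
 * descriptors (ip/gp pairs) of the IVT, init entry and trampoline
 * entry so they point into the relocated copy at KVM_VMM_BASE.
 */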
1499static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
1500 struct module *module)
1501{
1502 unsigned long module_base;
1503 unsigned long vmm_size;
1504
1505 unsigned long vmm_offset, func_offset, fdesc_offset;
1506 struct fdesc *p_fdesc;
1507
1508 BUG_ON(!module);
1509
1510 if (!kvm_vmm_base) {
1511 printk("kvm: kvm area hasn't been initialized yet!!\n");
1512 return -EFAULT;
1513 }
1514
1515 /*Calculate new position of relocated vmm module.*/
1516 module_base = (unsigned long)module->module_core;
1517 vmm_size = module->core_size;
1518 if (unlikely(vmm_size > KVM_VMM_SIZE))
1519 return -EFAULT;
1520
1521 memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
1522 kvm_flush_icache(kvm_vmm_base, vmm_size);
1523
1524 /*Recalculate kvm_vmm_info based on new VMM*/
1525 vmm_offset = vmm_info->vmm_ivt - module_base;
1526 kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
1527 printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
1528 kvm_vmm_info->vmm_ivt);
1529
1530 fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
1531 kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
1532 fdesc_offset);
1533 func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
1534 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1535 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1536 p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
1537
1538 printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
1539 KVM_VMM_BASE+func_offset);
1540
1541 fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
1542 kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
1543 fdesc_offset);
1544 func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
1545 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1546 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1547 p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
1548
1549 kvm_vmm_gp = p_fdesc->gp;
1550
1551 printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
1552 kvm_vmm_info->vmm_entry);
1553 printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
1554 KVM_VMM_BASE + func_offset);
1555
1556 return 0;
1557}
1558
1559int kvm_arch_init(void *opaque)
1560{
1561 int r;
1562 struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
1563
1564 if (!vti_cpu_has_kvm_support()) {
1565 printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
1566 r = -EOPNOTSUPP;
1567 goto out;
1568 }
1569
1570 if (kvm_vmm_info) {
1571 printk(KERN_ERR "kvm: Already loaded VMM module!\n");
1572 r = -EEXIST;
1573 goto out;
1574 }
1575
1576 r = -ENOMEM;
1577 kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
1578 if (!kvm_vmm_info)
1579 goto out;
1580
1581 if (kvm_alloc_vmm_area())
1582 goto out_free0;
1583
1584 r = kvm_relocate_vmm(vmm_info, vmm_info->module);
1585 if (r)
1586 goto out_free1;
1587
1588 return 0;
1589
1590out_free1:
1591 kvm_free_vmm_area();
1592out_free0:
1593 kfree(kvm_vmm_info);
1594out:
1595 return r;
1596}
1597
1598void kvm_arch_exit(void)
1599{
1600 kvm_free_vmm_area();
1601 kfree(kvm_vmm_info);
1602 kvm_vmm_info = NULL;
1603}
1604
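/*
 * The VMM keeps one global dirty bitmap in the per-VM data area (at
 * KVM_MEM_DIRTY_LOG_OFS).  Copy the portion covering the requested
 * slot into the slot's dirty_bitmap and clear it before userspace
 * reads it through KVM_GET_DIRTY_LOG.
 */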
1605static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1606 struct kvm_dirty_log *log)
1607{
1608 struct kvm_memory_slot *memslot;
1609 int r, i;
1610 long n, base;
1611 unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
1612 + KVM_MEM_DIRTY_LOG_OFS);
1613
1614 r = -EINVAL;
1615 if (log->slot >= KVM_MEMORY_SLOTS)
1616 goto out;
1617
1618 memslot = &kvm->memslots[log->slot];
1619 r = -ENOENT;
1620 if (!memslot->dirty_bitmap)
1621 goto out;
1622
1623 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1624 base = memslot->base_gfn / BITS_PER_LONG;
1625
1626 for (i = 0; i < n/sizeof(long); ++i) {
1627 memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
1628 dirty_bitmap[base + i] = 0;
1629 }
1630 r = 0;
1631out:
1632 return r;
1633}
1634
1635int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1636 struct kvm_dirty_log *log)
1637{
1638 int r;
1639 int n;
1640 struct kvm_memory_slot *memslot;
1641 int is_dirty = 0;
1642
1643 spin_lock(&kvm->arch.dirty_log_lock);
1644
1645 r = kvm_ia64_sync_dirty_log(kvm, log);
1646 if (r)
1647 goto out;
1648
1649 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1650 if (r)
1651 goto out;
1652
1653 /* If nothing is dirty, don't bother messing with page tables. */
1654 if (is_dirty) {
1655 kvm_flush_remote_tlbs(kvm);
1656 memslot = &kvm->memslots[log->slot];
1657 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1658 memset(memslot->dirty_bitmap, 0, n);
1659 }
1660 r = 0;
1661out:
1662 spin_unlock(&kvm->arch.dirty_log_lock);
1663 return r;
1664}
1665
1666int kvm_arch_hardware_setup(void)
1667{
1668 return 0;
1669}
1670
1671void kvm_arch_hardware_unsetup(void)
1672{
1673}
1674
1675static void vcpu_kick_intr(void *info)
1676{
1677#ifdef DEBUG
1678 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
1679 printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
1680#endif
1681}
1682
1683void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1684{
1685 int ipi_pcpu = vcpu->cpu;
1686
1687 if (waitqueue_active(&vcpu->wq))
1688 wake_up_interruptible(&vcpu->wq);
1689
1690 if (vcpu->guest_mode)
1691 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
1692}
1693
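/*
 * Pend an interrupt for the vcpu by setting its bit in the VPD irr[]
 * bitmap.  If the vector was not already pending, mark irq_new_pending
 * and either kick a running vcpu or wake a halted one.  Returns 1 if
 * the interrupt was newly pended, 0 if it was already set.
 */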
1694int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
1695{
1696
1697 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1698
1699 if (!test_and_set_bit(vec, &vpd->irr[0])) {
1700 vcpu->arch.irq_new_pending = 1;
1701 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
1702 kvm_vcpu_kick(vcpu);
1703 else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
1704 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1705 if (waitqueue_active(&vcpu->wq))
1706 wake_up_interruptible(&vcpu->wq);
1707 }
1708 return 1;
1709 }
1710 return 0;
1711}
1712
1713int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
1714{
1715 return apic->vcpu->vcpu_id == dest;
1716}
1717
1718int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
1719{
1720 return 0;
1721}
1722
1723struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
1724 unsigned long bitmap)
1725{
1726 struct kvm_vcpu *lvcpu = kvm->vcpus[0];
1727 int i;
1728
1729 for (i = 1; i < KVM_MAX_VCPUS; i++) {
1730 if (!kvm->vcpus[i])
1731 continue;
1732 if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
1733 lvcpu = kvm->vcpus[i];
1734 }
1735
1736 return lvcpu;
1737}
1738
1739static int find_highest_bits(int *dat)
1740{
1741 u32 bits, bitnum;
1742 int i;
1743
1744 /* loop for all 256 bits */
1745 for (i = 7; i >= 0 ; i--) {
1746 bits = dat[i];
1747 if (bits) {
1748 bitnum = fls(bits);
1749 return i * 32 + bitnum - 1;
1750 }
1751 }
1752
1753 return -1;
1754}
1755
1756int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
1757{
1758 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1759
1760 if (vpd->irr[0] & (1UL << NMI_VECTOR))
1761 return NMI_VECTOR;
1762 if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
1763 return ExtINT_VECTOR;
1764
1765 return find_highest_bits((int *)&vpd->irr[0]);
1766}
1767
1768int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
1769{
1770 if (kvm_highest_pending_irq(vcpu) != -1)
1771 return 1;
1772 return 0;
1773}
1774
1775int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1776{
1777 return 0;
1778}
1779
1780gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1781{
1782 return gfn;
1783}
1784
1785int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1786{
1787 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
1788}
1789
1790int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1791 struct kvm_mp_state *mp_state)
1792{
1793 vcpu_load(vcpu);
1794 mp_state->mp_state = vcpu->arch.mp_state;
1795 vcpu_put(vcpu);
1796 return 0;
1797}
1798
1799static int vcpu_reset(struct kvm_vcpu *vcpu)
1800{
1801 int r;
1802 long psr;
1803 local_irq_save(psr);
1804 r = kvm_insert_vmm_mapping(vcpu);
1805 if (r)
1806 goto fail;
1807
1808 vcpu->arch.launched = 0;
1809 kvm_arch_vcpu_uninit(vcpu);
1810 r = kvm_arch_vcpu_init(vcpu);
1811 if (r)
1812 goto fail;
1813
1814 kvm_purge_vmm_mapping(vcpu);
1815 r = 0;
1816fail:
1817 local_irq_restore(psr);
1818 return r;
1819}
1820
1821int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1822 struct kvm_mp_state *mp_state)
1823{
1824 int r = 0;
1825
1826 vcpu_load(vcpu);
1827 vcpu->arch.mp_state = mp_state->mp_state;
1828 if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
1829 r = vcpu_reset(vcpu);
1830 vcpu_put(vcpu);
1831 return r;
1832}