/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *	Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc(start + l);

	ia64_sync_i();
	ia64_srlz_i();
}

static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static  DEFINE_SPINLOCK(vp_lock);
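/*
 * Per-CPU VT-i enable/disable.
 *
 * kvm_arch_hardware_enable() temporarily pins the VMM area at
 * KVM_VMM_BASE with a translation-register pair (the 0x3 mask to
 * ia64_itr_entry() selects both the instruction and data TRs) and
 * then calls PAL_VP_INIT_ENV.  The first CPU through here initializes
 * the VP environment (VP_INIT_ENV_INITALIZE) and records the returned
 * VSA base; later CPUs just join the existing environment (VP_INIT_ENV).
 */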
void kvm_arch_hardware_enable(void *garbage)
{
	long  status;
	long  tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_hardware_disable(void *garbage)
{

	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}
void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{

	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_USER_MEMORY:

		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
	}
	return r;
}
static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
					gpa_t addr, int len, int is_write)
{
	struct kvm_io_device *dev;

	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);

	return dev;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}
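/*
 * MMIO exits: the VMM side leaves an I/O request (struct kvm_mmio_req)
 * in the vcpu's exit data.  Accesses that hit the in-kernel IOAPIC page
 * are completed right here through the kvm_io_bus lookup; everything
 * else is forwarded to userspace as a KVM_EXIT_MMIO exit, and the reply
 * is copied back in later by kvm_set_mmio_data().
 */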
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
	if (mmio_dev) {
		if (!p->dir)
			kvm_iodevice_write(mmio_dev, p->addr, p->size,
						&p->data);
		else
			kvm_iodevice_read(mmio_dev, p->addr, p->size,
						&p->data);

	} else
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}
static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}
/*
 * offset: address offset to IPI space.
 * value:  deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
				uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		kvm_apic_set_irq(vcpu, vector, 0);
		break;
	case SAPIC_NMI:
		kvm_apic_set_irq(vcpu, 2, 0);
		break;
	case SAPIC_EXTINT:
		kvm_apic_set_irq(vcpu, 0, 0);
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		break;
	}
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (kvm->vcpus[i]) {
			lid.val = VCPU_LID(kvm->vcpus[i]);
			if (lid.id == id && lid.eid == eid)
				return kvm->vcpus[i];
		}
	}

	return NULL;
}
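/*
 * handle_ipi(): an IPI sent by the guest is routed to the target vcpu,
 * found by its local ID (id/eid pair).  A vcpu that has never been
 * launched is treated as receiving its boot wake-up: its ip/gp are
 * pointed at the boot entry recorded in rdv_sal_data and it is made
 * runnable.  Otherwise the IPI is delivered through the emulated
 * local SAPIC via vcpu_deliver_ipi().
 */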
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}
struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};

static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}
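/*
 * Guest ptc.g (global TLB purge) emulation: the purge request is
 * replayed on every other vcpu by queueing it through
 * smp_call_function_single(), so each vcpu applies it to its own
 * virtual TLB before re-entering the guest.  If a vcpu's queue
 * (MAX_PTC_G_NUM entries) overflows, the queued records are dropped
 * and a full KVM_REQ_TLB_FLUSH is requested instead.
 */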
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;

	call_data.ptc_g_data = p->u.ptc_g_data;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
						KVM_MP_STATE_UNINITIALIZED ||
					vcpu == kvm->vcpus[i])
			continue;

		if (waitqueue_active(&kvm->vcpus[i]->wq))
			wake_up_interruptible(&kvm->vcpus[i]->wq);

		if (kvm->vcpus[i]->cpu != -1) {
			call_data.vcpu = kvm->vcpus[i];
			smp_call_function_single(kvm->vcpus[i]->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");

	}
	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
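/*
 * Halt emulation: compute how far the guest's interval timer match
 * (vpd->itm) is ahead of the guest's current ITC (host ITC plus the
 * per-vcpu itc_offset), arm an hrtimer for that interval and block the
 * vcpu.  hlt_timer_fn() makes the vcpu runnable again when the virtual
 * timer would have fired.
 */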
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{

	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;

	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

	if (time_after(vcpu_now_itc, vpd->itm)) {
		vcpu->arch.timer_check = 1;
		return 1;
	}
	itc_diff = vpd->itm - vcpu_now_itc;
	if (itc_diff < 0)
		itc_diff = -itc_diff;

	expires = div64_u64(itc_diff, cyc_per_usec);
	kt = ktime_set(0, 1000 * expires);
	vcpu->arch.ht_active = 1;
	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!\n");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
	[EXIT_REASON_PAL_CALL]              = handle_pal_call,
	[EXIT_REASON_SAL_CALL]              = handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
	[EXIT_REASON_IPI]                   = handle_ipi,
	[EXIT_REASON_PTC_G]                 = handle_global_purge,
};

static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}
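/*
 * World-switch address-space setup: before jumping into the VMM, the
 * VMM text area (KVM_VMM_BASE) and this VM's data area
 * (KVM_VM_DATA_BASE, layout defined in kvm_host.h) are pinned with
 * translation-register pairs, and rr6 is switched to the VMM's region
 * ID so the transition code survives the address-space change.  The
 * mappings are purged again right after the transition back to host.
 */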
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of tr to map vmm */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/* Insert a pair of tr to map data of vm */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
}
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	return kvm_insert_vmm_mapping(vcpu);
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}

static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/* Get host and guest context with guest address space. */
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto out;
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
	kvm_vcpu_post_transition(vcpu);
	r = 0;
out:
	return r;
}
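/*
 * Main run loop: interrupts are disabled around the transition into
 * the VMM, with a signal check first so a pending signal turns into a
 * KVM_EXIT_INTR exit instead of a lost wakeup.  On return,
 * kvm_handle_exit() dispatches on the exit reason; a positive return
 * value means the exit was handled in kernel and the guest can be
 * re-entered, zero or negative falls out to userspace.
 */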
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();

	kvm_prepare_guest_switch(vcpu);
	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
/*
 * Allocate 16M memory for every vm to hold its specific data.
 * Its memory map is defined in kvm_host.h.
 */
static struct kvm *kvm_alloc_kvm(void)
{

	struct kvm *kvm;
	uint64_t  vm_base;

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);
	printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);

	/* Zero all pages before use! */
	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);

	kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
	kvm->arch.vm_base = vm_base;

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};
static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
			j < io_ranges[i].start + io_ranges[i].size;
			j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}
/* Use unused rids to virtualize guest rid. */
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
{
	long vm_base;

	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	vm_base = kvm->arch.vm_base;
	if (vm_base) {
		kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
		kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
		kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
	}

	/*
	 * Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);
	kvm_init_vm(kvm);

	return kvm;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
				&chip->chip.ioapic,
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
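/*
 * Register state save/restore for the KVM_GET_REGS/KVM_SET_REGS
 * ioctls.  Besides the architectural VPD fields, the whole guest
 * context and the VMM stack page are shuttled to/from userspace via
 * regs->saved_guest and regs->saved_stack, and the guest ITC is made
 * migration-safe by exporting it as saved_itc and recomputing
 * itc_offset against the (possibly different) host ITC on restore.
 */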
#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;
	int r;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
						sizeof(union context));
	if (r)
		goto out;
	r = copy_from_user(vcpu + 1, regs->saved_stack +
			sizeof(struct kvm_vcpu),
			IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
	if (r)
		goto out;
	vcpu->arch.exit_data =
		((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];

	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);
	r = 0;
out:
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
		}
	case KVM_CREATE_IRQCHIP:
		r = -EFAULT;
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
		}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
		}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
		}
	default:
		;
	}
out:
	return r;
}
*vcpu
,
988 struct kvm_sregs
*sregs
)
993 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu
*vcpu
,
994 struct kvm_sregs
*sregs
)
999 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu
*vcpu
,
1000 struct kvm_translation
*tr
)
1006 static int kvm_alloc_vmm_area(void)
1008 if (!kvm_vmm_base
&& (kvm_vm_buffer_size
< KVM_VM_BUFFER_SIZE
)) {
1009 kvm_vmm_base
= __get_free_pages(GFP_KERNEL
,
1010 get_order(KVM_VMM_SIZE
));
1014 memset((void *)kvm_vmm_base
, 0, KVM_VMM_SIZE
);
1015 kvm_vm_buffer
= kvm_vmm_base
+ VMM_SIZE
;
1017 printk(KERN_DEBUG
"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1018 kvm_vmm_base
, kvm_vm_buffer
);
1024 static void kvm_free_vmm_area(void)
1027 /*Zero this area before free to avoid bits leak!!*/
1028 memset((void *)kvm_vmm_base
, 0, KVM_VMM_SIZE
);
1029 free_pages(kvm_vmm_base
, get_order(KVM_VMM_SIZE
));
static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/* Set vac and vdc fields */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/* Set virtual buffer */
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}
static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}
static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	q = &vcpu->wq;
	if (waitqueue_active(q)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		wake_up_interruptible(q);
	}
out:
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}
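/*
 * vcpu 0 starts life at the PAL emulation's reset entry and is
 * immediately runnable; secondary vcpus stay UNINITIALIZED until they
 * receive the boot IPI (see handle_ipi()).  All vcpus of a VM share
 * one itc_offset so the guest sees a consistent time base.
 */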
#define PALE_RESET_ENTRY    0x80000000ffffffb0UL

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/* Init vcpu context for first run. */
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (vcpu->vcpu_id == 0) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/* Set entry address for first run. */
		regs->cr_iip = PALE_RESET_ENTRY;

		/* Initialize itc offset for vcpus */
		itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
		for (i = 0; i < MAX_VCPU_NUM; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0; /*unat*/
	p_ctx->ar[19] = 0x0; /*rnat*/
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
			((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0; /*pfs*/
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/* Initialize region registers */
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/* Initialize branch register 0 */
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	int r;
	unsigned long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);
	local_irq_restore(psr);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	local_irq_restore(psr);
	return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
	vcpu->kvm = kvm;

	cpu = get_cpu();
	vti_vcpu_load(vcpu, cpu);
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
		struct kvm_debug_guest *dbg)
{
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}

}
static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	for (i = 0; i < kvm->nmemslots; i++) {
		memslot = &kvm->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}
#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;
	int r;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
					sizeof(union context));
	if (r)
		goto out;
	r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
	if (r)
		goto out;
	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);

	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);
	vcpu_put(vcpu);
	r = 0;
out:
	return r;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	struct page *page;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	for (i = 0; i < npages; i++) {
		page = gfn_to_page(kvm, base_gfn + i);
		kvm_set_pmt_entry(kvm, base_gfn + i,
				page_to_pfn(page) << PAGE_SHIFT,
				_PAGE_AR_RWX|_PAGE_MA_WB);
		memslot->rmap[i] = (unsigned long)page;
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}
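/*
 * The VMM module is copied from its original module load address into
 * the pinned area at kvm_vmm_base and must then be relocated by hand.
 * On ia64 a function pointer actually points to a function descriptor
 * (struct fdesc: entry ip plus gp), so both the ip and the gp of the
 * vmm_entry and tramp_entry descriptors are rebased from module_base
 * to KVM_VMM_BASE before they are ever called.
 */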
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
						struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/* Calculate new position of relocated vmm module. */
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/* Recalculate kvm_vmm_info based on new VMM */
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}
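/*
 * Dirty page logging: the VMM writes dirty bits into a log buffer kept
 * inside the per-VM data area (at KVM_MEM_DIRTY_LOG_OFS from its
 * start).  Syncing copies those bits into the generic memslot bitmap
 * and clears them, all under arch.dirty_log_lock, which is shared with
 * the VMM side through dirty_log_lock_pa.
 */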
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long n, base;
	unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
			+ KVM_MEM_DIRTY_LOG_OFS);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	if (vcpu->guest_mode)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
}
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{

	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vec, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
			kvm_vcpu_kick(vcpu);
		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
			if (waitqueue_active(&vcpu->wq))
				wake_up_interruptible(&vcpu->wq);
		}
		return 1;
	}
	return 0;
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}
struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
				       unsigned long bitmap)
{
	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
	int i;

	for (i = 1; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i])
			continue;
		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
			lvcpu = kvm->vcpus[i];
	}

	return lvcpu;
}

static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}
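/*
 * Pending-interrupt priority: NMI beats ExtINT, which beats ordinary
 * vectors; among ordinary vectors the highest-numbered one wins, found
 * by scanning the 256-bit irr from the top word down.
 */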
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_highest_pending_irq(vcpu) != -1)
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}