/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9
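/*
 * Illustrative sketch, not part of this header: a request is raised by
 * setting its bit in vcpu->requests and consumed with test_and_clear_bit().
 * On x86, for example, the TLB flush request is handled roughly like this
 * in the run loop:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		kvm_x86_ops->tlb_flush(vcpu);
 */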
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
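/*
 * Illustrative sketch, assuming a device that has already wired up its
 * struct kvm_io_device operations: registration and lookup on the PIO bus.
 *
 *	kvm_io_bus_register_dev(&kvm->pio_bus, &dev->dev);
 *	...
 *	struct kvm_io_device *d =
 *		kvm_io_bus_find_dev(&kvm->pio_bus, addr, len, is_write);
 *	if (d)
 *		kvm_iodevice_write(d, addr, len, val);
 *
 * kvm_iodevice_write() comes from iodev.h; the lookup is the linear search
 * the comment above apologizes for.
 */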
struct kvm_vcpu {
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	unsigned long requests;
	unsigned long guest_debug;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_read_completed;
	unsigned char mmio_data[8];
#endif

	struct kvm_vcpu_arch arch;
};
struct kvm_memory_slot {
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
};
struct kvm_kernel_irq_routing_entry {
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int level);
	struct list_head link;
};
struct kvm {
	spinlock_t requests_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	struct kvm_vcpu *bsp_vcpu;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t       lock;
		struct list_head items;
	} irqfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
	struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)

#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	return kvm->vcpus[i];
}
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
	     vcpup = kvm_get_vcpu(kvm, ++idx))
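/*
 * Illustrative sketch: kicking every online vcpu with the iterator above.
 *
 *	int i;
 *	struct kvm_vcpu *v;
 *
 *	kvm_for_each_vcpu(i, v, kvm)
 *		kvm_vcpu_kick(v);
 *
 * kvm_vcpu_kick() is declared later in this header.
 */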
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
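/*
 * Illustrative sketch of the reference pattern: a component that caches a
 * VM pointer pins it with kvm_get_kvm() and drops it with kvm_put_kvm();
 * the VM is torn down once users_count reaches zero.
 *
 *	kvm_get_kvm(kvm);
 *	...use kvm from another context...
 *	kvm_put_kvm(kvm);
 */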
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
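/*
 * Illustrative sketch of the get/release discipline (gfn_to_pfn() may
 * sleep, so call it outside any spinlock):
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *	if (is_error_pfn(pfn)) {
 *		kvm_release_pfn_clean(pfn);
 *		return -EFAULT;
 *	}
 *	...access the frame...
 *	kvm_release_pfn_dirty(pfn);    (or _clean if it was not written)
 */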
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
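/*
 * Illustrative sketch, with a hypothetical struct foo at guest physical
 * address gpa: a read-modify-write of guest memory.  Both calls return 0
 * on success and -EFAULT on a bad address.
 *
 *	struct foo f;
 *
 *	if (kvm_read_guest(kvm, gpa, &f, sizeof(f)))
 *		return -EFAULT;
 *	f.counter++;                   (hypothetical field)
 *	if (kvm_write_guest(kvm, gpa, &f, sizeof(f)))
 *		return -EFAULT;
 */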
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);
struct kvm_irq_ack_notifier {
	struct hlist_node link;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
#define KVM_ASSIGNED_MSIX_PENDING 0x1
struct kvm_guest_msix_entry {
	u32 vector;
	u16 entry;
	u16 flags;
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	unsigned int entries_nr;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	struct kvm_guest_msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	spinlock_t assigned_dev_lock;
};
struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	struct hlist_node link;
};
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
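/*
 * Illustrative sketch, with a hypothetical my_mask_cb() callback: a user
 * embeds the notifier, fills in func, and registers it for one GSI.
 *
 *	static void my_mask_cb(struct kvm_irq_mask_notifier *kimn, bool masked)
 *	{
 *		...react to the pin being masked or unmasked...
 *	}
 *
 *	static struct kvm_irq_mask_notifier kimn = { .func = my_mask_cb };
 *
 *	kvm_register_irq_mask_notifier(kvm, gsi, &kimn);
 *	...
 *	kvm_unregister_irq_mask_notifier(kvm, gsi, &kimn);
 */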
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
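/*
 * Illustrative sketch: an injector other than userspace allocates its own
 * source id, so its line level is tracked separately from
 * KVM_USERSPACE_IRQ_SOURCE_ID, then raises and lowers a GSI.
 *
 *	int id = kvm_request_irq_source_id(kvm);
 *	if (id < 0)
 *		return id;
 *	kvm_set_irq(kvm, id, gsi, 1);	(assert)
 *	kvm_set_irq(kvm, id, gsi, 0);	(deassert)
 *	kvm_free_irq_source_id(kvm, id);
 */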
/* For kvm->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1
#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
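/*
 * Illustrative sketch: the arch run loop brackets the actual guest entry so
 * the time in between is accounted as guest time (PF_VCPU).
 *
 *	kvm_guest_enter();
 *	...arch-specific vmentry/vmexit...
 *	kvm_guest_exit();
 */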
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
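/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), gfn_to_gpa(0x100)
 * yields gpa 0x100000.  The cast widens the frame number before the shift,
 * so frame numbers above 2^(32 - PAGE_SHIFT) do not overflow on 32-bit
 * hosts.
 */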
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
struct kvm_stats_debugfs_item {
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 0, 0, 0, 0, 0, 0)
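/*
 * Illustrative sketch: arch code emits a one-word trace event like
 *
 *	KVMTRACE_1D(INTR, vcpu, vector, handler);
 *
 * which expands to a trace_mark() on the kvm_trace_handler marker carrying
 * KVM_TRC_INTR, the vcpu pointer, the payload count (1) and the payload
 * word, zero-padded to five entries.
 */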
#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under the mmu_lock and both values are
	 * modified under mmu_lock, so there is no need for smp_rmb()
	 * here in between; otherwise mmu_notifier_count should be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
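/*
 * Illustrative sketch of the intended use, following the pattern of the x86
 * page fault path (kvm->mmu_lock is the per-VM MMU spinlock): sample the
 * sequence count, do the sleeping gfn_to_pfn() outside mmu_lock, then retry
 * if a notifier invalidation ran in between.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;    (drop the pfn and retry the fault)
 *	...install the mapping...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */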
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif
#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_irqfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);

#else

static inline void kvm_irqfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu == vcpu;
}
#endif