#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9
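
/*
 * Illustrative sketch (not part of this header): a request is raised
 * with set_bit() on vcpu->requests and consumed in the vcpu's run loop
 * with test_and_clear_bit(), e.g.
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);
 *
 * where flush_guest_tlb() stands in for an arch-specific handler.
 */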

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last
 * words :), so until then it will suffice. At least it's abstracted so
 * we can change it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
		     const void *val);
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
		    void *val);
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev);
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev);
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			       struct kvm_io_device *dev);
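
/*
 * Usage sketch: a device embeds a struct kvm_io_device and registers it
 * on one of the per-VM buses; accesses are then dispatched by address:
 *
 *	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &mydev->dev);
 *	...
 *	kvm_io_bus_write(&kvm->mmio_bus, addr, len, val);
 *
 * mydev is a hypothetical driver-private structure; the bus holds at
 * most NR_IOBUS_DEVS devices and lookup is the linear search noted
 * above.
 */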

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	struct kvm_run *run;
	unsigned long requests;
	unsigned long guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct list_head link;
};

struct kvm {
	spinlock_t mmu_lock;
	spinlock_t requests_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
	struct kvm_vcpu *bsp_vcpu;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
	struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ##__VA_ARGS__);	\
 } while (0)
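
/*
 * Example (illustrative): report an unhandled guest MSR write, rate
 * limited so a misbehaving guest cannot flood the host log. msr is a
 * hypothetical local variable in the caller:
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */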

#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/*
	 * Pairs with the write barrier in the vcpu creation path, so
	 * that a vcpus[i] entry is visible once online_vcpus has been
	 * observed to cover it.
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

197
198 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
199 for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
200 idx < atomic_read(&kvm->online_vcpus) && vcpup; \
201 vcpup = kvm_get_vcpu(kvm, ++idx))
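
/*
 * Usage sketch: kick every online vcpu of a VM. idx and vcpup are
 * caller-provided iteration variables:
 *
 *	int i;
 *	struct kvm_vcpu *v;
 *
 *	kvm_for_each_vcpu(i, v, kvm)
 *		kvm_vcpu_kick(v);
 */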

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
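
/*
 * Worked example of the encoding above: with a 64-bit hpa_t, HPA_MSB is
 * 63 and HPA_ERR_MASK is 1ULL << 63, so error values carry the top bit
 * and is_error_hpa() only has to test that bit:
 *
 *	is_error_hpa(HPA_ERR_MASK)	-> 1
 *	is_error_hpa(0x1000)		-> 0
 */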

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING	0x1
struct kvm_guest_msix_entry {
	u32 vector;
	u16 entry;
	u16 flags;
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	int assigned_dev_id;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct kvm_guest_msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
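
/*
 * Usage sketch: an arch run loop brackets guest execution with these
 * helpers so guest time is accounted to the vcpu task. Here
 * enter_guest_mode() stands in for the arch-specific world switch:
 *
 *	kvm_guest_enter();
 *	enter_guest_mode(vcpu);
 *	kvm_guest_exit();
 */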

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
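
/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12), guest frame number
 * 0x100 maps to guest physical address 0x100 << 12 == 0x100000. The
 * cast widens the frame number before the shift so large frame numbers
 * do not overflow.
 */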

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under mmu_lock and both values are
	 * modified under mmu_lock, so there's no need for an smp_rmb()
	 * in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq, see the
	 * mmu_notifier_invalidate_range_end write side.
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
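
/*
 * Usage sketch of the retry protocol (illustrative): sample the
 * sequence count before resolving the pfn, then recheck under mmu_lock
 * before installing the mapping:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;
 *	... install the mapping ...
 *
 * On retry the caller releases the pfn and faults again.
 */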
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif
#endif