KVM: Portability: Move vcpu regs enumeration definition to x86.h
drivers/kvm/kvm.h
#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
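
/*
 * A request bit is raised with the generic bitops and consumed in the vcpu
 * loop.  A minimal sketch of both sides (illustrative only; the real
 * producers and consumers live in the arch code):
 */
#if 0
static void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	/* noticed by the vcpu before it next enters the guest */
	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
}

static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		;	/* flush via the arch hook here */
}
#endif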

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;
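
/*
 * The address types relate through PAGE_SHIFT.  Hypothetical helpers (this
 * header does not define them) make the relationship explicit:
 */
#if 0
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
#endif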

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
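
/*
 * Because the bitfields overlay 'word', a whole role can be compared or
 * hashed in a single operation; a sketch:
 */
#if 0
static inline int example_role_equal(union kvm_mmu_page_role a,
				     union kvm_mmu_page_role b)
{
	return a.word == b.word;	/* all fields compared at once */
}
#endif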

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* holds the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
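
/*
 * Callers dispatch through the hooks instead of testing the paging mode;
 * e.g. a fault path sketch, assuming the arch code passes the vcpu's
 * currently installed mmu:
 */
#if 0
static int example_handle_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				gva_t gva, u32 error_code)
{
	/* whichever of the three paging modes is active handles it */
	return mmu->page_fault(vcpu, gva, error_code);
}
#endif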

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
#define KVM_NR_MEM_OBJS 40

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
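
/*
 * The cache is topped up before fault handling and consumed LIFO; a sketch
 * of the consuming side (the real helpers live in mmu.c):
 */
#if 0
static void *example_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);	/* the cache was refilled beforehand */
	return mc->objects[--mc->nobjs];
}
#endif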

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
};

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
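
/*
 * The search described above amounts to the following; a sketch of what
 * kvm_io_bus_find_dev plausibly does (the real body is in kvm_main.c):
 */
#if 0
struct kvm_io_device *example_io_bus_find_dev(struct kvm_io_bus *bus,
					      gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (kvm_iodevice_inrange(bus->devs[i], addr))
			return bus->devs[i];
	return NULL;
}
#endif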

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

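/*
 * A device is a struct of ops hooked onto a bus.  A toy device sketch
 * (the names and the 0x1000-0x2000 window are made up):
 */
#if 0
static void example_dev_read(struct kvm_io_device *this,
			     gpa_t addr, int len, void *val)
{
	memset(val, 0, len);		/* reads as zeroes */
}

static void example_dev_write(struct kvm_io_device *this,
			      gpa_t addr, int len, const void *val)
{
	/* writes are discarded */
}

static int example_dev_in_range(struct kvm_io_device *this, gpa_t addr)
{
	return addr >= 0x1000 && addr < 0x2000;
}

static struct kvm_io_device example_dev = {
	.read = example_dev_read,
	.write = example_dev_write,
	.in_range = example_dev_in_range,
};

/* later: kvm_io_bus_register_dev(&kvm->mmio_bus, &example_dev); */
#endif
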
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_needed;		\
	int mmio_read_completed;	\
	int mmio_is_write;		\
	int mmio_size;			\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#else
#define KVM_VCPU_MMIO

#endif

#define KVM_VCPU_COMM					\
	struct kvm *kvm;				\
	struct preempt_notifier preempt_notifier;	\
	int vcpu_id;					\
	struct mutex mutex;				\
	int cpu;					\
	struct kvm_run *run;				\
	int guest_mode;					\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int fpu_active;					\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	int sigset_active;				\
	sigset_t sigset;				\
	struct kvm_vcpu_stat stat;			\
	KVM_VCPU_MMIO

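/*
 * An arch header builds its vcpu type from these macros plus its own state;
 * schematically (the arch-specific part is elided):
 */
#if 0
struct kvm_vcpu {
	KVM_VCPU_COMM		/* fields shared by all architectures */
	/* ... arch registers, mmu state, apic, etc. ... */
};
#endif
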
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	int user_alloc;
};

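/*
 * For user-allocated slots, a gfn inside the slot maps to a host virtual
 * address by simple offsetting; a hypothetical helper:
 */
#if 0
static inline unsigned long example_gfn_to_hva(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
#endif
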
struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
};

struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
};

static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != NULL;
}

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

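/*
 * A failed translation comes back with HPA_ERR_MASK set, so callers test
 * the result before using it; a sketch:
 */
#if 0
static int example_translate(struct kvm *kvm, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(kvm, gpa);

	if (is_error_hpa(hpa))
		return -EFAULT;	/* gpa not backed by any memslot */
	/* ... use hpa ... */
	return 0;
}
#endif
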
extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
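
/*
 * These helpers hide slot lookup, aliasing and dirty tracking; e.g.
 * updating a u32 in guest memory (hypothetical caller):
 */
#if 0
static int example_update_guest_u32(struct kvm *kvm, gpa_t gpa, u32 new)
{
	u32 old;
	int r;

	r = kvm_read_guest(kvm, gpa, &old, sizeof(old));
	if (r)
		return r;
	if (old == new)
		return 0;		/* nothing to do */
	return kvm_write_guest(kvm, gpa, &new, sizeof(new));
}
#endif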

enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
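
/*
 * A typical exit handler maps these results onto the run loop as below
 * (sketch; the real handlers are per-arch):
 */
#if 0
static int example_handle_emulation(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    unsigned long cr2, u16 error_code)
{
	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
	case EMULATE_DONE:
		return 1;	/* resume the guest */
	case EMULATE_DO_MMIO:
		return 0;	/* exit to userspace with the mmio request */
	case EMULATE_FAIL:
	default:
		kvm_report_emulation_failure(vcpu, "example");
		return 0;
	}
}
#endif
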
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

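/*
 * The pair brackets actual guest execution in the vcpu loop; schematically
 * (example_arch_enter_guest is a placeholder for the vmx/svm entry):
 */
#if 0
static void example_run_once(struct kvm_vcpu *vcpu)
{
	kvm_guest_enter();		/* time now accounted as guest time */
	example_arch_enter_guest(vcpu);
	kvm_guest_exit();		/* back to host-time accounting */
}
#endif
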
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
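
/*
 * kvm_main.c fills the array with one entry per counter; entries plausibly
 * look like this sketch (the real table computes offsets relative to
 * struct kvm_vcpu / struct kvm via helper macros):
 */
#if 0
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", offsetof(struct kvm_vcpu_stat, pf_fixed), KVM_STAT_VCPU },
	{ "mmu_flooded", offsetof(struct kvm_vm_stat, mmu_flooded), KVM_STAT_VM },
	{ NULL }
};
#endif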

#endif