#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX  0x1
#define CLBR_ECX  0x2
#define CLBR_EDX  0x4
#define CLBR_ANY  0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
};
struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);
        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};

struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};
struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);
};
struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);
        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);
        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);

        /* These two are jumped to, not actually called. */
        void (*irq_enable_syscall_ret)(void);
        void (*iret)(void);

        struct pv_lazy_ops lazy_mode;
};
struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
};
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        void (*apic_write_atomic)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping or entry.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);
        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif
#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;
};
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template
{
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
};
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
#define PARAVIRT_PATCH(x) \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op) \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber) \
        [paravirt_clobber] "i" (clobber)
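
/*
 * Worked example (illustrative): the type number for, say,
 * pv_cpu_ops.read_cr0 is
 *
 *      PARAVIRT_PATCH(pv_cpu_ops.read_cr0)
 *        == offsetof(struct paravirt_patch_template,
 *                    pv_cpu_ops.read_cr0) / sizeof(void *)
 *
 * i.e. the index of that hook when the whole template is viewed as a
 * flat array of function pointers, which is what lets the patcher
 * convert a type number back into a structure offset.
 */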
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber) \
        "771:\n\t" insn_string "\n" "772:\n" \
        ".pushsection .parainstructions,\"a\"\n" \
        "  .align 4\n" \
        "  .long 771b\n" \
        "  .byte " type "\n" \
        "  .byte 772b-771b\n" \
        "  .short " clobber "\n" \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
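
/*
 * Illustrative note: each paravirt_alt() use leaves the instruction
 * bytes inline between labels 771 and 772, and emits one record into
 * .parainstructions -- site address (.long 771b), type number (.byte),
 * site length (.byte 772b-771b) and clobber mask (.short) -- matching
 * struct paravirt_patch_site defined further down.
 */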
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

int paravirt_disable_iospace(void);
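
/*
 * Illustrative sketch (an assumption, not part of this interface): a
 * minimal backend implementation of the pv_init_ops.patch hook above.
 * A hypervisor port can special-case the sites it knows how to inline
 * and hand everything else to paravirt_patch_default(); "my_pv_patch"
 * is a hypothetical name.
 */
#if 0
static unsigned my_pv_patch(u8 type, u16 clobber, void *insnbuf,
                            unsigned long addr, unsigned len)
{
        /* No special cases: let the generic code emit a direct
         * call/jmp to the current op, or leave the indirect call. */
        return paravirt_patch_default(type, clobber, insnbuf, addr, len);
}
#endif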
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL "call *%[paravirt_opptr];"
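
/*
 * Illustrative sketch (assumption, not kernel code): converting a type
 * number back into the operation pointer, as a patcher would.  "tmpl"
 * stands for a paravirt_patch_template assembled from the pv_*_ops
 * instances above; the cast is valid because the template contains
 * nothing but function pointers.
 */
#if 0
static void *pvop_from_type(struct paravirt_patch_template *tmpl, u8 type)
{
        return *((void **)tmpl + type);
}
#endif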
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
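
/*
 * Illustrative sketch: what a typical wrapper built on these macros
 * looks like (the real wrappers follow below; "example_read_cr0" is a
 * hypothetical name).
 *
 *      static inline unsigned long example_read_cr0(void)
 *      {
 *              return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
 *      }
 *
 * This expands to a patchable indirect call through
 * pv_cpu_ops.read_cr0 with the result taken from %eax (%rax).
 */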
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx), \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi), \
                                "=S" (__esi), "=d" (__edx), \
                                "=c" (__ecx)

#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...) \
        ({ \
                rettype __ret; \
                PVOP_CALL_ARGS; \
                /* This is 32-bit specific, but is okay in 64-bit */ \
                /* since this condition will never hold */ \
                if (sizeof(rettype) > sizeof(unsigned long)) { \
                        asm volatile(pre \
                                     paravirt_alt(PARAVIRT_CALL) \
                                     post \
                                     : PVOP_CALL_CLOBBERS \
                                     : paravirt_type(op), \
                                       paravirt_clobber(CLBR_ANY), \
                                       ##__VA_ARGS__ \
                                     : "memory", "cc" EXTRA_CLOBBERS); \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else { \
                        asm volatile(pre \
                                     paravirt_alt(PARAVIRT_CALL) \
                                     post \
                                     : PVOP_CALL_CLOBBERS \
                                     : paravirt_type(op), \
                                       paravirt_clobber(CLBR_ANY), \
                                       ##__VA_ARGS__ \
                                     : "memory", "cc" EXTRA_CLOBBERS); \
                        __ret = (rettype)__eax; \
                } \
                __ret; \
        })

#define __PVOP_VCALL(op, pre, post, ...) \
        ({ \
                PVOP_VCALL_ARGS; \
                asm volatile(pre \
                             paravirt_alt(PARAVIRT_CALL) \
                             post \
                             : PVOP_VCALL_CLOBBERS \
                             : paravirt_type(op), \
                               paravirt_clobber(CLBR_ANY), \
                               ##__VA_ARGS__ \
                             : "memory", "cc" VEXTRA_CLOBBERS); \
        })
#define PVOP_CALL0(rettype, op) \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
        "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
        "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
/* PVOP_CALL4 and PVOP_VCALL4 are the only ones that differ between
 * i386 and x86_64; on x86_64 we can make them much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
        __PVOP_CALL(rettype, op, \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;", \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
        __PVOP_VCALL(op, \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;", \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
        "3"((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
        "3"((unsigned long)(arg4)))
#endif
static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}
#define ARCH_SETUP              pv_init_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
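
/*
 * Usage sketch (illustrative): the macro form mirrors the native
 * debug-register interface.
 *
 *      unsigned long dr7;
 *      get_debugreg(dr7, 7);
 *      set_debugreg(dr7, 7);
 */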
static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do { \
        int _err; \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l; \
        val2 = _l >> 32; \
} while(0)

#define wrmsr(msr,val1,val2) do { \
        paravirt_write_msr(msr, val1, val2); \
} while(0)

#define rdmsrl(msr,val) do { \
        int _err; \
        val = paravirt_read_msr(msr, &_err); \
} while(0)

#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({ \
        int _err; \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l; \
        (*b) = _l >> 32; \
        _err; })
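
/*
 * Usage sketch (illustrative; the MSR number is just a stand-in):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(0xc0000080, &lo, &hi) == 0)
 *              ... use ((u64)hi << 32) | lo ...
 */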
static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low) do { \
        u64 _l = paravirt_read_tsc(); \
        low = (int)_l; \
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
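
/*
 * Usage sketch (illustrative): rdtscll() reads the whole 64-bit
 * counter, rdtscl() just the low word.
 *
 *      unsigned long long t0, t1;
 *      rdtscll(t0);
 *      ...
 *      rdtscll(t1);
 *      // t1 - t0 is the elapsed TSC tick count
 */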
static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter,low,high) do { \
        u64 _l = paravirt_read_pmc(counter); \
        low = (u32)_l; \
        high = _l >> 32; \
} while(0)
static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}
/* The paravirtualized I/O functions */
static inline void slow_down_io(void) {
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif
static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}
static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
                                           unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
}
#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
        unsigned long long ret = PVOP_CALL2(unsigned long long,
                                            pv_mmu_ops.make_pte,
                                            val, val >> 32);
        return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
        return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
                                    val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
        return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
                                    val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
                          x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
                          x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
                          x.pgd, x.pgd >> 32);
}
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
                    pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                    pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */

static inline pte_t __pte(unsigned long val)
{
        return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
        return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
}
#endif  /* CONFIG_X86_PAE */
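
/*
 * Illustrative round trip (assumption, for exposition only): the
 * make_* and *_val hooks are inverses, which lets a backend keep page
 * table entries in its own encoding.  "raw" is a hypothetical name.
 *
 *      pte_t pte = __pte(raw);         // native bits -> backend pte
 *      raw = pte_val(pte);             // backend pte -> native bits
 */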
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
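
/*
 * Usage sketch (illustrative): batching a run of pagetable updates so
 * a backend can queue them and issue a single flush on leave.
 *
 *      arch_enter_lazy_mmu_mode();
 *      set_pte_at(mm, addr, ptep, pte);        // may be deferred...
 *      set_pte_at(mm, addr2, ptep2, pte2);     // ...and batched
 *      arch_leave_lazy_mmu_mode();             // batch flushed here
 */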
void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
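
/*
 * Illustrative sketch (an approximation of what apply_paravirt() does,
 * not its actual implementation): walk the records emitted by
 * paravirt_alt()/PARA_SITE and hand each site to the backend patch
 * hook.  The buffer size is an assumption.
 */
#if 0
static void patch_all_sites(void)
{
        struct paravirt_patch_site *p;
        char insnbuf[16];

        for (p = __parainstructions; p < __parainstructions_end; p++)
                pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                  (unsigned long)p->instr, p->len);
}
#endif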
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : "0"(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}
static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
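
/*
 * Usage sketch (illustrative): the classic save/disable...restore
 * pattern built from the primitives above.
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ... critical section runs with interrupts off ...
 *      raw_local_irq_restore(flags);
 */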
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)

#define PARA_SITE(ptype, clobbers, ops) \
771:; \
        ops; \
772:; \
        .pushsection .parainstructions,"a"; \
         .align 4; \
         .long 771b; \
         .byte ptype; \
         .byte 772b-771b; \
         .short clobbers; \
        .popsection

#define INTERRUPT_RETURN \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)

#define DISABLE_INTERRUPTS(clobbers) \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  pushl %eax; pushl %ecx; pushl %edx; \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \
                  popl %edx; popl %ecx; popl %eax) \

#define ENABLE_INTERRUPTS(clobbers) \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
                  pushl %eax; pushl %ecx; pushl %edx; \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS_SYSCALL_RET \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret), \
                  CLBR_NONE, \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)

#define GET_CR0_INTO_EAX \
        push %ecx; push %edx; \
        call *pv_cpu_ops+PV_CPU_read_cr0; \
        pop %edx; pop %ecx

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */