#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
#endif /* CONFIG_X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
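
/*
 * Worked example (illustrative): on 64-bit, CLBR_CALLEE_SAVE evaluates
 * to rdi|rsi|rdx|rcx|r8|r9|r10|r11 -- rax is neither an argument nor a
 * scratch register there, so masking out CLBR_RET_REG removes nothing.
 * On 32-bit it evaluates to just ecx, since eax and edx are both
 * argument and return registers.
 */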
#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
};
struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
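
/*
 * A minimal sketch (not a real backend; "example_patch" is hypothetical)
 * of a patch hook that defers everything to the default patcher declared
 * later in this header:
 *
 *	static unsigned example_patch(u8 type, u16 clobber, void *insnbuf,
 *				      unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobber, insnbuf,
 *					      addr, len);
 *	}
 */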
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
	unsigned long (*get_tsc_khz)(void);
};
struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);
	/* MSR, PMC and TSC operations.
	   read_msr sets *err to 0 or -EFAULT; write_msr returns 0 or
	   -EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*rdmsr_regs)(u32 *regs);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
	int (*wrmsr_regs)(u32 *regs);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);
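
	/*
	 * Error convention sketch (illustrative; MSR_EFER is just an
	 * example register):
	 *
	 *	int err;
	 *	u64 val = pv_cpu_ops.read_msr(MSR_EFER, &err);
	 *	if (err)
	 *		...	(err is 0 on success, -EFAULT on fault)
	 */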
	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};
struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);
	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp);
	void (*pmd_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pmd_t *pmdp);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pte_t pte);
	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */
	struct pv_lazy_ops lazy_mode;

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};
struct arch_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock,
				unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
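
/*
 * Worked example (illustrative): each pointer-sized slot in
 * paravirt_patch_template gets a unique type number, so with
 * pv_init_ops first, PARAVIRT_PATCH(pv_init_ops.patch) is 0 and every
 * later hook's number is simply its word offset into the template.
 */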
#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
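
/*
 * Note: the %c[paravirt_opptr] operand is the address of the op slot
 * itself (see paravirt_type() above), so until a site is patched the
 * emitted instruction is an ordinary indirect call through, e.g.,
 * &pv_irq_ops.irq_disable.
 */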
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, of
 * which there are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
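
/*
 * For illustration, a typical wrapper built on these macros (as in
 * paravirt.h) looks like:
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 */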
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),			\
				"=S" (__esi), "=d" (__edx),	\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* 4-argument calls are the only case where i386 and x86_64 differ:
 * i386 has to pass the fourth argument on the stack. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
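
/*
 * Usage sketch (illustrative): a void hook taking one argument is
 * wrapped as:
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 *	}
 */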
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
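
/*
 * Sketch (illustrative) of how apply_paravirt() consumes these
 * entries: it walks the section, asks pv_init_ops.patch() to rewrite
 * each site, and nop-pads the remainder:
 *
 *	struct paravirt_patch_site *p;
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *						  p->instr,
 *						  (unsigned long)p->instr,
 *						  p->len);
 *		...	(the remaining p->len - used bytes become nops)
 *	}
 */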
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */