#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all registers the platform has.  For i386,
   that's just these four. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
#endif /* CONFIG_X86_32 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
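/*
 * Worked example: on 32-bit, CLBR_ARG_REGS | CLBR_SCRATCH is
 * eax|edx|ecx and CLBR_RET_REG is eax|edx, so CLBR_CALLEE_SAVE
 * reduces to just CLBR_ECX.
 */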
#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};
/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};
struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
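/*
 * A minimal patch implementation could simply defer to the generic
 * patcher declared later in this header; "example_patch" is an
 * illustrative, made-up name:
 *
 *	static unsigned example_patch(u8 type, u16 clobber, void *insnbuf,
 *				      unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobber, insnbuf,
 *					      addr, len);
 *	}
 */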
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};
struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif
	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);
	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*rdmsr_regs)(u32 *regs);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
	int (*wrmsr_regs)(u32 *regs);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);
	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);
	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};
struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention requires.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);
	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);
	/* Hooks for allocating and freeing a pagetable top-level */
	int (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable.
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);
	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp);
	void (*pmd_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pmd_t *pmdp);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);
	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
#endif /* CONFIG_X86_PAE */
	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;
#if PAGETABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
	struct pv_lazy_ops lazy_mode;

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};
struct arch_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
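/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) evaluates to
 * the word-sized index of that slot within paravirt_patch_template,
 * so a patch-site type number can always be converted back into a
 * structure offset and vice versa.
 */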
#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters passed in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 also has to clobber all caller-saved
 * registers, which unfortunately are quite a few more (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless
 * of the return value size.
 *
 * i386 passes 64-bit arguments as a pair of adjacent 32-bit arguments,
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
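/*
 * An illustrative wrapper in the style of asm/paravirt.h (the real
 * wrappers live there, not in this header):
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 * The patcher is then free to replace the indirect call with a direct
 * call or with the native instruction sequence itself.
 */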
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),			\
				"=S" (__esi), "=d" (__edx),	\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	, "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	, "rax", "r8", "r9", "r10", "r11"
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")
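/*
 * For instance, the irq-flag wrappers in asm/paravirt.h use the
 * callee-save variants, since save_fl and friends are
 * paravirt_callee_save slots:
 *
 *	static inline unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 *	}
 */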
#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))
#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* On i386 the fourth argument has to go on the stack; x86_64 can pass
 * it in a register, which keeps the 4-argument case much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
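/*
 * The ident functions return their argument unchanged; ops slots that
 * need no transformation (pte_val and friends on native hardware, for
 * example) can point at them, and the corresponding call sites can
 * then be patched away via paravirt_patch_ident_32/64 above.
 */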
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PARAVIRT_TYPES_H */