x86/paravirt: add spin_lock_flags lock op
include/asm-x86/paravirt.h
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE	0
#define CLBR_EAX	(1 << 0)
#define CLBR_ECX	(1 << 1)
#define CLBR_EDX	(1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI	(1 << 3)
#define CLBR_RDI	(1 << 4)
#define CLBR_R8		(1 << 5)
#define CLBR_R9		(1 << 6)
#define CLBR_R10	(1 << 7)
#define CLBR_R11	(1 << 8)
#define CLBR_ANY	((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all the registers the platform has.
 * For i386, that's just the three above. */
#define CLBR_ANY	((1 << 3) - 1)
#endif /* X86_64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);

	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */
	void (*banner)(void);
};


struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	void (*time_init)(void);

	/* Get and set time of day */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);

	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 */
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Direct APIC operations, principally for VMI.  Ideally
	 * these shouldn't be in this interface.
	 */
	void (*apic_write)(unsigned long reg, u32 v);
	u32 (*apic_read)(unsigned long reg);
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);

	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	/*
	 * Called before/after init_mm pagetable setup. setup_start
	 * may reset %cr3, and may pre-install parts of the pagetable;
	 * pagetable setup is expected to preserve any existing
	 * mapping.
	 */
	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);


	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
	void (*release_pte)(u32 pfn);
	void (*release_pmd)(u32 pfn);
	void (*release_pud)(u32 pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	pteval_t (*pte_val)(pte_t);
	pteval_t (*pte_flags)(pte_t);
	pte_t (*make_pte)(pteval_t pte);

	pgdval_t (*pgd_val)(pgd_t);
	pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	pmdval_t (*pmd_val)(pmd_t);
	pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
	pudval_t (*pud_val)(pud_t);
	pud_t (*make_pud)(pudval_t pud);

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct raw_spinlock *lock);
	int (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
};

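/*
 * Illustrative sketch only (not part of this interface): a hypervisor
 * backend overrides these ops at boot, and the __raw_spin_*() wrappers
 * further down dispatch to whatever was installed.  The xen_* names
 * below are hypothetical stand-ins for a real backend implementation.
 *
 *	static void xen_spin_lock_flags(struct raw_spinlock *lock,
 *					unsigned long flags)
 *	{
 *		// e.g. spin a while, then block in the hypervisor;
 *		// 'flags' lets the backend re-enable IRQs while waiting
 *	}
 *
 *	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
 */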
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

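/*
 * Typical DEF_NATIVE() usage, shown here only as an illustration (the
 * real definitions live in the native patch backends, e.g.
 * arch/x86/kernel/paravirt_patch_32.c): each line emits the raw native
 * instruction sequence plus start/end markers that native_patch() can
 * copy over a call site via paravirt_patch_insns().
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *	DEF_NATIVE(pv_cpu_ops, iret, "iret");
 */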
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (%r8 - %r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
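/*
 * As a concrete illustration of the macros described above (this is a
 * sketch; the real wrappers follow later in this header): a void op
 * taking two arguments is wrapped as
 *
 *	static inline void set_debugreg(unsigned long val, int reg)
 *	{
 *		PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
 *	}
 *
 * which expands to a single patchable indirect call, with reg and val
 * placed in the registers dictated by the calling convention above.
 */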
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)

#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : PVOP_VCALL_CLOBBERS			\
			     : paravirt_type(op),			\
			       paravirt_clobber(CLBR_ANY),		\
			       ##__VA_ARGS__				\
			     : "memory", "cc" VEXTRA_CLOBBERS);		\
	})

#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		     "3" ((unsigned long)(arg4)))
#endif

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP			pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	int __aux;					\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned long __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
	PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
	return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
	if (pv_init_ops.post_allocator_init)
		(*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
					    unsigned start, unsigned count)
{
	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	unsigned long ret;
	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
	return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t,
				 pv_mmu_ops.make_pte,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pteval_t,
				 pv_mmu_ops.make_pte,
				 val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte);

	return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
	BUG_ON(ret & PTE_PFN_MASK);
#endif
	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
				 val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
				 val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
				 val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}

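/*
 * Illustrative use of the lazy MMU mode (sketch only): batch a run of
 * pagetable updates so a backend can queue and flush them in one
 * hypercall.  make_new_pte() here is a hypothetical helper.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, make_new_pte(addr));
 *	arch_leave_lazy_mmu_mode();
 */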
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)

void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
						  unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

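/*
 * For illustration only: the patcher in apply_paravirt()
 * (arch/x86/kernel/alternative.c) walks these records and hands each
 * site to the backend's patch hook, roughly along the lines of
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++)
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 p->instr, (unsigned long)p->instr,
 *					 p->len);
 *
 * with any remaining bytes up to p->len nop-padded afterwards.
 */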
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers but the argument parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
	return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection


#ifdef CONFIG_X86_64
#define PV_SAVE_REGS				\
	push %rax;				\
	push %rcx;				\
	push %rdx;				\
	push %rsi;				\
	push %rdi;				\
	push %r8;				\
	push %r9;				\
	push %r10;				\
	push %r11
#define PV_RESTORE_REGS				\
	pop %r11;				\
	pop %r10;				\
	pop %r9;				\
	pop %r8;				\
	pop %rdi;				\
	pop %rsi;				\
	pop %rdx;				\
	pop %rcx;				\
	pop %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS;)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
		  PV_RESTORE_REGS					\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */