Commit | Line | Data |
---|---|---|
d3561b7f RR |
1 | #ifndef __ASM_PARAVIRT_H |
2 | #define __ASM_PARAVIRT_H | |
3 | /* Various instructions on x86 need to be replaced for | |
4 | * para-virtualization: those hooks are defined here. */ | |
b239fb25 JF |
5 | |
6 | #ifdef CONFIG_PARAVIRT | |
da181a8b | 7 | #include <asm/page.h> |
658be9d3 | 8 | #include <asm/asm.h> |
d3561b7f | 9 | |
139ec7c4 | 10 | /* Bitmask of what can be clobbered: usually at least eax. */ |
21438f7c GOC |
11 | #define CLBR_NONE 0 |
12 | #define CLBR_EAX (1 << 0) | |
13 | #define CLBR_ECX (1 << 1) | |
14 | #define CLBR_EDX (1 << 2) | |
15 | ||
16 | #ifdef CONFIG_X86_64 | |
17 | #define CLBR_RSI (1 << 3) | |
18 | #define CLBR_RDI (1 << 4) | |
19 | #define CLBR_R8 (1 << 5) | |
20 | #define CLBR_R9 (1 << 6) | |
21 | #define CLBR_R10 (1 << 7) | |
22 | #define CLBR_R11 (1 << 8) | |
23 | #define CLBR_ANY ((1 << 9) - 1) | |
24 | #include <asm/desc_defs.h> | |
25 | #else | |
26 | /* CLBR_ANY should match all the registers the platform has. For i386, that's just the three above */ | |
27 | #define CLBR_ANY ((1 << 3) - 1) | |
28 | #endif /* X86_64 */ | |
139ec7c4 | 29 | |
d3561b7f | 30 | #ifndef __ASSEMBLY__ |
3dc494e8 | 31 | #include <linux/types.h> |
d4c10477 | 32 | #include <linux/cpumask.h> |
ce6234b5 | 33 | #include <asm/kmap_types.h> |
8d947344 | 34 | #include <asm/desc_defs.h> |
3dc494e8 | 35 | |
ce6234b5 | 36 | struct page; |
d3561b7f | 37 | struct thread_struct; |
6b68f01b | 38 | struct desc_ptr; |
d3561b7f | 39 | struct tss_struct; |
da181a8b | 40 | struct mm_struct; |
90a0a06a | 41 | struct desc_struct; |
294688c0 | 42 | |
93b1eab3 JF |
43 | /* general info */ |
44 | struct pv_info { | |
d3561b7f | 45 | unsigned int kernel_rpl; |
5311ab62 | 46 | int shared_kernel_pmd; |
93b1eab3 | 47 | int paravirt_enabled; |
d3561b7f | 48 | const char *name; |
93b1eab3 | 49 | }; |
d3561b7f | 50 | |
93b1eab3 | 51 | struct pv_init_ops { |
139ec7c4 | 52 | /* |
93b1eab3 JF |
53 | * Patch may replace one of the defined code sequences with |
54 | * arbitrary code, subject to the same register constraints. | |
55 | * This generally means the code is not free to clobber any | |
56 | * registers other than EAX. The patch function should return | |
57 | * the number of bytes of code generated, as we nop pad the | |
58 | * rest in generic code. | |
139ec7c4 | 59 | */ |
ab144f5e AK |
60 | unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, |
61 | unsigned long addr, unsigned len); | |
139ec7c4 | 62 | |
294688c0 | 63 | /* Basic arch-specific setup */ |
d3561b7f RR |
64 | void (*arch_setup)(void); |
65 | char *(*memory_setup)(void); | |
6996d3b6 JF |
66 | void (*post_allocator_init)(void); |
67 | ||
294688c0 | 68 | /* Print a banner to identify the environment */ |
d3561b7f | 69 | void (*banner)(void); |
93b1eab3 JF |
70 | }; |
71 | ||
72 | ||
8965c1c0 | 73 | struct pv_lazy_ops { |
93b1eab3 | 74 | /* Set deferred update mode, used for batching operations. */ |
8965c1c0 JF |
75 | void (*enter)(void); |
76 | void (*leave)(void); | |
93b1eab3 JF |
77 | }; |
78 | ||
79 | struct pv_time_ops { | |
80 | void (*time_init)(void); | |
d3561b7f | 81 | |
294688c0 | 82 | /* Get and set time of day */
d3561b7f RR |
83 | unsigned long (*get_wallclock)(void); |
84 | int (*set_wallclock)(unsigned long); | |
d3561b7f | 85 | |
93b1eab3 | 86 | unsigned long long (*sched_clock)(void); |
e93ef949 | 87 | unsigned long (*get_tsc_khz)(void); |
93b1eab3 | 88 | }; |
d3561b7f | 89 | |
93b1eab3 | 90 | struct pv_cpu_ops { |
294688c0 | 91 | /* hooks for various privileged instructions */ |
1a1eecd1 AK |
92 | unsigned long (*get_debugreg)(int regno); |
93 | void (*set_debugreg)(int regno, unsigned long value); | |
d3561b7f | 94 | |
1a1eecd1 | 95 | void (*clts)(void); |
d3561b7f | 96 | |
1a1eecd1 AK |
97 | unsigned long (*read_cr0)(void); |
98 | void (*write_cr0)(unsigned long); | |
d3561b7f | 99 | |
1a1eecd1 AK |
100 | unsigned long (*read_cr4_safe)(void); |
101 | unsigned long (*read_cr4)(void); | |
102 | void (*write_cr4)(unsigned long); | |
d3561b7f | 103 | |
4c9890c2 GOC |
104 | #ifdef CONFIG_X86_64 |
105 | unsigned long (*read_cr8)(void); | |
106 | void (*write_cr8)(unsigned long); | |
107 | #endif | |
108 | ||
294688c0 | 109 | /* Segment descriptor handling */ |
1a1eecd1 | 110 | void (*load_tr_desc)(void); |
6b68f01b GOC |
111 | void (*load_gdt)(const struct desc_ptr *); |
112 | void (*load_idt)(const struct desc_ptr *); | |
113 | void (*store_gdt)(struct desc_ptr *); | |
114 | void (*store_idt)(struct desc_ptr *); | |
1a1eecd1 AK |
115 | void (*set_ldt)(const void *desc, unsigned entries); |
116 | unsigned long (*store_tr)(void); | |
117 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); | |
9f9d489a JF |
118 | #ifdef CONFIG_X86_64 |
119 | void (*load_gs_index)(unsigned int idx); | |
120 | #endif | |
75b8bb3e GOC |
121 | void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, |
122 | const void *desc); | |
90a0a06a | 123 | void (*write_gdt_entry)(struct desc_struct *, |
014b15be | 124 | int entrynum, const void *desc, int size); |
8d947344 GOC |
125 | void (*write_idt_entry)(gate_desc *, |
126 | int entrynum, const gate_desc *gate); | |
faca6227 | 127 | void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); |
d3561b7f | 128 | |
1a1eecd1 | 129 | void (*set_iopl_mask)(unsigned mask); |
93b1eab3 JF |
130 | |
131 | void (*wbinvd)(void); | |
1a1eecd1 | 132 | void (*io_delay)(void); |
d3561b7f | 133 | |
93b1eab3 JF |
134 | /* cpuid emulation, mostly so that caps bits can be disabled */ |
135 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, | |
136 | unsigned int *ecx, unsigned int *edx); | |
137 | ||
138 | /* MSR, PMC and TSC operations. | |
139 | read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */ | |
140 | u64 (*read_msr)(unsigned int msr, int *err); | |
c9dcda5c | 141 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); |
93b1eab3 JF |
142 | |
143 | u64 (*read_tsc)(void); | |
b8d1fae7 | 144 | u64 (*read_pmc)(int counter); |
e5aaac44 | 145 | unsigned long long (*read_tscp)(unsigned int *aux); |
93b1eab3 | 146 | |
2be29982 JF |
147 | /* |
148 | * Atomically enable interrupts and return to userspace. This | |
149 | * is only ever used to return to 32-bit processes; in a | |
150 | * 64-bit kernel, it's used for 32-on-64 compat processes, but | |
151 | * never native 64-bit processes. (Jump, not call.) | |
152 | */ | |
d75cd22f | 153 | void (*irq_enable_sysexit)(void); |
2be29982 JF |
154 | |
155 | /* | |
156 | * Switch to usermode gs and return to 64-bit usermode using | |
157 | * sysret. Only used in 64-bit kernels to return to 64-bit | |
158 | * processes. Usermode register state, including %rsp, must | |
159 | * already be restored. | |
160 | */ | |
161 | void (*usergs_sysret64)(void); | |
162 | ||
163 | /* | |
164 | * Switch to usermode gs and return to 32-bit usermode using | |
165 | * sysret. Used to return to 32-on-64 compat processes. | |
166 | * Other usermode register state, including %esp, must already | |
167 | * be restored. | |
168 | */ | |
169 | void (*usergs_sysret32)(void); | |
170 | ||
171 | /* Normal iret. Jump to this with the standard iret stack | |
172 | frame set up. */ | |
93b1eab3 | 173 | void (*iret)(void); |
8965c1c0 | 174 | |
e801f864 GOC |
175 | void (*swapgs)(void); |
176 | ||
8965c1c0 | 177 | struct pv_lazy_ops lazy_mode; |
93b1eab3 JF |
178 | }; |
179 | ||
180 | struct pv_irq_ops { | |
181 | void (*init_IRQ)(void); | |
182 | ||
294688c0 | 183 | /* |
93b1eab3 JF |
184 | * Get/set interrupt state. save_fl and restore_fl are only |
185 | * expected to use X86_EFLAGS_IF; all other bits | |
186 | * returned from save_fl are undefined, and may be ignored by | |
187 | * restore_fl. | |
294688c0 | 188 | */ |
93b1eab3 JF |
189 | unsigned long (*save_fl)(void); |
190 | void (*restore_fl)(unsigned long); | |
191 | void (*irq_disable)(void); | |
192 | void (*irq_enable)(void); | |
193 | void (*safe_halt)(void); | |
194 | void (*halt)(void); | |
fab58420 JF |
195 | |
196 | #ifdef CONFIG_X86_64 | |
197 | void (*adjust_exception_frame)(void); | |
198 | #endif | |
93b1eab3 | 199 | }; |
d6dd61c8 | 200 | |
93b1eab3 | 201 | struct pv_apic_ops { |
13623d79 | 202 | #ifdef CONFIG_X86_LOCAL_APIC |
294688c0 JF |
203 | /* |
204 | * Direct APIC operations, principally for VMI. Ideally | |
205 | * these shouldn't be in this interface. | |
206 | */ | |
42e0a9aa TG |
207 | void (*apic_write)(unsigned long reg, u32 v); |
208 | void (*apic_write_atomic)(unsigned long reg, u32 v); | |
209 | u32 (*apic_read)(unsigned long reg); | |
bbab4f3b ZA |
210 | void (*setup_boot_clock)(void); |
211 | void (*setup_secondary_clock)(void); | |
294688c0 JF |
212 | |
213 | void (*startup_ipi_hook)(int phys_apicid, | |
214 | unsigned long start_eip, | |
215 | unsigned long start_esp); | |
13623d79 | 216 | #endif |
93b1eab3 JF |
217 | }; |
218 | ||
219 | struct pv_mmu_ops { | |
220 | /* | |
221 | * Called before/after init_mm pagetable setup. setup_start | |
222 | * may reset %cr3, and may pre-install parts of the pagetable; | |
223 | * pagetable setup is expected to preserve any existing | |
224 | * mapping. | |
225 | */ | |
226 | void (*pagetable_setup_start)(pgd_t *pgd_base); | |
227 | void (*pagetable_setup_done)(pgd_t *pgd_base); | |
228 | ||
229 | unsigned long (*read_cr2)(void); | |
230 | void (*write_cr2)(unsigned long); | |
231 | ||
232 | unsigned long (*read_cr3)(void); | |
233 | void (*write_cr3)(unsigned long); | |
234 | ||
235 | /* | |
236 | * Hooks for intercepting the creation/use/destruction of an | |
237 | * mm_struct. | |
238 | */ | |
239 | void (*activate_mm)(struct mm_struct *prev, | |
240 | struct mm_struct *next); | |
241 | void (*dup_mmap)(struct mm_struct *oldmm, | |
242 | struct mm_struct *mm); | |
243 | void (*exit_mmap)(struct mm_struct *mm); | |
244 | ||
13623d79 | 245 | |
294688c0 | 246 | /* TLB operations */ |
1a1eecd1 AK |
247 | void (*flush_tlb_user)(void); |
248 | void (*flush_tlb_kernel)(void); | |
f8822f42 | 249 | void (*flush_tlb_single)(unsigned long addr); |
d4c10477 JF |
250 | void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, |
251 | unsigned long va); | |
1a1eecd1 | 252 | |
eba0045f JF |
253 | /* Hooks for allocating and freeing a pagetable top-level */ |
254 | int (*pgd_alloc)(struct mm_struct *mm); | |
255 | void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); | |
256 | ||
257 | /* | |
258 | * Hooks for allocating/releasing pagetable pages when they're | |
259 | * attached to a pagetable | |
260 | */ | |
6944a9c8 JF |
261 | void (*alloc_pte)(struct mm_struct *mm, u32 pfn); |
262 | void (*alloc_pmd)(struct mm_struct *mm, u32 pfn); | |
263 | void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); | |
2761fa09 | 264 | void (*alloc_pud)(struct mm_struct *mm, u32 pfn); |
6944a9c8 JF |
265 | void (*release_pte)(u32 pfn); |
266 | void (*release_pmd)(u32 pfn); | |
2761fa09 | 267 | void (*release_pud)(u32 pfn); |
1a1eecd1 | 268 | |
294688c0 | 269 | /* Pagetable manipulation functions */ |
1a1eecd1 | 270 | void (*set_pte)(pte_t *ptep, pte_t pteval); |
294688c0 JF |
271 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, |
272 | pte_t *ptep, pte_t pteval); | |
1a1eecd1 | 273 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); |
49cd740b JP |
274 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, |
275 | pte_t *ptep); | |
294688c0 JF |
276 | void (*pte_update_defer)(struct mm_struct *mm, |
277 | unsigned long addr, pte_t *ptep); | |
3dc494e8 | 278 | |
08b882c6 JF |
279 | pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, |
280 | pte_t *ptep); | |
281 | void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, | |
282 | pte_t *ptep, pte_t pte); | |
283 | ||
5b8dd1e9 | 284 | pteval_t (*pte_val)(pte_t); |
a15af1c9 | 285 | pteval_t (*pte_flags)(pte_t); |
5b8dd1e9 JF |
286 | pte_t (*make_pte)(pteval_t pte); |
287 | ||
288 | pgdval_t (*pgd_val)(pgd_t); | |
289 | pgd_t (*make_pgd)(pgdval_t pgd); | |
290 | ||
291 | #if PAGETABLE_LEVELS >= 3 | |
da181a8b | 292 | #ifdef CONFIG_X86_PAE |
1a1eecd1 | 293 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
93b1eab3 JF |
294 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, |
295 | pte_t *ptep, pte_t pte); | |
49cd740b JP |
296 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, |
297 | pte_t *ptep); | |
1a1eecd1 | 298 | void (*pmd_clear)(pmd_t *pmdp); |
3dc494e8 | 299 | |
5b8dd1e9 | 300 | #endif /* CONFIG_X86_PAE */ |
3dc494e8 | 301 | |
5b8dd1e9 | 302 | void (*set_pud)(pud_t *pudp, pud_t pudval); |
3dc494e8 | 303 | |
5b8dd1e9 JF |
304 | pmdval_t (*pmd_val)(pmd_t); |
305 | pmd_t (*make_pmd)(pmdval_t pmd); | |
306 | ||
307 | #if PAGETABLE_LEVELS == 4 | |
308 | pudval_t (*pud_val)(pud_t); | |
309 | pud_t (*make_pud)(pudval_t pud); | |
9042219c EH |
310 | |
311 | void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); | |
5b8dd1e9 JF |
312 | #endif /* PAGETABLE_LEVELS == 4 */ |
313 | #endif /* PAGETABLE_LEVELS >= 3 */ | |
da181a8b | 314 | |
93b1eab3 JF |
315 | #ifdef CONFIG_HIGHPTE |
316 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | |
317 | #endif | |
8965c1c0 JF |
318 | |
319 | struct pv_lazy_ops lazy_mode; | |
aeaaa59c JF |
320 | |
321 | /* dom0 ops */ | |
322 | ||
323 | /* Sometimes the physical address is a pfn, and sometimes it's | |
324 | an mfn. We can tell which is which from the index. */ | |
325 | void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, | |
326 | unsigned long phys, pgprot_t flags); | |
93b1eab3 | 327 | }; |
9226d125 | 328 | |
93b1eab3 JF |
329 | /* This contains all the paravirt structures: each function gets a |
330 | * convenient number from its offset into the template, which we use |
331 | * to indicate what to patch. */ |
49cd740b | 332 | struct paravirt_patch_template { |
93b1eab3 | 333 | struct pv_init_ops pv_init_ops; |
93b1eab3 JF |
334 | struct pv_time_ops pv_time_ops; |
335 | struct pv_cpu_ops pv_cpu_ops; | |
336 | struct pv_irq_ops pv_irq_ops; | |
337 | struct pv_apic_ops pv_apic_ops; | |
338 | struct pv_mmu_ops pv_mmu_ops; | |
d3561b7f RR |
339 | }; |
340 | ||
93b1eab3 JF |
341 | extern struct pv_info pv_info; |
342 | extern struct pv_init_ops pv_init_ops; | |
93b1eab3 JF |
343 | extern struct pv_time_ops pv_time_ops; |
344 | extern struct pv_cpu_ops pv_cpu_ops; | |
345 | extern struct pv_irq_ops pv_irq_ops; | |
346 | extern struct pv_apic_ops pv_apic_ops; | |
347 | extern struct pv_mmu_ops pv_mmu_ops; | |
d3561b7f | 348 | |
d5822035 | 349 | #define PARAVIRT_PATCH(x) \ |
93b1eab3 | 350 | (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) |
d5822035 | 351 | |
93b1eab3 JF |
352 | #define paravirt_type(op) \ |
353 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ | |
354 | [paravirt_opptr] "m" (op) | |
d5822035 JF |
355 | #define paravirt_clobber(clobber) \ |
356 | [paravirt_clobber] "i" (clobber) | |
357 | ||
294688c0 JF |
358 | /* |
359 | * Generate some code, and mark it as patchable by the | |
360 | * apply_paravirt() alternate instruction patcher. | |
361 | */ | |
d5822035 JF |
362 | #define _paravirt_alt(insn_string, type, clobber) \ |
363 | "771:\n\t" insn_string "\n" "772:\n" \ | |
364 | ".pushsection .parainstructions,\"a\"\n" \ | |
658be9d3 GOC |
365 | _ASM_ALIGN "\n" \ |
366 | _ASM_PTR " 771b\n" \ | |
d5822035 JF |
367 | " .byte " type "\n" \ |
368 | " .byte 772b-771b\n" \ | |
369 | " .short " clobber "\n" \ | |
370 | ".popsection\n" | |
371 | ||
294688c0 | 372 | /* Generate patchable code, with the default asm parameters. */ |
f8822f42 | 373 | #define paravirt_alt(insn_string) \ |
d5822035 JF |
374 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") |
375 | ||
2f485ef5 GOC |
376 | /* Simple instruction patching code. */ |
377 | #define DEF_NATIVE(ops, name, code) \ | |
378 | extern const char start_##ops##_##name[], end_##ops##_##name[]; \ | |
379 | asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") | |
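/*
 * Typical use (a sketch; the real sites live in the per-arch
 * paravirt_patch_*.c files), e.g.:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * which places the native instruction bytes between the
 * start_pv_irq_ops_irq_disable/end_pv_irq_ops_irq_disable labels so that
 * native_patch() can copy them over a call site with paravirt_patch_insns().
 */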
380 | ||
63f70270 JF |
381 | unsigned paravirt_patch_nop(void); |
382 | unsigned paravirt_patch_ignore(unsigned len); | |
ab144f5e AK |
383 | unsigned paravirt_patch_call(void *insnbuf, |
384 | const void *target, u16 tgt_clobbers, | |
385 | unsigned long addr, u16 site_clobbers, | |
63f70270 | 386 | unsigned len); |
93b1eab3 | 387 | unsigned paravirt_patch_jmp(void *insnbuf, const void *target, |
ab144f5e AK |
388 | unsigned long addr, unsigned len); |
389 | unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, | |
390 | unsigned long addr, unsigned len); | |
63f70270 | 391 | |
ab144f5e | 392 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, |
63f70270 JF |
393 | const char *start, const char *end); |
394 | ||
2f485ef5 GOC |
395 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, |
396 | unsigned long addr, unsigned len); | |
397 | ||
d572929c | 398 | int paravirt_disable_iospace(void); |
63f70270 | 399 | |
294688c0 JF |
400 | /* |
401 | * This generates an indirect call based on the operation type number. | |
402 | * The type number, computed in PARAVIRT_PATCH, is derived from the | |
93b1eab3 JF |
403 | * offset into the paravirt_patch_template structure, and can therefore be |
404 | * freely converted back into a structure offset. | |
294688c0 | 405 | */ |
93b1eab3 | 406 | #define PARAVIRT_CALL "call *%[paravirt_opptr];" |
294688c0 JF |
407 | |
408 | /* | |
93b1eab3 JF |
409 | * These macros are intended to wrap calls through one of the paravirt |
410 | * ops structs, so that they can be later identified and patched at | |
294688c0 JF |
411 | * runtime. |
412 | * | |
413 | * Normally, a call to a pv_op function is a simple indirect call: | |
a4746364 | 414 | * (pv_op_struct.operations)(args...). |
294688c0 JF |
415 | * |
416 | * Unfortunately, this is a relatively slow operation for modern CPUs, | |
417 | * because it cannot necessarily determine what the destination | |
418 | * address is. In this case, the address is a runtime constant, so at | |
419 | * the very least we can patch the call to be a simple direct call, or |
420 | * ideally, patch an inline implementation into the callsite. (Direct | |
421 | * calls are essentially free, because the call and return addresses | |
422 | * are completely predictable.) | |
423 | * | |
a4746364 | 424 | * For i386, these macros rely on the standard gcc "regparm(3)" calling |
294688c0 JF |
425 | * convention, in which the first three arguments are placed in %eax, |
426 | * %edx, %ecx (in that order), and the remaining arguments are placed | |
427 | * on the stack. All caller-save registers (eax,edx,ecx) are expected | |
428 | * to be modified (either clobbered or used for return values). | |
a4746364 GOC |
429 | * X86_64, on the other hand, already specifies a register-based calling |
430 | * convention, returning in %rax, with parameters going in %rdi, %rsi, |
431 | * %rdx, and %rcx. For this reason, x86_64 does not need any special |
432 | * handling for dealing with 4 arguments, unlike i386. |
433 | * However, x86_64 also has to clobber all caller-saved registers, which |
434 | * unfortunately are quite a few (r8 - r11). |
294688c0 JF |
435 | * |
436 | * The call instruction itself is marked by placing its start address | |
437 | * and size into the .parainstructions section, so that | |
438 | * apply_paravirt() in arch/i386/kernel/alternative.c can do the | |
93b1eab3 | 439 | * appropriate patching under the control of the backend pv_init_ops |
294688c0 JF |
440 | * implementation. |
441 | * | |
442 | * Unfortunately there's no way to get gcc to generate the args setup | |
443 | * for the call, and then allow the call itself to be generated by an | |
444 | * inline asm. Because of this, we must do the complete arg setup and | |
445 | * return value handling from within these macros. This is fairly | |
446 | * cumbersome. | |
447 | * | |
448 | * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. | |
449 | * It could be extended to more arguments, but there would be little | |
450 | * to be gained from that. For each number of arguments, there are | |
451 | * the two VCALL and CALL variants for void and non-void functions. | |
452 | * | |
453 | * When there is a return value, the invoker of the macro must specify | |
454 | * the return type. The macro then uses sizeof() on that type to | |
455 | * determine whether it's a 32- or 64-bit value, and places the return |
456 | * in the right register(s) (just %eax for 32-bit, and %edx:%eax for | |
a4746364 GOC |
457 | * 64-bit). For x86_64 machines, it just returns in %rax regardless of |
458 | * the return value size. | |
294688c0 JF |
459 | * |
460 | * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit |
a4746364 GOC |
461 | * arguments, in low,high order; on x86_64 a 64-bit argument simply |
462 | * occupies a single register. |
294688c0 JF |
463 | * |
464 | * Small structures are passed and returned in registers. The macro | |
465 | * calling convention can't directly deal with this, so the wrapper | |
466 | * functions must do this. | |
467 | * | |
468 | * These PVOP_* macros are only defined within this header. This | |
469 | * means that all uses must be wrapped in inline functions. This also | |
470 | * makes sure the incoming and outgoing types are always correct. | |
471 | */ | |
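/*
 * As a concrete sketch of the pattern described above, a zero-argument,
 * non-void wrapper further down in this header looks like:
 *
 *	static inline unsigned long read_cr0(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
 *	}
 *
 * i.e. every PVOP_* use is wrapped in an inline function with properly
 * typed arguments and return value.
 */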
a4746364 GOC |
472 | #ifdef CONFIG_X86_32 |
473 | #define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx | |
474 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | |
475 | #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ | |
476 | "=c" (__ecx) | |
477 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS | |
478 | #define EXTRA_CLOBBERS | |
479 | #define VEXTRA_CLOBBERS | |
480 | #else | |
481 | #define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx | |
482 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax | |
483 | #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ | |
484 | "=S" (__esi), "=d" (__edx), \ | |
485 | "=c" (__ecx) | |
486 | ||
487 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) | |
488 | ||
489 | #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" | |
490 | #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" | |
491 | #endif | |
492 | ||
97349135 JF |
493 | #ifdef CONFIG_PARAVIRT_DEBUG |
494 | #define PVOP_TEST_NULL(op) BUG_ON(op == NULL) | |
495 | #else | |
496 | #define PVOP_TEST_NULL(op) ((void)op) | |
497 | #endif | |
498 | ||
1a45b7aa | 499 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ |
f8822f42 | 500 | ({ \ |
1a45b7aa | 501 | rettype __ret; \ |
a4746364 | 502 | PVOP_CALL_ARGS; \ |
97349135 | 503 | PVOP_TEST_NULL(op); \ |
a4746364 GOC |
504 | /* This is 32-bit specific, but is okay in 64-bit */ \ |
505 | /* since this condition will never hold */ \ | |
1a45b7aa JF |
506 | if (sizeof(rettype) > sizeof(unsigned long)) { \ |
507 | asm volatile(pre \ | |
508 | paravirt_alt(PARAVIRT_CALL) \ | |
509 | post \ | |
a4746364 | 510 | : PVOP_CALL_CLOBBERS \ |
1a45b7aa JF |
511 | : paravirt_type(op), \ |
512 | paravirt_clobber(CLBR_ANY), \ | |
513 | ##__VA_ARGS__ \ | |
a4746364 | 514 | : "memory", "cc" EXTRA_CLOBBERS); \ |
1a45b7aa | 515 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ |
f8822f42 | 516 | } else { \ |
1a45b7aa | 517 | asm volatile(pre \ |
f8822f42 | 518 | paravirt_alt(PARAVIRT_CALL) \ |
1a45b7aa | 519 | post \ |
a4746364 | 520 | : PVOP_CALL_CLOBBERS \ |
1a45b7aa JF |
521 | : paravirt_type(op), \ |
522 | paravirt_clobber(CLBR_ANY), \ | |
523 | ##__VA_ARGS__ \ | |
a4746364 | 524 | : "memory", "cc" EXTRA_CLOBBERS); \ |
1a45b7aa | 525 | __ret = (rettype)__eax; \ |
f8822f42 JF |
526 | } \ |
527 | __ret; \ | |
528 | }) | |
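/*
 * Note on the branch above (a sketch of why it exists): with regparm(3) on
 * 32-bit, a u64 return value comes back in %edx:%eax, so the
 * sizeof(rettype) > sizeof(unsigned long) case reassembles it as
 * (((u64)__edx) << 32) | __eax.  On 64-bit no return type is wider than
 * unsigned long, so that branch is never taken there.
 */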
1a45b7aa | 529 | #define __PVOP_VCALL(op, pre, post, ...) \ |
f8822f42 | 530 | ({ \ |
a4746364 | 531 | PVOP_VCALL_ARGS; \ |
97349135 | 532 | PVOP_TEST_NULL(op); \ |
1a45b7aa | 533 | asm volatile(pre \ |
f8822f42 | 534 | paravirt_alt(PARAVIRT_CALL) \ |
1a45b7aa | 535 | post \ |
a4746364 | 536 | : PVOP_VCALL_CLOBBERS \ |
1a45b7aa JF |
537 | : paravirt_type(op), \ |
538 | paravirt_clobber(CLBR_ANY), \ | |
539 | ##__VA_ARGS__ \ | |
a4746364 | 540 | : "memory", "cc" VEXTRA_CLOBBERS); \ |
f8822f42 JF |
541 | }) |
542 | ||
1a45b7aa JF |
543 | #define PVOP_CALL0(rettype, op) \ |
544 | __PVOP_CALL(rettype, op, "", "") | |
545 | #define PVOP_VCALL0(op) \ | |
546 | __PVOP_VCALL(op, "", "") | |
547 | ||
548 | #define PVOP_CALL1(rettype, op, arg1) \ | |
a4746364 | 549 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) |
1a45b7aa | 550 | #define PVOP_VCALL1(op, arg1) \ |
a4746364 | 551 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) |
1a45b7aa JF |
552 | |
553 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ | |
a4746364 GOC |
554 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ |
555 | "1" ((unsigned long)(arg2))) | |
1a45b7aa | 556 | #define PVOP_VCALL2(op, arg1, arg2) \ |
a4746364 GOC |
557 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ |
558 | "1" ((unsigned long)(arg2))) | |
1a45b7aa JF |
559 | |
560 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ | |
a4746364 GOC |
561 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ |
562 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) | |
1a45b7aa | 563 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ |
a4746364 GOC |
564 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ |
565 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) | |
1a45b7aa | 566 | |
a4746364 GOC |
567 | /* The 4-argument case is the only one that differs on x86_64, where it is much simpler */ |
568 | #ifdef CONFIG_X86_32 | |
1a45b7aa JF |
569 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ |
570 | __PVOP_CALL(rettype, op, \ | |
571 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | |
572 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ | |
573 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) | |
574 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | |
575 | __PVOP_VCALL(op, \ | |
576 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | |
577 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ | |
578 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) | |
a4746364 GOC |
579 | #else |
580 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | |
581 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ | |
582 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ | |
583 | "3"((unsigned long)(arg4))) | |
584 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | |
585 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ | |
586 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ | |
587 | "3"((unsigned long)(arg4))) | |
588 | #endif | |
1a45b7aa | 589 | |
f8822f42 JF |
590 | static inline int paravirt_enabled(void) |
591 | { | |
93b1eab3 | 592 | return pv_info.paravirt_enabled; |
f8822f42 | 593 | } |
d3561b7f | 594 | |
faca6227 | 595 | static inline void load_sp0(struct tss_struct *tss, |
d3561b7f RR |
596 | struct thread_struct *thread) |
597 | { | |
faca6227 | 598 | PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); |
d3561b7f RR |
599 | } |
600 | ||
93b1eab3 | 601 | #define ARCH_SETUP pv_init_ops.arch_setup(); |
d3561b7f RR |
602 | static inline unsigned long get_wallclock(void) |
603 | { | |
93b1eab3 | 604 | return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock); |
d3561b7f RR |
605 | } |
606 | ||
607 | static inline int set_wallclock(unsigned long nowtime) | |
608 | { | |
93b1eab3 | 609 | return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime); |
d3561b7f RR |
610 | } |
611 | ||
e30fab3a | 612 | static inline void (*choose_time_init(void))(void) |
d3561b7f | 613 | { |
93b1eab3 | 614 | return pv_time_ops.time_init; |
d3561b7f RR |
615 | } |
616 | ||
617 | /* The paravirtualized CPUID instruction. */ | |
618 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | |
619 | unsigned int *ecx, unsigned int *edx) | |
620 | { | |
93b1eab3 | 621 | PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx); |
d3561b7f RR |
622 | } |
623 | ||
624 | /* | |
625 | * These special macros can be used to get or set a debugging register | |
626 | */ | |
f8822f42 JF |
627 | static inline unsigned long paravirt_get_debugreg(int reg) |
628 | { | |
93b1eab3 | 629 | return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg); |
f8822f42 JF |
630 | } |
631 | #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) | |
632 | static inline void set_debugreg(unsigned long val, int reg) | |
633 | { | |
93b1eab3 | 634 | PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val); |
f8822f42 | 635 | } |
d3561b7f | 636 | |
f8822f42 JF |
637 | static inline void clts(void) |
638 | { | |
93b1eab3 | 639 | PVOP_VCALL0(pv_cpu_ops.clts); |
f8822f42 | 640 | } |
d3561b7f | 641 | |
f8822f42 JF |
642 | static inline unsigned long read_cr0(void) |
643 | { | |
93b1eab3 | 644 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0); |
f8822f42 | 645 | } |
d3561b7f | 646 | |
f8822f42 JF |
647 | static inline void write_cr0(unsigned long x) |
648 | { | |
93b1eab3 | 649 | PVOP_VCALL1(pv_cpu_ops.write_cr0, x); |
f8822f42 JF |
650 | } |
651 | ||
652 | static inline unsigned long read_cr2(void) | |
653 | { | |
93b1eab3 | 654 | return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2); |
f8822f42 JF |
655 | } |
656 | ||
657 | static inline void write_cr2(unsigned long x) | |
658 | { | |
93b1eab3 | 659 | PVOP_VCALL1(pv_mmu_ops.write_cr2, x); |
f8822f42 JF |
660 | } |
661 | ||
662 | static inline unsigned long read_cr3(void) | |
663 | { | |
93b1eab3 | 664 | return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3); |
f8822f42 | 665 | } |
d3561b7f | 666 | |
f8822f42 JF |
667 | static inline void write_cr3(unsigned long x) |
668 | { | |
93b1eab3 | 669 | PVOP_VCALL1(pv_mmu_ops.write_cr3, x); |
f8822f42 | 670 | } |
d3561b7f | 671 | |
f8822f42 JF |
672 | static inline unsigned long read_cr4(void) |
673 | { | |
93b1eab3 | 674 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4); |
f8822f42 JF |
675 | } |
676 | static inline unsigned long read_cr4_safe(void) | |
677 | { | |
93b1eab3 | 678 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe); |
f8822f42 | 679 | } |
d3561b7f | 680 | |
f8822f42 JF |
681 | static inline void write_cr4(unsigned long x) |
682 | { | |
93b1eab3 | 683 | PVOP_VCALL1(pv_cpu_ops.write_cr4, x); |
f8822f42 | 684 | } |
3dc494e8 | 685 | |
94ea03cd | 686 | #ifdef CONFIG_X86_64 |
4c9890c2 GOC |
687 | static inline unsigned long read_cr8(void) |
688 | { | |
689 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8); | |
690 | } | |
691 | ||
692 | static inline void write_cr8(unsigned long x) | |
693 | { | |
694 | PVOP_VCALL1(pv_cpu_ops.write_cr8, x); | |
695 | } | |
94ea03cd | 696 | #endif |
4c9890c2 | 697 | |
d3561b7f RR |
698 | static inline void raw_safe_halt(void) |
699 | { | |
93b1eab3 | 700 | PVOP_VCALL0(pv_irq_ops.safe_halt); |
d3561b7f RR |
701 | } |
702 | ||
703 | static inline void halt(void) | |
704 | { | |
93b1eab3 | 705 | PVOP_VCALL0(pv_irq_ops.halt);
f8822f42 JF |
706 | } |
707 | ||
708 | static inline void wbinvd(void) | |
709 | { | |
93b1eab3 | 710 | PVOP_VCALL0(pv_cpu_ops.wbinvd); |
d3561b7f | 711 | } |
d3561b7f | 712 | |
93b1eab3 | 713 | #define get_kernel_rpl() (pv_info.kernel_rpl) |
d3561b7f | 714 | |
f8822f42 JF |
715 | static inline u64 paravirt_read_msr(unsigned msr, int *err) |
716 | { | |
93b1eab3 | 717 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); |
f8822f42 JF |
718 | } |
719 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | |
720 | { | |
93b1eab3 | 721 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); |
f8822f42 JF |
722 | } |
723 | ||
90a0a06a | 724 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ |
49cd740b JP |
725 | #define rdmsr(msr, val1, val2) \ |
726 | do { \ | |
f8822f42 JF |
727 | int _err; \ |
728 | u64 _l = paravirt_read_msr(msr, &_err); \ | |
729 | val1 = (u32)_l; \ | |
730 | val2 = _l >> 32; \ | |
49cd740b | 731 | } while (0) |
d3561b7f | 732 | |
49cd740b JP |
733 | #define wrmsr(msr, val1, val2) \ |
734 | do { \ | |
f8822f42 | 735 | paravirt_write_msr(msr, val1, val2); \ |
49cd740b | 736 | } while (0) |
d3561b7f | 737 | |
49cd740b JP |
738 | #define rdmsrl(msr, val) \ |
739 | do { \ | |
f8822f42 JF |
740 | int _err; \ |
741 | val = paravirt_read_msr(msr, &_err); \ | |
49cd740b | 742 | } while (0) |
d3561b7f | 743 | |
49cd740b JP |
744 | #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) |
745 | #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) | |
d3561b7f RR |
746 | |
747 | /* rdmsr with exception handling */ | |
49cd740b JP |
748 | #define rdmsr_safe(msr, a, b) \ |
749 | ({ \ | |
f8822f42 JF |
750 | int _err; \ |
751 | u64 _l = paravirt_read_msr(msr, &_err); \ | |
752 | (*a) = (u32)_l; \ | |
753 | (*b) = _l >> 32; \ | |
49cd740b JP |
754 | _err; \ |
755 | }) | |
d3561b7f | 756 | |
1de87bd4 AK |
757 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) |
758 | { | |
759 | int err; | |
760 | ||
761 | *p = paravirt_read_msr(msr, &err); | |
762 | return err; | |
763 | } | |
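/*
 * Usage sketch (MSR_EFER is just an example constant defined elsewhere):
 *
 *	unsigned low, high;
 *	rdmsr(MSR_EFER, low, high);
 *	wrmsr(MSR_EFER, low, high);
 *
 * The *_safe variants report the 0/-EFAULT error code instead of assuming
 * the access succeeds.
 */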
f8822f42 JF |
764 | |
765 | static inline u64 paravirt_read_tsc(void) | |
766 | { | |
93b1eab3 | 767 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); |
f8822f42 | 768 | } |
d3561b7f | 769 | |
49cd740b JP |
770 | #define rdtscl(low) \ |
771 | do { \ | |
f8822f42 JF |
772 | u64 _l = paravirt_read_tsc(); \ |
773 | low = (int)_l; \ | |
49cd740b | 774 | } while (0) |
d3561b7f | 775 | |
f8822f42 | 776 | #define rdtscll(val) (val = paravirt_read_tsc()) |
d3561b7f | 777 | |
688340ea JF |
778 | static inline unsigned long long paravirt_sched_clock(void) |
779 | { | |
93b1eab3 | 780 | return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); |
688340ea | 781 | } |
e93ef949 | 782 | #define calibrate_tsc() (pv_time_ops.get_tsc_khz()) |
6cb9a835 | 783 | |
f8822f42 JF |
784 | static inline unsigned long long paravirt_read_pmc(int counter) |
785 | { | |
93b1eab3 | 786 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); |
f8822f42 | 787 | } |
d3561b7f | 788 | |
49cd740b JP |
789 | #define rdpmc(counter, low, high) \ |
790 | do { \ | |
f8822f42 JF |
791 | u64 _l = paravirt_read_pmc(counter); \ |
792 | low = (u32)_l; \ | |
793 | high = _l >> 32; \ | |
49cd740b | 794 | } while (0) |
3dc494e8 | 795 | |
e5aaac44 GOC |
796 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) |
797 | { | |
798 | return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); | |
799 | } | |
800 | ||
801 | #define rdtscp(low, high, aux) \ | |
802 | do { \ | |
803 | int __aux; \ | |
804 | unsigned long __val = paravirt_rdtscp(&__aux); \ | |
805 | (low) = (u32)__val; \ | |
806 | (high) = (u32)(__val >> 32); \ | |
807 | (aux) = __aux; \ | |
808 | } while (0) | |
809 | ||
810 | #define rdtscpll(val, aux) \ | |
811 | do { \ | |
812 | unsigned long __aux; \ | |
813 | val = paravirt_rdtscp(&__aux); \ | |
814 | (aux) = __aux; \ | |
815 | } while (0) | |
816 | ||
f8822f42 JF |
817 | static inline void load_TR_desc(void) |
818 | { | |
93b1eab3 | 819 | PVOP_VCALL0(pv_cpu_ops.load_tr_desc); |
f8822f42 | 820 | } |
6b68f01b | 821 | static inline void load_gdt(const struct desc_ptr *dtr) |
f8822f42 | 822 | { |
93b1eab3 | 823 | PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); |
f8822f42 | 824 | } |
6b68f01b | 825 | static inline void load_idt(const struct desc_ptr *dtr) |
f8822f42 | 826 | { |
93b1eab3 | 827 | PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); |
f8822f42 JF |
828 | } |
829 | static inline void set_ldt(const void *addr, unsigned entries) | |
830 | { | |
93b1eab3 | 831 | PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); |
f8822f42 | 832 | } |
6b68f01b | 833 | static inline void store_gdt(struct desc_ptr *dtr) |
f8822f42 | 834 | { |
93b1eab3 | 835 | PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); |
f8822f42 | 836 | } |
6b68f01b | 837 | static inline void store_idt(struct desc_ptr *dtr) |
f8822f42 | 838 | { |
93b1eab3 | 839 | PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); |
f8822f42 JF |
840 | } |
841 | static inline unsigned long paravirt_store_tr(void) | |
842 | { | |
93b1eab3 | 843 | return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); |
f8822f42 JF |
844 | } |
845 | #define store_tr(tr) ((tr) = paravirt_store_tr()) | |
846 | static inline void load_TLS(struct thread_struct *t, unsigned cpu) | |
847 | { | |
93b1eab3 | 848 | PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); |
f8822f42 | 849 | } |
75b8bb3e | 850 | |
9f9d489a JF |
851 | #ifdef CONFIG_X86_64 |
852 | static inline void load_gs_index(unsigned int gs) | |
853 | { | |
854 | PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs); | |
855 | } | |
856 | #endif | |
857 | ||
75b8bb3e GOC |
858 | static inline void write_ldt_entry(struct desc_struct *dt, int entry, |
859 | const void *desc) | |
f8822f42 | 860 | { |
75b8bb3e | 861 | PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc); |
f8822f42 | 862 | } |
014b15be GOC |
863 | |
864 | static inline void write_gdt_entry(struct desc_struct *dt, int entry, | |
865 | void *desc, int type) | |
f8822f42 | 866 | { |
014b15be | 867 | PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type); |
f8822f42 | 868 | } |
014b15be | 869 | |
8d947344 | 870 | static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) |
f8822f42 | 871 | { |
8d947344 | 872 | PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g); |
f8822f42 JF |
873 | } |
874 | static inline void set_iopl_mask(unsigned mask) | |
875 | { | |
93b1eab3 | 876 | PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask); |
f8822f42 | 877 | } |
3dc494e8 | 878 | |
d3561b7f | 879 | /* The paravirtualized I/O functions */ |
49cd740b JP |
880 | static inline void slow_down_io(void) |
881 | { | |
93b1eab3 | 882 | pv_cpu_ops.io_delay(); |
d3561b7f | 883 | #ifdef REALLY_SLOW_IO |
93b1eab3 JF |
884 | pv_cpu_ops.io_delay(); |
885 | pv_cpu_ops.io_delay(); | |
886 | pv_cpu_ops.io_delay(); | |
d3561b7f RR |
887 | #endif |
888 | } | |
889 | ||
13623d79 RR |
890 | #ifdef CONFIG_X86_LOCAL_APIC |
891 | /* | |
892 | * Basic functions accessing APICs. | |
893 | */ | |
42e0a9aa | 894 | static inline void apic_write(unsigned long reg, u32 v) |
13623d79 | 895 | { |
93b1eab3 | 896 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); |
13623d79 RR |
897 | } |
898 | ||
42e0a9aa | 899 | static inline void apic_write_atomic(unsigned long reg, u32 v) |
13623d79 | 900 | { |
93b1eab3 | 901 | PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); |
13623d79 RR |
902 | } |
903 | ||
42e0a9aa | 904 | static inline u32 apic_read(unsigned long reg) |
13623d79 | 905 | { |
93b1eab3 | 906 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); |
13623d79 | 907 | } |
bbab4f3b ZA |
908 | |
909 | static inline void setup_boot_clock(void) | |
910 | { | |
93b1eab3 | 911 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); |
bbab4f3b ZA |
912 | } |
913 | ||
914 | static inline void setup_secondary_clock(void) | |
915 | { | |
93b1eab3 | 916 | PVOP_VCALL0(pv_apic_ops.setup_secondary_clock); |
bbab4f3b | 917 | } |
13623d79 RR |
918 | #endif |
919 | ||
6996d3b6 JF |
920 | static inline void paravirt_post_allocator_init(void) |
921 | { | |
93b1eab3 JF |
922 | if (pv_init_ops.post_allocator_init) |
923 | (*pv_init_ops.post_allocator_init)(); | |
6996d3b6 JF |
924 | } |
925 | ||
b239fb25 JF |
926 | static inline void paravirt_pagetable_setup_start(pgd_t *base) |
927 | { | |
93b1eab3 | 928 | (*pv_mmu_ops.pagetable_setup_start)(base); |
b239fb25 JF |
929 | } |
930 | ||
931 | static inline void paravirt_pagetable_setup_done(pgd_t *base) | |
932 | { | |
93b1eab3 | 933 | (*pv_mmu_ops.pagetable_setup_done)(base); |
b239fb25 | 934 | } |
3dc494e8 | 935 | |
ae5da273 ZA |
936 | #ifdef CONFIG_SMP |
937 | static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |
938 | unsigned long start_esp) | |
939 | { | |
93b1eab3 JF |
940 | PVOP_VCALL3(pv_apic_ops.startup_ipi_hook, |
941 | phys_apicid, start_eip, start_esp); | |
ae5da273 ZA |
942 | } |
943 | #endif | |
13623d79 | 944 | |
d6dd61c8 JF |
945 | static inline void paravirt_activate_mm(struct mm_struct *prev, |
946 | struct mm_struct *next) | |
947 | { | |
93b1eab3 | 948 | PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next); |
d6dd61c8 JF |
949 | } |
950 | ||
951 | static inline void arch_dup_mmap(struct mm_struct *oldmm, | |
952 | struct mm_struct *mm) | |
953 | { | |
93b1eab3 | 954 | PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm); |
d6dd61c8 JF |
955 | } |
956 | ||
957 | static inline void arch_exit_mmap(struct mm_struct *mm) | |
958 | { | |
93b1eab3 | 959 | PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm); |
d6dd61c8 JF |
960 | } |
961 | ||
f8822f42 JF |
962 | static inline void __flush_tlb(void) |
963 | { | |
93b1eab3 | 964 | PVOP_VCALL0(pv_mmu_ops.flush_tlb_user); |
f8822f42 JF |
965 | } |
966 | static inline void __flush_tlb_global(void) | |
967 | { | |
93b1eab3 | 968 | PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); |
f8822f42 JF |
969 | } |
970 | static inline void __flush_tlb_single(unsigned long addr) | |
971 | { | |
93b1eab3 | 972 | PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); |
f8822f42 | 973 | } |
da181a8b | 974 | |
d4c10477 JF |
975 | static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, |
976 | unsigned long va) | |
977 | { | |
93b1eab3 | 978 | PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); |
d4c10477 JF |
979 | } |
980 | ||
eba0045f JF |
981 | static inline int paravirt_pgd_alloc(struct mm_struct *mm) |
982 | { | |
983 | return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm); | |
984 | } | |
985 | ||
986 | static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
987 | { | |
988 | PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); | |
989 | } | |
990 | ||
6944a9c8 | 991 | static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn) |
f8822f42 | 992 | { |
6944a9c8 | 993 | PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); |
f8822f42 | 994 | } |
6944a9c8 | 995 | static inline void paravirt_release_pte(unsigned pfn) |
f8822f42 | 996 | { |
6944a9c8 | 997 | PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); |
f8822f42 | 998 | } |
c119ecce | 999 | |
6944a9c8 | 1000 | static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn) |
f8822f42 | 1001 | { |
6944a9c8 | 1002 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); |
f8822f42 | 1003 | } |
c119ecce | 1004 | |
6944a9c8 JF |
1005 | static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn, |
1006 | unsigned start, unsigned count) | |
f8822f42 | 1007 | { |
6944a9c8 | 1008 | PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); |
f8822f42 | 1009 | } |
6944a9c8 | 1010 | static inline void paravirt_release_pmd(unsigned pfn) |
da181a8b | 1011 | { |
6944a9c8 | 1012 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); |
da181a8b RR |
1013 | } |
1014 | ||
2761fa09 JF |
1015 | static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn) |
1016 | { | |
1017 | PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); | |
1018 | } | |
1019 | static inline void paravirt_release_pud(unsigned pfn) | |
1020 | { | |
1021 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); | |
1022 | } | |
1023 | ||
ce6234b5 JF |
1024 | #ifdef CONFIG_HIGHPTE |
1025 | static inline void *kmap_atomic_pte(struct page *page, enum km_type type) | |
1026 | { | |
1027 | unsigned long ret; | |
93b1eab3 | 1028 | ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type); |
ce6234b5 JF |
1029 | return (void *)ret; |
1030 | } | |
1031 | #endif | |
1032 | ||
f8822f42 JF |
1033 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, |
1034 | pte_t *ptep) | |
da181a8b | 1035 | { |
93b1eab3 | 1036 | PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); |
da181a8b RR |
1037 | } |
1038 | ||
f8822f42 JF |
1039 | static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, |
1040 | pte_t *ptep) | |
da181a8b | 1041 | { |
93b1eab3 | 1042 | PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); |
da181a8b RR |
1043 | } |
1044 | ||
773221f4 | 1045 | static inline pte_t __pte(pteval_t val) |
da181a8b | 1046 | { |
773221f4 JF |
1047 | pteval_t ret; |
1048 | ||
1049 | if (sizeof(pteval_t) > sizeof(long)) | |
1050 | ret = PVOP_CALL2(pteval_t, | |
1051 | pv_mmu_ops.make_pte, | |
1052 | val, (u64)val >> 32); | |
1053 | else | |
1054 | ret = PVOP_CALL1(pteval_t, | |
1055 | pv_mmu_ops.make_pte, | |
1056 | val); | |
1057 | ||
c8e5393a | 1058 | return (pte_t) { .pte = ret }; |
da181a8b RR |
1059 | } |
1060 | ||
773221f4 JF |
1061 | static inline pteval_t pte_val(pte_t pte) |
1062 | { | |
1063 | pteval_t ret; | |
1064 | ||
1065 | if (sizeof(pteval_t) > sizeof(long)) | |
1066 | ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, | |
1067 | pte.pte, (u64)pte.pte >> 32); | |
1068 | else | |
1069 | ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, | |
1070 | pte.pte); | |
1071 | ||
1072 | return ret; | |
1073 | } | |
1074 | ||
a15af1c9 JF |
1075 | static inline pteval_t pte_flags(pte_t pte) |
1076 | { | |
1077 | pteval_t ret; | |
1078 | ||
1079 | if (sizeof(pteval_t) > sizeof(long)) | |
1080 | ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags, | |
1081 | pte.pte, (u64)pte.pte >> 32); | |
1082 | else | |
1083 | ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, | |
1084 | pte.pte); | |
1085 | ||
1086 | return ret; | |
1087 | } | |
1088 | ||
ef38503e | 1089 | static inline pgd_t __pgd(pgdval_t val) |
da181a8b | 1090 | { |
ef38503e JF |
1091 | pgdval_t ret; |
1092 | ||
1093 | if (sizeof(pgdval_t) > sizeof(long)) | |
1094 | ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, | |
1095 | val, (u64)val >> 32); | |
1096 | else | |
1097 | ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, | |
1098 | val); | |
1099 | ||
1100 | return (pgd_t) { ret }; | |
1101 | } | |
1102 | ||
1103 | static inline pgdval_t pgd_val(pgd_t pgd) | |
1104 | { | |
1105 | pgdval_t ret; | |
1106 | ||
1107 | if (sizeof(pgdval_t) > sizeof(long)) | |
1108 | ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, | |
1109 | pgd.pgd, (u64)pgd.pgd >> 32); | |
1110 | else | |
1111 | ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, | |
1112 | pgd.pgd); | |
1113 | ||
1114 | return ret; | |
f8822f42 JF |
1115 | } |
1116 | ||
08b882c6 JF |
1117 | #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION |
1118 | static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, | |
1119 | pte_t *ptep) | |
1120 | { | |
1121 | pteval_t ret; | |
1122 | ||
1123 | ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start, | |
1124 | mm, addr, ptep); | |
1125 | ||
1126 | return (pte_t) { .pte = ret }; | |
1127 | } | |
1128 | ||
1129 | static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |
1130 | pte_t *ptep, pte_t pte) | |
1131 | { | |
1132 | if (sizeof(pteval_t) > sizeof(long)) | |
1133 | /* 5 arg words */ | |
1134 | pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte); | |
1135 | else | |
1136 | PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit, | |
1137 | mm, addr, ptep, pte.pte); | |
1138 | } | |
1139 | ||
4eed80cd JF |
1140 | static inline void set_pte(pte_t *ptep, pte_t pte) |
1141 | { | |
1142 | if (sizeof(pteval_t) > sizeof(long)) | |
1143 | PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, | |
1144 | pte.pte, (u64)pte.pte >> 32); | |
1145 | else | |
1146 | PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, | |
1147 | pte.pte); | |
1148 | } | |
1149 | ||
1150 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |
1151 | pte_t *ptep, pte_t pte) | |
1152 | { | |
1153 | if (sizeof(pteval_t) > sizeof(long)) | |
1154 | /* 5 arg words */ | |
1155 | pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); | |
1156 | else | |
1157 | PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); | |
1158 | } | |
1159 | ||
60b3f626 JF |
1160 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
1161 | { | |
1162 | pmdval_t val = native_pmd_val(pmd); | |
1163 | ||
1164 | if (sizeof(pmdval_t) > sizeof(long)) | |
1165 | PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32); | |
1166 | else | |
1167 | PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val); | |
1168 | } | |
1169 | ||
1fe91514 GOC |
1170 | #if PAGETABLE_LEVELS >= 3 |
1171 | static inline pmd_t __pmd(pmdval_t val) | |
1172 | { | |
1173 | pmdval_t ret; | |
1174 | ||
1175 | if (sizeof(pmdval_t) > sizeof(long)) | |
1176 | ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, | |
1177 | val, (u64)val >> 32); | |
1178 | else | |
1179 | ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, | |
1180 | val); | |
1181 | ||
1182 | return (pmd_t) { ret }; | |
1183 | } | |
1184 | ||
1185 | static inline pmdval_t pmd_val(pmd_t pmd) | |
1186 | { | |
1187 | pmdval_t ret; | |
1188 | ||
1189 | if (sizeof(pmdval_t) > sizeof(long)) | |
1190 | ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, | |
1191 | pmd.pmd, (u64)pmd.pmd >> 32); | |
1192 | else | |
1193 | ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, | |
1194 | pmd.pmd); | |
1195 | ||
1196 | return ret; | |
1197 | } | |
1198 | ||
1199 | static inline void set_pud(pud_t *pudp, pud_t pud) | |
1200 | { | |
1201 | pudval_t val = native_pud_val(pud); | |
1202 | ||
1203 | if (sizeof(pudval_t) > sizeof(long)) | |
1204 | PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, | |
1205 | val, (u64)val >> 32); | |
1206 | else | |
1207 | PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, | |
1208 | val); | |
1209 | } | |
9042219c EH |
1210 | #if PAGETABLE_LEVELS == 4 |
1211 | static inline pud_t __pud(pudval_t val) | |
1212 | { | |
1213 | pudval_t ret; | |
1214 | ||
1215 | if (sizeof(pudval_t) > sizeof(long)) | |
1216 | ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, | |
1217 | val, (u64)val >> 32); | |
1218 | else | |
1219 | ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, | |
1220 | val); | |
1221 | ||
1222 | return (pud_t) { ret }; | |
1223 | } | |
1224 | ||
1225 | static inline pudval_t pud_val(pud_t pud) | |
1226 | { | |
1227 | pudval_t ret; | |
1228 | ||
1229 | if (sizeof(pudval_t) > sizeof(long)) | |
1230 | ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, | |
1231 | pud.pud, (u64)pud.pud >> 32); | |
1232 | else | |
1233 | ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, | |
1234 | pud.pud); | |
1235 | ||
1236 | return ret; | |
1237 | } | |
1238 | ||
1239 | static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) | |
1240 | { | |
1241 | pgdval_t val = native_pgd_val(pgd); | |
1242 | ||
1243 | if (sizeof(pgdval_t) > sizeof(long)) | |
1244 | PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp, | |
1245 | val, (u64)val >> 32); | |
1246 | else | |
1247 | PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, | |
1248 | val); | |
1249 | } | |
1250 | ||
1251 | static inline void pgd_clear(pgd_t *pgdp) | |
1252 | { | |
1253 | set_pgd(pgdp, __pgd(0)); | |
1254 | } | |
1255 | ||
1256 | static inline void pud_clear(pud_t *pudp) | |
1257 | { | |
1258 | set_pud(pudp, __pud(0)); | |
1259 | } | |
1260 | ||
1261 | #endif /* PAGETABLE_LEVELS == 4 */ | |
1262 | ||
1fe91514 GOC |
1263 | #endif /* PAGETABLE_LEVELS >= 3 */ |
1264 | ||
4eed80cd JF |
1265 | #ifdef CONFIG_X86_PAE |
1266 | /* Special-case pte-setting operations for PAE, which can't update a | |
1267 | 64-bit pte atomically */ | |
1268 | static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | |
1269 | { | |
1270 | PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, | |
1271 | pte.pte, pte.pte >> 32); | |
1272 | } | |
1273 | ||
1274 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | |
1275 | pte_t *ptep, pte_t pte) | |
1276 | { | |
1277 | /* 5 arg words */ | |
1278 | pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); | |
1279 | } | |
1280 | ||
1281 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | |
1282 | pte_t *ptep) | |
1283 | { | |
1284 | PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); | |
1285 | } | |
60b3f626 JF |
1286 | |
1287 | static inline void pmd_clear(pmd_t *pmdp) | |
1288 | { | |
1289 | PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); | |
1290 | } | |
4eed80cd JF |
1291 | #else /* !CONFIG_X86_PAE */ |
1292 | static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | |
1293 | { | |
1294 | set_pte(ptep, pte); | |
1295 | } | |
1296 | ||
1297 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | |
1298 | pte_t *ptep, pte_t pte) | |
1299 | { | |
1300 | set_pte(ptep, pte); | |
1301 | } | |
1302 | ||
1303 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | |
1304 | pte_t *ptep) | |
1305 | { | |
1306 | set_pte_at(mm, addr, ptep, __pte(0)); | |
1307 | } | |
60b3f626 JF |
1308 | |
1309 | static inline void pmd_clear(pmd_t *pmdp) | |
1310 | { | |
1311 | set_pmd(pmdp, __pmd(0)); | |
1312 | } | |
4eed80cd JF |
1313 | #endif /* CONFIG_X86_PAE */ |
1314 | ||
8965c1c0 JF |
1315 | /* Lazy mode for batching updates / context switch */ |
1316 | enum paravirt_lazy_mode { | |
1317 | PARAVIRT_LAZY_NONE, | |
1318 | PARAVIRT_LAZY_MMU, | |
1319 | PARAVIRT_LAZY_CPU, | |
1320 | }; | |
1321 | ||
1322 | enum paravirt_lazy_mode paravirt_get_lazy_mode(void); | |
1323 | void paravirt_enter_lazy_cpu(void); | |
1324 | void paravirt_leave_lazy_cpu(void); | |
1325 | void paravirt_enter_lazy_mmu(void); | |
1326 | void paravirt_leave_lazy_mmu(void); | |
1327 | void paravirt_leave_lazy(enum paravirt_lazy_mode mode); | |
1328 | ||
9226d125 | 1329 | #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE |
f8822f42 JF |
1330 | static inline void arch_enter_lazy_cpu_mode(void) |
1331 | { | |
8965c1c0 | 1332 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter); |
f8822f42 JF |
1333 | } |
1334 | ||
1335 | static inline void arch_leave_lazy_cpu_mode(void) | |
1336 | { | |
8965c1c0 | 1337 | PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); |
f8822f42 JF |
1338 | } |
1339 | ||
1340 | static inline void arch_flush_lazy_cpu_mode(void) | |
1341 | { | |
8965c1c0 JF |
1342 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) { |
1343 | arch_leave_lazy_cpu_mode(); | |
1344 | arch_enter_lazy_cpu_mode(); | |
1345 | } | |
f8822f42 JF |
1346 | } |
1347 | ||
9226d125 ZA |
1348 | |
1349 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | |
f8822f42 JF |
1350 | static inline void arch_enter_lazy_mmu_mode(void) |
1351 | { | |
8965c1c0 | 1352 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter); |
f8822f42 JF |
1353 | } |
1354 | ||
1355 | static inline void arch_leave_lazy_mmu_mode(void) | |
1356 | { | |
8965c1c0 | 1357 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); |
f8822f42 JF |
1358 | } |
1359 | ||
1360 | static inline void arch_flush_lazy_mmu_mode(void) | |
1361 | { | |
8965c1c0 JF |
1362 | if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) { |
1363 | arch_leave_lazy_mmu_mode(); | |
1364 | arch_enter_lazy_mmu_mode(); | |
1365 | } | |
f8822f42 | 1366 | } |
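/*
 * Usage sketch of the batching pattern the lazy hooks enable:
 *
 *	arch_enter_lazy_mmu_mode();
 *	... a run of set_pte_at()/pmd updates that the backend may queue
 *	    instead of applying one at a time ...
 *	arch_leave_lazy_mmu_mode();
 *
 * The arch_flush_lazy_*_mode() helpers above simply leave and re-enter the
 * mode to force any queued updates out.
 */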
9226d125 | 1367 | |
aeaaa59c JF |
1368 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, |
1369 | unsigned long phys, pgprot_t flags) | |
1370 | { | |
1371 | pv_mmu_ops.set_fixmap(idx, phys, flags); | |
1372 | } | |
1373 | ||
45876233 JF |
1374 | void _paravirt_nop(void); |
1375 | #define paravirt_nop ((void *)_paravirt_nop) | |
1376 | ||
139ec7c4 | 1377 | /* These all sit in the .parainstructions section to tell us what to patch. */ |
98de032b | 1378 | struct paravirt_patch_site { |
139ec7c4 RR |
1379 | u8 *instr; /* original instructions */ |
1380 | u8 instrtype; /* type of this instruction */ | |
1381 | u8 len; /* length of original instruction */ | |
1382 | u16 clobbers; /* what registers you may clobber */ | |
1383 | }; | |
1384 | ||
98de032b JF |
1385 | extern struct paravirt_patch_site __parainstructions[], |
1386 | __parainstructions_end[]; | |
1387 | ||
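/*
 * Sketch of how these records are consumed (the walker is apply_paravirt()
 * in alternative.c): for every site between __parainstructions and
 * __parainstructions_end, the patcher hands ->instrtype, ->clobbers, the
 * site address and ->len to pv_init_ops.patch(), and nop-pads whatever the
 * patch function does not fill in.
 */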
2e47d3e6 GOC |
1388 | #ifdef CONFIG_X86_32 |
1389 | #define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" | |
1390 | #define PV_RESTORE_REGS "popl %%edx; popl %%ecx" | |
1391 | #define PV_FLAGS_ARG "0" | |
1392 | #define PV_EXTRA_CLOBBERS | |
1393 | #define PV_VEXTRA_CLOBBERS | |
1394 | #else | |
1395 | /* We save only the argument register (%rdi); saving all of them would be |
1396 | * too much. All other caller-saved registers are declared as clobbers. */ |
1397 | #define PV_SAVE_REGS "pushq %%rdi;" | |
1398 | #define PV_RESTORE_REGS "popq %%rdi;" | |
1399 | #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" | |
1400 | #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" | |
1401 | #define PV_FLAGS_ARG "D" | |
1402 | #endif | |
1403 | ||
139ec7c4 RR |
1404 | static inline unsigned long __raw_local_save_flags(void) |
1405 | { | |
1406 | unsigned long f; | |
1407 | ||
2e47d3e6 | 1408 | asm volatile(paravirt_alt(PV_SAVE_REGS |
d5822035 | 1409 | PARAVIRT_CALL |
2e47d3e6 | 1410 | PV_RESTORE_REGS) |
d5822035 | 1411 | : "=a"(f) |
93b1eab3 | 1412 | : paravirt_type(pv_irq_ops.save_fl), |
42c24fa2 | 1413 | paravirt_clobber(CLBR_EAX) |
2e47d3e6 | 1414 | : "memory", "cc" PV_VEXTRA_CLOBBERS); |
139ec7c4 RR |
1415 | return f; |
1416 | } | |
1417 | ||
1418 | static inline void raw_local_irq_restore(unsigned long f) | |
1419 | { | |
2e47d3e6 | 1420 | asm volatile(paravirt_alt(PV_SAVE_REGS |
d5822035 | 1421 | PARAVIRT_CALL |
2e47d3e6 | 1422 | PV_RESTORE_REGS) |
d5822035 | 1423 | : "=a"(f) |
2e47d3e6 | 1424 | : PV_FLAGS_ARG(f), |
93b1eab3 | 1425 | paravirt_type(pv_irq_ops.restore_fl), |
d5822035 | 1426 | paravirt_clobber(CLBR_EAX) |
2e47d3e6 | 1427 | : "memory", "cc" PV_EXTRA_CLOBBERS); |
139ec7c4 RR |
1428 | } |
1429 | ||
1430 | static inline void raw_local_irq_disable(void) | |
1431 | { | |
2e47d3e6 | 1432 | asm volatile(paravirt_alt(PV_SAVE_REGS |
d5822035 | 1433 | PARAVIRT_CALL |
2e47d3e6 | 1434 | PV_RESTORE_REGS) |
d5822035 | 1435 | : |
93b1eab3 | 1436 | : paravirt_type(pv_irq_ops.irq_disable), |
d5822035 | 1437 | paravirt_clobber(CLBR_EAX) |
2e47d3e6 | 1438 | : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); |
139ec7c4 RR |
1439 | } |
1440 | ||
1441 | static inline void raw_local_irq_enable(void) | |
1442 | { | |
2e47d3e6 | 1443 | asm volatile(paravirt_alt(PV_SAVE_REGS |
d5822035 | 1444 | PARAVIRT_CALL |
2e47d3e6 | 1445 | PV_RESTORE_REGS) |
d5822035 | 1446 | : |
93b1eab3 | 1447 | : paravirt_type(pv_irq_ops.irq_enable), |
d5822035 | 1448 | paravirt_clobber(CLBR_EAX) |
2e47d3e6 | 1449 | : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); |
139ec7c4 RR |
1450 | } |
1451 | ||
1452 | static inline unsigned long __raw_local_irq_save(void) | |
1453 | { | |
1454 | unsigned long f; | |
1455 | ||
d5822035 JF |
1456 | f = __raw_local_save_flags(); |
1457 | raw_local_irq_disable(); | |
139ec7c4 RR |
1458 | return f; |
1459 | } | |
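/*
 * Typical pairing (usage sketch):
 *
 *	unsigned long flags = __raw_local_irq_save();
 *	... critical section ...
 *	raw_local_irq_restore(flags);
 *
 * which is roughly what the generic local_irq_save()/local_irq_restore()
 * macros boil down to on top of these primitives.
 */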
1460 | ||
294688c0 | 1461 | /* Make sure as little as possible of this mess escapes. */ |
d5822035 | 1462 | #undef PARAVIRT_CALL |
1a45b7aa JF |
1463 | #undef __PVOP_CALL |
1464 | #undef __PVOP_VCALL | |
f8822f42 JF |
1465 | #undef PVOP_VCALL0 |
1466 | #undef PVOP_CALL0 | |
1467 | #undef PVOP_VCALL1 | |
1468 | #undef PVOP_CALL1 | |
1469 | #undef PVOP_VCALL2 | |
1470 | #undef PVOP_CALL2 | |
1471 | #undef PVOP_VCALL3 | |
1472 | #undef PVOP_CALL3 | |
1473 | #undef PVOP_VCALL4 | |
1474 | #undef PVOP_CALL4 | |
139ec7c4 | 1475 | |
d3561b7f RR |
1476 | #else /* __ASSEMBLY__ */ |
1477 | ||
658be9d3 | 1478 | #define _PVSITE(ptype, clobbers, ops, word, algn) \ |
139ec7c4 RR |
1479 | 771:; \ |
1480 | ops; \ | |
1481 | 772:; \ | |
1482 | .pushsection .parainstructions,"a"; \ | |
658be9d3 GOC |
1483 | .align algn; \ |
1484 | word 771b; \ | |
139ec7c4 RR |
1485 | .byte ptype; \ |
1486 | .byte 772b-771b; \ | |
1487 | .short clobbers; \ | |
1488 | .popsection | |
1489 | ||
658be9d3 GOC |
1490 | |
1491 | #ifdef CONFIG_X86_64 | |
6057fc82 GOC |
1492 | #define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx |
1493 | #define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax | |
1494 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) | |
658be9d3 | 1495 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) |
491eccb7 | 1496 | #define PARA_INDIRECT(addr) *addr(%rip) |
658be9d3 | 1497 | #else |
6057fc82 GOC |
1498 | #define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx |
1499 | #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax | |
1500 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) | |
658be9d3 | 1501 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) |
491eccb7 | 1502 | #define PARA_INDIRECT(addr) *%cs:addr |
658be9d3 GOC |
1503 | #endif |
1504 | ||
93b1eab3 JF |
1505 | #define INTERRUPT_RETURN \ |
1506 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ | |
491eccb7 | 1507 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) |
d5822035 JF |
1508 | |
1509 | #define DISABLE_INTERRUPTS(clobbers) \ | |
93b1eab3 | 1510 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ |
491eccb7 JF |
1511 | PV_SAVE_REGS; \ |
1512 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ | |
6057fc82 | 1513 | PV_RESTORE_REGS;) \ |
d5822035 JF |
1514 | |
1515 | #define ENABLE_INTERRUPTS(clobbers) \ | |
93b1eab3 | 1516 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ |
491eccb7 JF |
1517 | PV_SAVE_REGS; \ |
1518 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ | |
6057fc82 | 1519 | PV_RESTORE_REGS;) |
d5822035 | 1520 | |
2be29982 JF |
1521 | #define USERGS_SYSRET32 \ |
1522 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ | |
6abcd98f | 1523 | CLBR_NONE, \ |
2be29982 | 1524 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32)) |
2e47d3e6 | 1525 | |
6057fc82 | 1526 | #ifdef CONFIG_X86_32 |
491eccb7 JF |
1527 | #define GET_CR0_INTO_EAX \ |
1528 | push %ecx; push %edx; \ | |
1529 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ | |
42c24fa2 | 1530 | pop %edx; pop %ecx |
2be29982 JF |
1531 | |
1532 | #define ENABLE_INTERRUPTS_SYSEXIT \ | |
1533 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ | |
1534 | CLBR_NONE, \ | |
1535 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) | |
1536 | ||
1537 | ||
1538 | #else /* !CONFIG_X86_32 */ | |
a00394f8 JF |
1539 | |
1540 | /* | |
1541 | * If swapgs is used while the userspace stack is still current, | |
1542 | * there's no way to call a pvop. The PV replacement *must* be | |
1543 | * inlined, or the swapgs instruction must be trapped and emulated. | |
1544 | */ | |
1545 | #define SWAPGS_UNSAFE_STACK \ | |
1546 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ | |
1547 | swapgs) | |
1548 | ||
e801f864 GOC |
1549 | #define SWAPGS \ |
1550 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ | |
1551 | PV_SAVE_REGS; \ | |
491eccb7 | 1552 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ |
e801f864 GOC |
1553 | PV_RESTORE_REGS \ |
1554 | ) | |
1555 | ||
491eccb7 JF |
1556 | #define GET_CR2_INTO_RCX \ |
1557 | call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \ | |
1558 | movq %rax, %rcx; \ | |
4a8c4c4e GOC |
1559 | xorq %rax, %rax; |
1560 | ||
fab58420 JF |
1561 | #define PARAVIRT_ADJUST_EXCEPTION_FRAME \ |
1562 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ | |
1563 | CLBR_NONE, \ | |
1564 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame)) | |
1565 | ||
2be29982 JF |
1566 | #define USERGS_SYSRET64 \ |
1567 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ | |
d75cd22f | 1568 | CLBR_NONE, \ |
2be29982 JF |
1569 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) |
1570 | ||
1571 | #define ENABLE_INTERRUPTS_SYSEXIT32 \ | |
1572 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ | |
1573 | CLBR_NONE, \ | |
1574 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) | |
1575 | #endif /* CONFIG_X86_32 */ | |
139ec7c4 | 1576 | |
d3561b7f RR |
1577 | #endif /* __ASSEMBLY__ */ |
1578 | #endif /* CONFIG_PARAVIRT */ | |
1579 | #endif /* __ASM_PARAVIRT_H */ |