/* arch/x86/include/asm/paravirt.h */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr(msr, &_err);            \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})
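
/*
 * Illustrative use of rdmsr_safe() (not part of this file): a faulting MSR
 * access is reported through the returned error value rather than an
 * exception, assuming the usual 0-on-success convention of the native
 * implementation:
 *
 *      u32 lo, hi;
 *
 *      if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi) == 0)
 *              pr_info("platform id: %08x%08x\n", hi, lo);
 */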

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return paravirt_wrmsr_regs(gprs);
}
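
/*
 * Note on the gprs[] layout used above: in the rdmsr/wrmsr_safe_regs
 * calling convention, index 0 corresponds to %eax, 1 to %ecx, 2 to %edx
 * and 7 to %edi, so the MSR number goes in gprs[1], the AMD passcode in
 * gprs[7], and the 64-bit value is split between gprs[0] (low half) and
 * gprs[2] (high half).
 */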

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                                     \
do {                                                    \
        u64 _l = paravirt_read_tsc();                   \
        low = (int)_l;                                  \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = paravirt_read_pmc(counter);            \
        low = (u32)_l;                                  \
        high = _l >> 32;                                \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        int __aux;                                      \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned long __aux;                            \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

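/*
 * The alloc/release hooks above notify the back-end when a pfn starts or
 * stops being used as a page-table page, so that a hypervisor (Xen, for
 * example) can adjust the page's type or pinning accordingly.
 */
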
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

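/*
 * The sizeof(pteval_t) > sizeof(long) branches above (and in the pgd/pmd/pud
 * helpers below) handle 32-bit PAE, where a 64-bit page-table value has to
 * be split across two 32-bit register arguments; otherwise the value fits in
 * a single register and the one-argument form is used.
 */
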
static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

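/*
 * ptep_modify_prot_start/commit bracket a read-modify-write of a live pte,
 * so that a back-end can apply the new value without losing any hardware
 * accessed/dirty bit updates that happen in between.
 */
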
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

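/*
 * The "5 arg words" cases call through pv_mmu_ops directly rather than via
 * a PVOP macro: with a split 64-bit pte the call would need five argument
 * words (mm, addr, ptep, pte low, pte high), one more than PVOP_VCALL4 can
 * pass.
 */
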
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif /* PAGETABLE_LEVELS == 4 */

#endif /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

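/*
 * arch_start_context_switch()/arch_end_context_switch() bracket the CPU
 * state updates done while switching tasks, so a back-end can enter its
 * lazy CPU mode and batch the intervening descriptor and MSR updates.
 */
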
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

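/*
 * Between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() a
 * back-end may queue page-table updates and flush them as one batch,
 * reducing the number of hypercalls for operations that touch many ptes.
 */
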
void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
                                                 unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

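/*
 * With CONFIG_PARAVIRT_SPINLOCKS the arch_spin_* entry points above go
 * through pv_lock_ops, which lets a hypervisor back-end replace pure
 * busy-waiting with something friendlier to the host, such as yielding
 * the virtual CPU while the lock is held elsewhere.
 */
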
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                         \
        "push %rcx;"                                    \
        "push %rdx;"                                    \
        "push %rsi;"                                    \
        "push %rdi;"                                    \
        "push %r8;"                                     \
        "push %r9;"                                     \
        "push %r10;"                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                      \
        "pop %r11;"                                     \
        "pop %r10;"                                     \
        "pop %r9;"                                      \
        "pop %r8;"                                      \
        "pop %rdi;"                                     \
        "pop %rsi;"                                     \
        "pop %rdx;"                                     \
        "pop %rcx;"

/* We save some registers, but all of them, that's too much. We clobber all
 * caller saved registers but the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                         \
        extern typeof(func) __raw_callee_save_##func;           \
        static void *__##func##__ __used = func;                \
                                                                \
        asm(".pushsection .text;"                               \
            "__raw_callee_save_" #func ": "                     \
            PV_SAVE_ALL_CALLER_REGS                             \
            "call " #func ";"                                   \
            PV_RESTORE_ALL_CALLER_REGS                          \
            "ret;"                                              \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                    \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

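/*
 * Illustrative use (the names here are hypothetical, not from this file):
 * a paravirt back-end that implements save_fl in C wraps it with a thunk
 * and then registers the wrapped version:
 *
 *      static unsigned long my_save_fl(void);
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *      ...
 *      pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */
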
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection

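/*
 * _PVSITE emits the default ops inline and records the site (start label,
 * op type, instruction length and clobber set) in the .parainstructions
 * section, which apply_paravirt() walks at boot to patch in the final code.
 */
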
#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

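/*
 * Note that COND_PUSH/COND_POP only touch a register when it is *not* in
 * the clobber set: they preserve exactly the registers the call site has
 * not agreed to have clobbered.
 */
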
#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                              \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */