#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

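/*
 * Each wrapper below compiles down to an indirect call through one of
 * the pv_*_ops structures declared in <asm/paravirt_types.h>.  The
 * PVOP_CALLn()/PVOP_VCALLn() macros (CALL returns a value, VCALL is
 * void; n is the number of argument words) also record the call site
 * in .parainstructions so the chosen backend can later patch it into
 * a direct call or inline native instructions.
 */
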
static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        /* Plain halt must not enable interrupts; use the halt op,
           not safe_halt. */
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr(msr, val1, val2);            \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr(msr, &_err);            \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr(msr, &_err);         \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})
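
/*
 * Illustrative example (not part of this header): reading and
 * rewriting an MSR with fault handling, assuming the usual constants
 * from <asm/msr-index.h>:
 *
 *      u32 lo, hi;
 *
 *      if (!rdmsr_safe(MSR_EFER, &lo, &hi))
 *              wrmsr_safe(MSR_EFER, lo, hi);
 *
 * A faulting RDMSR/WRMSR is reported through the nonzero return value
 * instead of an unhandled #GP.
 */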

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
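
/*
 * The *_amd_safe() helpers below use the register-block MSR accessors,
 * where gprs[] indexes the general registers in eax, ecx, edx, ebx,
 * esp, ebp, esi, edi order: gprs[1] is the MSR number (%ecx),
 * gprs[0]/gprs[2] are the low/high value words (%eax/%edx), and
 * gprs[7] carries the magic value AMD requires in %edi to unlock its
 * password-protected MSR access.
 */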
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

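/*
 * pte/pgd value conversions: when the pte is wider than a native word
 * (32-bit PAE), the 64-bit value is split across two 32-bit argument
 * words for the call; otherwise it is passed directly.  The CALLEE
 * variants use the lighter callee-save calling convention set up by
 * PV_CALLEE_SAVE_REGS_THUNK() further down.
 */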
static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words: more than the PVOP_VCALL macros handle,
                   so call the op directly */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words: more than the PVOP_VCALL macros handle,
                   so call the op directly */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

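/*
 * Lazy MMU mode lets a batch of page-table updates made between
 * arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() be queued
 * up by the backend and issued as a single hypercall, rather than one
 * trap per update.
 */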
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

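/*
 * With CONFIG_PARAVIRT_SPINLOCKS, the arch_spin_* primitives are also
 * routed through an ops table, so a hypervisor backend can replace
 * busy-waiting with something friendlier to virtual CPUs (for example,
 * blocking the vCPU until the lock is released).
 */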
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
                                                 unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* Saving every register would cost too much; save only the argument
 * register and declare the remaining caller-saved registers as
 * clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

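/*
 * Illustrative example (hypothetical names): a backend would register
 * a callee-save irq-flags implementation like so:
 *
 *      static unsigned long my_save_fl(void) { ... }
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *      ...
 *      pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * Callers then reach it through the PVOP_CALLEE*() wrappers below with
 * almost no register pressure at the call site.
 */
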
static inline unsigned long __raw_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline void raw_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline void raw_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline void raw_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection

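/*
 * Each _PVSITE record in .parainstructions describes one patchable
 * call site: the site address, the op type, the site length, and the
 * registers the site may clobber.  apply_paravirt() walks this section
 * at boot and rewrites each site for the active backend.
 */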

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented as a single "swapgs" instruction or as something very
 * close to it.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */