#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
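
/*
 * All of the accessors below funnel through the PVOP_CALL*() /
 * PVOP_VCALL*() macros from <asm/paravirt_types.h>. A rough sketch of
 * what such a call compiles to (simplified, not the literal expansion):
 *
 *	write_cr3(x);
 *	  -> PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 *	  -> indirect "call *pv_mmu_ops.write_cr3", with the call site
 *	     recorded in .parainstructions so it can later be patched
 *	     into a direct call or inline native code.
 */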

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
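/* CR8 is 64-bit only and shadows the local APIC task-priority register. */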
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
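
/*
 * Example use of the failable MSR accessors above (an illustrative
 * sketch; use() is a hypothetical consumer, not a kernel function):
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi) == 0)
 *		use(lo, hi);
 *
 * The pv backend reports a refused MSR access through the error return
 * instead of delivering #GP.
 */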

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}
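
/*
 * Typical consumer pattern (a sketch modeled on the scheduler's use;
 * the static keys above keep the cost at zero on bare metal):
 *
 *	if (static_key_false(&paravirt_steal_enabled))
 *		steal = paravirt_steal_clock(smp_processor_id());
 */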

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	int __aux;					\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned long __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}
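
/*
 * Note on the sizeof() tests in the pte/pgd/pmd/pud helpers: on 32-bit
 * PAE kernels the page-table entry types are 64 bits wide, so a value
 * is split across two 32-bit argument registers (low half, then high
 * half); on 64-bit kernels it fits in one register and the
 * single-argument form is used. The compiler resolves the branch at
 * build time. The PVOP_CALLEE* variants additionally assume the
 * callee-save thunk convention defined later in this file.
 */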

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
/* We save some registers, but saving all of them would be too much. We
 * clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
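
/*
 * Putting the pieces together (a sketch with a hypothetical handler,
 * following the pattern the pv_irq_ops implementations use):
 *
 *	static unsigned long my_save_fl(void);	// hypothetical
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * Sites reached via PVOP_CALLEE*() may then assume every register but
 * the return value survives the call.
 */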
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
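
/*
 * These mirror the native irq-flags primitives and pair up the usual
 * way (illustration only):
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... critical section ...
 *	arch_local_irq_restore(flags);
 */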

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection
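
/*
 * Each _PVSITE use emits `ops' plus a record in .parainstructions that
 * the boot-time patcher (apply_paravirt()) walks to rewrite the site.
 * In C terms the record is roughly (see struct paravirt_patch_site in
 * <asm/paravirt_types.h>):
 *
 *	struct { void *instr; u8 instrtype; u8 len; u16 clobbers; };
 */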

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
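
/*
 * Note the inverted test: a register is pushed/popped only when its bit
 * is *clear* in `set', i.e. registers named in the clobber set may be
 * trashed and are not saved. PV_SAVE_REGS(CLBR_RAX), for example, would
 * save every listed register except %rax (64-bit illustration).
 */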
#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
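
/*
 * Illustrative use from assembly (a sketch; the entry code invokes
 * these with an explicit clobber set):
 *
 *	DISABLE_INTERRUPTS(CLBR_NONE)
 *
 * which becomes a patchable call through pv_irq_ops.irq_disable with
 * every register outside the stated clobber set saved around it.
 */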

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)
/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something equally
 * special. Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */