/* arch/x86/include/asm/paravirt.h */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H

/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

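/*
 * The accessors below expand through the PVOP_CALL*() / PVOP_VCALL*()
 * macros from asm/paravirt_types.h: each compiles down to an indirect
 * call through the named pv_*_ops slot, recorded in .parainstructions
 * so the site can later be patched (e.g. by apply_paravirt()) with a
 * direct call or the native instruction. A rough sketch, illustrative
 * only:
 *
 *	PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg)
 * behaves approximately like:
 *	((unsigned long (*)(int))pv_cpu_ops.get_debugreg)(reg)
 */
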
static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

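/*
 * Illustrative only: querying CPUID leaf 1 (feature flags) through the
 * paravirtualized hook. The registers are both input (eax holds the
 * requested leaf) and output.
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 */
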
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long __read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)				\
do {							\
	int _err;					\
	u64 _l = paravirt_read_msr(msr, &_err);		\
	val1 = (u32)_l;					\
	val2 = _l >> 32;				\
} while (0)

#define wrmsr(msr, val1, val2)				\
do {							\
	paravirt_write_msr(msr, val1, val2);		\
} while (0)

#define rdmsrl(msr, val)				\
do {							\
	int _err;					\
	val = paravirt_read_msr(msr, &_err);		\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr(msr, &_err);		\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

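/*
 * Illustrative only: reading an MSR with the fault-safe variant.
 * MSR_IA32_TSC (0x10) is just an example index from asm/msr-index.h.
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_TSC, &lo, &hi) == 0)
 *		... use ((u64)hi << 32) | lo ...
 */
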
static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

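/*
 * Cumulative "steal time" for @cpu, in nanoseconds: time the vCPU was
 * runnable but the host ran something else instead.
 */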
static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr) ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

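/*
 * The sizeof() comparisons here and below are compile-time constants:
 * with 32-bit PAE a pteval_t is 64 bits wide while a long is only 32,
 * so the value has to be split across two 32-bit call arguments;
 * everywhere else it fits in one. The untaken branch is discarded by
 * the compiler.
 */
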
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

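/*
 * Illustrative usage of the modify-prot transaction (sketch only):
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_mkyoung(pte);		(or any other prot change)
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 *
 * The start/commit pair lets a hypervisor perform the read-modify-write
 * as one batched operation instead of trapping on each step.
 */
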
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

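/*
 * Illustrative usage (sketch only): batch a run of pte updates so a
 * hypervisor can queue them and apply them in one flush instead of
 * trapping on every update.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */
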
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

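/*
 * pv_wait()/pv_kick() are the two primitives a hypervisor needs for
 * paravirt qspinlocks: pv_wait(ptr, val) blocks the calling vCPU for
 * as long as *ptr still reads back val (a hypervisor-assisted
 * monitor/mwait, allowed to return spuriously), and pv_kick(cpu)
 * wakes a vCPU blocked in pv_wait().
 */
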
#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers but the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })

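/*
 * Illustrative usage (sketch only; my_save_fl is a made-up name):
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * Call sites reached via PVOP_CALLEE*() can then assume the minimal
 * clobber list that the thunk guarantees.
 */
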
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection

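/*
 * Each _PVSITE record in .parainstructions mirrors what the C side
 * reads back as a struct paravirt_patch_site (asm/paravirt_types.h),
 * roughly:
 *
 *	struct paravirt_patch_site {
 *		u8 *instr;	// 771b: start of the patchable code
 *		u8 instrtype;	// ptype: which pv op this site uses
 *		u8 len;		// 772b-771b: length of the code
 *		u16 clobbers;	// registers the site may clobber
 *	};
 */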

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */