#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
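
/*
 * Illustrative usage of the accessors above (a sketch, not part of this
 * header; MSR_EFER comes from <asm/msr-index.h>): probe an MSR that may
 * fault, then write it back only if the read succeeded.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_EFER, &lo, &hi) == 0)
 *		wrmsr_safe(MSR_EFER, lo, hi);
 */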

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return paravirt_wrmsr_regs(gprs);
}
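
/*
 * Editorial note on the two AMD helpers above: gprs[] is the register
 * image consumed by the *_regs ops, following the native
 * rdmsr_safe_regs()/wrmsr_safe_regs() convention: gprs[0] maps to %eax,
 * gprs[1] to %ecx (the MSR number), gprs[2] to %edx, and gprs[7] to
 * %edi, which must hold 0x9c5a203a, AMD's magic constant that unlocks
 * certain AMD-only MSRs.
 */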

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct jump_label_key;
extern struct jump_label_key paravirt_steal_enabled;
extern struct jump_label_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}
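
/*
 * Illustrative note: steal time is time a vCPU spent runnable while the
 * host ran something else (typically reported in nanoseconds), e.g.:
 *
 *	u64 stolen = paravirt_steal_clock(smp_processor_id());
 */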

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}
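
/*
 * Editorial note: with REALLY_SLOW_IO defined, slow_down_io() issues four
 * io_delay() calls in total, mirroring the native convention of repeating
 * the I/O-port delay four times.
 */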

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}
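
/*
 * Editorial note on the pattern above: on 32-bit PAE, pteval_t is 64 bits
 * but C arguments travel in 32-bit registers, so the value is split into
 * low/high halves for the two-argument PVOP form; when the value fits in
 * a long, the single-argument form is used. The pgd/pmd/pud accessors
 * below follow the same scheme.
 */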

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
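
/*
 * Editorial note: in the PAE case the commit would need five 32-bit
 * argument words (mm, addr, ptep and a two-word pte), more than the
 * PVOP_VCALL4 wrapper can marshal, hence the direct (and therefore
 * unpatchable) call through the pv_mmu_ops pointer.
 */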

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
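
/*
 * Illustrative batching pattern (a sketch; backends such as Xen may fold
 * the updates queued between enter and leave into one hypercall):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */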

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						 unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* Saving all caller-save registers here would be too much; save only the
 * argument register and declare the rest as clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
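
/*
 * Illustrative usage (hypothetical backend code): wrap a plain C function
 * so it can be installed as a callee-save op:
 *
 *	static unsigned long my_save_fl(void) { return 0; }
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */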

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
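
/*
 * Typical pairing (illustrative):
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... critical section ...
 *	arch_local_irq_restore(flags);
 */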


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection
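
/*
 * Editorial note: _PVSITE emits "ops" in place and records a patch-site
 * entry (site address, op type, site length, clobber mask) in the
 * .parainstructions section; see struct paravirt_patch_site in
 * <asm/paravirt_types.h>. The boot-time patcher (apply_paravirt()) uses
 * these records to rewrite each site with better code.
 */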


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */