arch/x86/include/asm/mmu_context.h
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
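
/*
 * With paravirt support compiled out there is no hypervisor hook to run
 * at activate_mm() time, so provide an empty stub that the activate_mm()
 * macro below can call unconditionally.
 */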
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */
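
/*
 * Keep CR4.PCE in sync with the mm being run: user-space RDPMC is only
 * allowed in address spaces that perf has marked with perf_rdpmc_allowed,
 * and is disabled everywhere else.
 */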
#ifdef CONFIG_PERF_EVENTS
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
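
/*
 * Enter lazy TLB mode: the CPU is switching to a kernel thread that only
 * borrows the current mm.  Marking the per-cpu TLB state as lazy lets a
 * later flush IPI call leave_mm() and drop this CPU from the mm's cpumask
 * rather than keep flushing user mappings it is not using.
 */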
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
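
/*
 * Switch this CPU from @prev to @next: point CR3 at the new page tables,
 * keep mm_cpumask() and the per-cpu tlbstate in sync for TLB flush IPIs,
 * and reload the per-mm CR4 and LDT state.  The prev == next path only
 * has work to do on SMP, where a lazy CPU may have been dropped from the
 * cpumask by leave_mm() and must re-attach and reload CR3.
 */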
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
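
/*
 * Activate a newly created mm (e.g. from exec_mmap()): run the paravirt
 * hook and then switch this CPU onto the new address space.
 */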
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
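
/*
 * Used on the exec path when the old mm is dropped: zero the user segment
 * registers so a stale selector cannot keep referencing the old mm's LDT.
 * 64-bit clears fs and the gs index; 32-bit only needs to clear gs.
 */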
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
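
/*
 * Forward mmap lifetime events (address space duplicated at fork, torn
 * down at exit) to the paravirt layer; these are no-ops unless a
 * hypervisor hook is installed.
 */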
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}
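
/*
 * Hook run while a new mm is being set up for execve(): reset the per-mm
 * MPX bounds-directory state for the fresh address space.
 */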
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}
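
/*
 * munmap() hook: let MPX release any bounds tables covering the unmapped
 * range, but only when MPX is actually enabled (see below).
 */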
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */