#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>

#include <asm/tlbflush.h>

DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
			____cacheline_aligned = { &init_mm, 0, };

/* must come after the send_IPI functions above for inlining */
#include <mach_ipi.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_var_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
void leave_mm(int cpu)
{
	BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
	cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpumask_test_cpu(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
		if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
	inc_irq_stat(irq_tlb_count);
}

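/*
 * Flush the TLB on the CPUs named in @cpumask (minus ourselves and any
 * offline CPUs) for the address space @mm; @va is either a single
 * virtual address or TLB_FLUSH_ALL.  tlbstate_lock serializes the shared
 * flush_cpumask / flush_mm / flush_va slots, and we spin until every
 * target CPU has acknowledged the IPI by clearing its bit.
 */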
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	/*
	 * mm must exist :)
	 */
	BUG_ON(!mm);

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

	cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
	cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);

	/*
	 * If a task whose mm mask we are looking at has descheduled and
	 * has cleared its presence from the mask, or if a CPU which we ran
	 * on has gone down then there might be no flush work left:
	 */
	if (unlikely(cpumask_empty(flush_cpumask))) {
		spin_unlock(&tlbstate_lock);
		return;
	}

	flush_mm = mm;
	flush_va = va;

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();

	/*
	 * We have to send the IPI only to the CPUs affected.
	 */
	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpumask_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

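/*
 * Flush all TLB entries for the current task's mm: immediately on this
 * CPU, and via IPI on any other CPU that has the mm in its cpu_vm_mask.
 */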
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

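/*
 * Flush all TLB entries for @mm.  If the mm is loaded here we flush
 * locally (or drop it via leave_mm() when it is only borrowed by a
 * kernel thread), then tell the other CPUs that use it.
 */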
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

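/*
 * Flush a single user address @va from @vma's address space: locally if
 * this CPU runs the mm (or leave the mm entirely when we only hold it
 * lazily), then on every other CPU in mm->cpu_vm_mask.  A typical caller
 * sketch (illustrative only, not from this file) after changing a PTE:
 *
 *	set_pte_at(mm, addr, ptep, newpte);
 *	flush_tlb_page(vma, addr);
 */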
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

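/*
 * do_flush_tlb_all() runs on every CPU from flush_tlb_all(): flush the
 * entire TLB, and if this CPU only holds its mm lazily, leave it.
 */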
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

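/*
 * Put this CPU's tlbstate back to the boot-time default: state 0 and
 * init_mm as the active mm.  (Presumably used on CPU teardown/bring-up
 * paths; the callers are not part of this file.)
 */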
void reset_lazy_tlbstate(void)
{
	int cpu = raw_smp_processor_id();

	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}

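/*
 * flush_cpumask is a cpumask_var_t, so when CONFIG_CPUMASK_OFFSTACK is
 * enabled it needs a real allocation before any remote flush can use it;
 * an early initcall takes care of that.
 */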
static int init_flush_cpumask(void)
{
	alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
	return 0;
}
early_initcall(init_flush_cpumask);