/* arch/x86/mm/tlb.c */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

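/*
 * Flushing convention (inferred from the callers in this file, not part
 * of the original comments): flush_end == TLB_FLUSH_ALL requests a full
 * TLB flush; flush_end == 0 means a single page at flush_start
 * (flush_tlb_func() rewrites it to flush_start + PAGE_SIZE); any other
 * value is the exclusive end of a range flushed one page at a time.
 */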
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpumask_set_cpu(cpu, mm_cpumask(new_mm));
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpumask_clear_cpu(cpu, mm_cpumask(old_mm));
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

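/*
 * Note (added for clarity, not in the original comments): flush_tlb_func()
 * below runs as an smp_call_function_many() callback, i.e. in IPI context
 * with interrupts disabled, so it must not sleep.
 */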
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else {
		leave_mm(smp_processor_id());
	}
}
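
/*
 * Note (added for clarity, not in the original comments): the
 * flush_tlb_others() calls in this file are a paravirt hook; on bare
 * metal they resolve to native_flush_tlb_others() below.
 */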
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
						&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns (33 pages * ~100 ns each).
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
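
/*
 * Kernel-range flushing (note added for clarity, not in the original
 * comments): unlike the user-space paths above, kernel mappings are
 * shared by every CPU, so there is no mm_cpumask() to narrow the target
 * set; both branches below broadcast with on_each_cpu().
 */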
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
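
/*
 * Example usage of the knob above (a sketch; assumes debugfs is mounted
 * at /sys/kernel/debug and that arch_debugfs_dir is the "x86"
 * subdirectory there):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */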