x86/mm: Unify remote INVLPG code
arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 * Smarter SMP flushing macros.
 *      c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};
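
/*
 * Illustrative sketch: a remote flush of a single page at 'addr' in 'mm'
 * would be described roughly as
 *
 *      struct flush_tlb_info info = {
 *              .flush_mm    = mm,
 *              .flush_start = addr,
 *              .flush_end   = addr + PAGE_SIZE,
 *      };
 *
 * and then handed to smp_call_function_many() with flush_tlb_func() as the
 * callback, which is what native_flush_tlb_others() below does.
 * ('addr' and 'mm' are placeholder names, not variables in this file.)
 */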

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *      Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *      if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *      mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
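
/*
 * Rough sketch of step 1b) above (the names follow this file, but the
 * snippet is illustrative rather than the actual switch_mm() code): on a
 * lazy switch back to the same mm, the CPU re-enables flush IPIs for
 * itself and reloads cr3 if it lost the race with leave_mm():
 *
 *      this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 *      if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)))
 *              load_cr3(mm->pgd);      (bit was 0: leave_mm() ran, so reload)
 */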

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;
        if (!f->flush_end)
                f->flush_end = f->flush_start + PAGE_SIZE;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL)
                        local_flush_tlb();
                else {
                        unsigned long addr;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                }
        } else
                leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        struct flush_tlb_info info;
        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        local_flush_tlb();
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/* in units of pages */
unsigned long tlb_single_page_flush_ceiling = 1;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();
        if (current->active_mm != mm)
                goto out;

        if (!current->mm) {
                leave_mm(smp_processor_id());
                goto out;
        }

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;

        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}
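
/*
 * Worked example (illustrative, assuming the caller is running with 'mm'
 * active): with tlb_single_page_flush_ceiling == 1 as above, a call like
 *
 *      flush_tlb_mm_range(mm, addr, addr + 2 * PAGE_SIZE, 0UL);
 *
 * spans two base pages, exceeds the ceiling, and so does a full
 * local_flush_tlb() plus a TLB_FLUSH_ALL request to the other CPUs; a
 * one-page range would instead be flushed with a single
 * __flush_tlb_single() and a one-page remote flush.
 * ('mm' and 'addr' are placeholders, not variables in this file.)
 */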

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(start);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance as in the user-space flush path above; stay a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}
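
/*
 * Usage note (illustrative): callers such as the vmalloc/vfree unmap paths
 * invoke flush_tlb_kernel_range() on the virtual range they just tore down.
 * With the conservative ceiling above, a call like
 *
 *      flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 *
 * is handled by do_kernel_range_flush() with a single invlpg on every CPU,
 * while larger ranges (or TLB_FLUSH_ALL) fall back to do_flush_tlb_all().
 * ('addr' is a placeholder, not a variable in this file.)
 */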