/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant
 * processors; this does -not- include the 603, however, which shares the
 * implementation with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
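
/*
 * For orientation, an illustrative sketch (not taken from this file) of
 * how callers typically reach these entry points:
 *
 *	flush_tlb_page(vma, address);		- after changing one PTE
 *	flush_tlb_range(vma, start, end);	- e.g. after mprotect()
 *	flush_tlb_kernel_range(start, end);	- e.g. after vunmap()
 */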

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

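	/*
	 * Keep preemption off so the PID snapshot and the flush below
	 * are done on the same CPU.
	 */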
	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned int pid;

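	/* A NULL vma denotes a kernel page, which lives in PID 0 */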
	preempt_disable();
	pid = vma ? vma->vm_mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_page);


/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

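/*
 * Serializes broadcast tlbivax invalidations for cores that cannot
 * handle them being issued concurrently (MMU_FTR_LOCK_BCAST_INVAL).
 */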
static DEFINE_SPINLOCK(tlbivax_lock);

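/* Argument block passed to the IPI flush handlers below */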
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

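	/*
	 * A NULL param means flush the kernel context (PID 0); see
	 * flush_tlb_kernel_range()
	 */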
	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

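	/* Unlike the mm IPI handler, param is never NULL here */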
	_tlbil_va(p->addr, p->pid);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
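	/*
	 * IPI every other CPU this mm has been active on, then do the
	 * local flush below.
	 */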
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask)) {
		struct tlb_flush_param p = { .pid = pid };
		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	cpumask_t cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = vma ? vma->vm_mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = vma->vm_mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
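			/*
			 * Some cores need broadcast invalidations
			 * serialized system-wide.
			 */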
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
			smp_call_function_mask(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid);
 bail:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
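	/*
	 * Kernel mappings all live in PID 0, so flush that whole
	 * context rather than walking the range.
	 */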
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);