Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/include/asm-arm/tlbflush.h | |
3 | * | |
4 | * Copyright (C) 1999-2003 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #ifndef _ASMARM_TLBFLUSH_H | |
11 | #define _ASMARM_TLBFLUSH_H | |
12 | ||
13 | #include <linux/config.h> | |
14 | #include <asm/glue.h> | |
15 | ||
16 | #define TLB_V3_PAGE (1 << 0) | |
17 | #define TLB_V4_U_PAGE (1 << 1) | |
18 | #define TLB_V4_D_PAGE (1 << 2) | |
19 | #define TLB_V4_I_PAGE (1 << 3) | |
20 | #define TLB_V6_U_PAGE (1 << 4) | |
21 | #define TLB_V6_D_PAGE (1 << 5) | |
22 | #define TLB_V6_I_PAGE (1 << 6) | |
23 | ||
24 | #define TLB_V3_FULL (1 << 8) | |
25 | #define TLB_V4_U_FULL (1 << 9) | |
26 | #define TLB_V4_D_FULL (1 << 10) | |
27 | #define TLB_V4_I_FULL (1 << 11) | |
28 | #define TLB_V6_U_FULL (1 << 12) | |
29 | #define TLB_V6_D_FULL (1 << 13) | |
30 | #define TLB_V6_I_FULL (1 << 14) | |
31 | ||
32 | #define TLB_V6_U_ASID (1 << 16) | |
33 | #define TLB_V6_D_ASID (1 << 17) | |
34 | #define TLB_V6_I_ASID (1 << 18) | |
35 | ||
36 | #define TLB_DCLEAN (1 << 30) | |
37 | #define TLB_WB (1 << 31) | |
38 | ||
39 | /* | |
40 | * MMU TLB Model | |
41 | * ============= | |
42 | * | |
43 | * We have the following to choose from: | |
44 | * v3 - ARMv3 | |
45 | * v4 - ARMv4 without write buffer | |
46 | * v4wb - ARMv4 with write buffer without I TLB flush entry instruction | |
47 | * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction | |
48 | * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction | |
49 | */ | |
50 | #undef _TLB | |
51 | #undef MULTI_TLB | |
52 | ||
53 | #define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) | |
54 | ||
55 | #ifdef CONFIG_CPU_TLB_V3 | |
56 | # define v3_possible_flags v3_tlb_flags | |
57 | # define v3_always_flags v3_tlb_flags | |
58 | # ifdef _TLB | |
59 | # define MULTI_TLB 1 | |
60 | # else | |
61 | # define _TLB v3 | |
62 | # endif | |
63 | #else | |
64 | # define v3_possible_flags 0 | |
65 | # define v3_always_flags (-1UL) | |
66 | #endif | |
67 | ||
68 | #define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) | |
69 | ||
70 | #ifdef CONFIG_CPU_TLB_V4WT | |
71 | # define v4_possible_flags v4_tlb_flags | |
72 | # define v4_always_flags v4_tlb_flags | |
73 | # ifdef _TLB | |
74 | # define MULTI_TLB 1 | |
75 | # else | |
76 | # define _TLB v4 | |
77 | # endif | |
78 | #else | |
79 | # define v4_possible_flags 0 | |
80 | # define v4_always_flags (-1UL) | |
81 | #endif | |
82 | ||
83 | #define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ | |
84 | TLB_V4_I_FULL | TLB_V4_D_FULL | \ | |
85 | TLB_V4_I_PAGE | TLB_V4_D_PAGE) | |
86 | ||
87 | #ifdef CONFIG_CPU_TLB_V4WBI | |
88 | # define v4wbi_possible_flags v4wbi_tlb_flags | |
89 | # define v4wbi_always_flags v4wbi_tlb_flags | |
90 | # ifdef _TLB | |
91 | # define MULTI_TLB 1 | |
92 | # else | |
93 | # define _TLB v4wbi | |
94 | # endif | |
95 | #else | |
96 | # define v4wbi_possible_flags 0 | |
97 | # define v4wbi_always_flags (-1UL) | |
98 | #endif | |
99 | ||
100 | #define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \ | |
101 | TLB_V4_I_FULL | TLB_V4_D_FULL | \ | |
102 | TLB_V4_D_PAGE) | |
103 | ||
104 | #ifdef CONFIG_CPU_TLB_V4WB | |
105 | # define v4wb_possible_flags v4wb_tlb_flags | |
106 | # define v4wb_always_flags v4wb_tlb_flags | |
107 | # ifdef _TLB | |
108 | # define MULTI_TLB 1 | |
109 | # else | |
110 | # define _TLB v4wb | |
111 | # endif | |
112 | #else | |
113 | # define v4wb_possible_flags 0 | |
114 | # define v4wb_always_flags (-1UL) | |
115 | #endif | |
116 | ||
117 | #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ | |
118 | TLB_V6_I_FULL | TLB_V6_D_FULL | \ | |
119 | TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ | |
120 | TLB_V6_I_ASID | TLB_V6_D_ASID) | |
121 | ||
122 | #ifdef CONFIG_CPU_TLB_V6 | |
123 | # define v6wbi_possible_flags v6wbi_tlb_flags | |
124 | # define v6wbi_always_flags v6wbi_tlb_flags | |
125 | # ifdef _TLB | |
126 | # define MULTI_TLB 1 | |
127 | # else | |
128 | # define _TLB v6wbi | |
129 | # endif | |
130 | #else | |
131 | # define v6wbi_possible_flags 0 | |
132 | # define v6wbi_always_flags (-1UL) | |
133 | #endif | |
134 | ||
135 | #ifndef _TLB | |
136 | #error Unknown TLB model | |
137 | #endif | |
138 | ||
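For orientation, here is how the selection machinery above resolves for two representative configurations (a worked illustration, shown as comments; it is not part of the header itself):

```c
/*
 * (a) Only CONFIG_CPU_TLB_V4WBI selected -- single-TLB kernel:
 *
 *	_TLB                  -> v4wbi
 *	MULTI_TLB             -> not defined
 *	v4wbi_possible_flags  -> v4wbi_tlb_flags
 *	v4wbi_always_flags    -> v4wbi_tlb_flags
 *	all other *_possible  -> 0, all other *_always -> (-1UL)
 *
 * (b) CONFIG_CPU_TLB_V4WB and CONFIG_CPU_TLB_V6 both selected:
 *
 *	_TLB       -> v4wb  (first model in the file to claim it)
 *	MULTI_TLB  -> 1     (the v6 block finds _TLB already defined)
 *	v4wb_* and v6wbi_* keep their real flag values; the rest stay
 *	0 / (-1UL), which makes them identity elements for the OR/AND
 *	combining done by possible_tlb_flags/always_tlb_flags below.
 */
```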
139 | #ifndef __ASSEMBLY__ | |
140 | ||
141 | struct cpu_tlb_fns { | |
142 | void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); | |
143 | void (*flush_kern_range)(unsigned long, unsigned long); | |
144 | unsigned long tlb_flags; | |
145 | }; | |
146 | ||
147 | /* | |
148 | * Select the calling method | |
149 | */ | |
150 | #ifdef MULTI_TLB | |
151 | ||
152 | #define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range | |
153 | #define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range | |
154 | ||
155 | #else | |
156 | ||
157 | #define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range) | |
158 | #define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range) | |
159 | ||
160 | extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); | |
161 | extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); | |
162 | ||
163 | #endif | |
164 | ||
165 | extern struct cpu_tlb_fns cpu_tlb; | |
166 | ||
167 | #define __cpu_tlb_flags cpu_tlb.tlb_flags | |
168 | ||
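For reference, __glue() from <asm/glue.h> is plain compile-time token pasting (paraphrased below), so the only difference between the two cases is whether the call is direct or goes through a function pointer that the processor setup code fills in at boot. A sketch, assuming a single-TLB v4wbi build:

```c
/* <asm/glue.h>, paraphrased: token pasting at preprocessing time. */
#define ____glue(name,fn)   name##fn
#define __glue(name,fn)     ____glue(name,fn)

/*
 * Single-TLB build, _TLB == v4wbi:
 *
 *	__cpu_flush_user_tlb_range(start, end, vma)
 *	    -> v4wbi_flush_user_tlb_range(start, end, vma)
 *
 * i.e. a direct call into arch/arm/mm/tlb-v4wbi.S.
 *
 * MULTI_TLB build:
 *
 *	__cpu_flush_user_tlb_range(start, end, vma)
 *	    -> cpu_tlb.flush_user_range(start, end, vma)
 *
 * an indirect call through the per-processor function table.
 */
```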
169 | /* | |
170 | * TLB Management | |
171 | * ============== | |
172 | * | |
173 | * The arch/arm/mm/tlb-*.S files implement these methods. | |
174 | * | |
175 | * The TLB specific code is expected to perform whatever tests it | |
176 | * needs to determine if it should invalidate the TLB for each | |
177 | * call. Start addresses are inclusive and end addresses are | |
178 | * exclusive; it is safe to round these addresses down. | |
179 | * | |
180 | * flush_tlb_all() | |
181 | * | |
182 | * Invalidate the entire TLB. | |
183 | * | |
184 | * flush_tlb_mm(mm) | |
185 | * | |
186 | * Invalidate all TLB entries in a particular address | |
187 | * space. | |
188 | * - mm - mm_struct describing address space | |
189 | * | |
190 | * flush_tlb_range(vma,start,end) | |
191 | * | |
192 | * Invalidate a range of TLB entries in the specified | |
193 | * address space. | |
194 | * - vma - vm_area_struct containing the range to flush | |
195 | * - start - start address (may not be aligned) | |
196 | * - end - end address (exclusive, may not be aligned) | |
197 | * | |
198 | * flush_tlb_page(vma,vaddr) | |
199 | * | |
200 | * Invalidate the specified page in the specified address range. | |
201 | * - vaddr - virtual address (may not be aligned) | |
202 | * - vma - vm_area_struct describing the address range | |
203 | * | |
204 | * flush_tlb_kernel_page(kaddr) | |
205 | * | |
206 | * Invalidate the TLB entry for the specified page. The address | |
207 | * will be in the kernel's virtual memory space. Current uses | |
208 | * only require the D-TLB to be invalidated. | |
209 | * - kaddr - Kernel virtual memory address | |
210 | */ | |
211 | ||
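To make the contract above concrete, here is a minimal hypothetical caller (example_update_pte is an invented name, and the snippet assumes ordinary kernel context with <asm/pgtable.h> and this header included):

```c
/* Hypothetical illustration of the interface documented above. */
static void example_update_pte(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep, pte_t newpte)
{
        set_pte(ptep, newpte);          /* install the new translation   */
        flush_tlb_page(vma, addr);      /* drop the stale entry for addr */
}

/*
 * Unmapping a whole region would instead use the range call:
 *
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 */
```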
212 | /* | |
213 | * We optimise the code below by: | |
214 | * - building a set of TLB flags that might be set in __cpu_tlb_flags | |
215 | * - building a set of TLB flags that will always be set in __cpu_tlb_flags | |
216 | * - if we're going to need __cpu_tlb_flags, access it once and only once | |
217 | * | |
218 | * This allows us to build optimal assembly for the single-CPU type case, | |
219 | * and as close to optimal as the compiler constraints allow for the multi-CPU | |
220 | * case. We could do better for the multi-CPU case if the compiler | |
221 | * implemented the "%?" method, but this has been discontinued due to too | |
222 | * many people getting it wrong. | |
223 | */ | |
224 | #define possible_tlb_flags (v3_possible_flags | \ | |
225 | v4_possible_flags | \ | |
226 | v4wbi_possible_flags | \ | |
227 | v4wb_possible_flags | \ | |
228 | v6wbi_possible_flags) | |
229 | ||
230 | #define always_tlb_flags (v3_always_flags & \ | |
231 | v4_always_flags & \ | |
232 | v4wbi_always_flags & \ | |
233 | v4wb_always_flags & \ | |
234 | v6wbi_always_flags) | |
235 | ||
236 | #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) | |
237 | ||
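A short worked expansion of tlb_flag() shows why this pays off. In a single-TLB build (say v4wbi), possible_tlb_flags and always_tlb_flags are both exactly v4wbi_tlb_flags, so every test folds to a constant:

```c
/*
 *	tlb_flag(TLB_V3_FULL)
 *	    -> (v4wbi_tlb_flags & TLB_V3_FULL)
 *	       || (__tlb_flag & v4wbi_tlb_flags & TLB_V3_FULL)
 *	    -> 0 || 0                  constant false: the whole "if"
 *	                               and its mcr disappear
 *
 *	tlb_flag(TLB_WB)
 *	    -> (v4wbi_tlb_flags & TLB_WB) || ...
 *	    -> constant true: the mcr is emitted unconditionally
 *
 * With every test constant, the (unused) load of __cpu_tlb_flags can be
 * discarded as well.  In a MULTI_TLB build, flags common to all
 * compiled-in models still fold to true and flags none of them has fold
 * to false; only the remainder test __tlb_flag, which is read once.
 */
```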
603fff54 | 238 | static inline void local_flush_tlb_all(void) |
1da177e4 LT |
239 | { |
240 | const int zero = 0; | |
241 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
242 | ||
243 | if (tlb_flag(TLB_WB)) | |
244 | asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); | |
245 | ||
246 | if (tlb_flag(TLB_V3_FULL)) | |
247 | asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero)); | |
248 | if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) | |
249 | asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero)); | |
250 | if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) | |
251 | asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero)); | |
252 | if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) | |
253 | asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); | |
254 | } | |
255 | ||
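The same handful of CP15 operations recurs throughout the helpers below; for reference they decode as follows (a summary of the encodings used in this file — the ARM Architecture Reference Manual is the definitive source):

```c
/*
 *	mcr p15, 0, Rd, c7, c10, 4    drain write buffer
 *	mcr p15, 0, Rd, c7, c10, 1    clean D-cache line by MVA (Rd = address)
 *
 *	mcr p15, 0, Rd, c8, c7, 0     invalidate entire unified (I+D) TLB
 *	mcr p15, 0, Rd, c8, c6, 0     invalidate entire D TLB
 *	mcr p15, 0, Rd, c8, c5, 0     invalidate entire I TLB
 *
 *	mcr p15, 0, Rd, c8, c7, 1     invalidate unified TLB entry by MVA
 *	mcr p15, 0, Rd, c8, c6, 1     invalidate D TLB entry by MVA
 *	mcr p15, 0, Rd, c8, c5, 1     invalidate I TLB entry by MVA
 *
 *	mcr p15, 0, Rd, c8, c7, 2     invalidate unified TLB by ASID (ARMv6)
 *	mcr p15, 0, Rd, c8, c6, 2     invalidate D TLB by ASID (ARMv6)
 *	mcr p15, 0, Rd, c8, c5, 2     invalidate I TLB by ASID (ARMv6)
 *
 * The ARMv3 "c6, c0, 0" writes use the older pre-ARMv4 CP15 layout and
 * are not covered by this table.
 */
```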
603fff54 | 256 | static inline void local_flush_tlb_mm(struct mm_struct *mm) |
1da177e4 LT |
257 | { |
258 | const int zero = 0; | |
259 | const int asid = ASID(mm); | |
260 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
261 | ||
262 | if (tlb_flag(TLB_WB)) | |
263 | asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); | |
264 | ||
265 | if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) { | |
266 | if (tlb_flag(TLB_V3_FULL)) | |
267 | asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero)); | |
268 | if (tlb_flag(TLB_V4_U_FULL)) | |
269 | asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero)); | |
270 | if (tlb_flag(TLB_V4_D_FULL)) | |
271 | asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero)); | |
272 | if (tlb_flag(TLB_V4_I_FULL)) | |
273 | asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); | |
274 | } | |
275 | ||
276 | if (tlb_flag(TLB_V6_U_ASID)) | |
277 | asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid)); | |
278 | if (tlb_flag(TLB_V6_D_ASID)) | |
279 | asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid)); | |
280 | if (tlb_flag(TLB_V6_I_ASID)) | |
281 | asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid)); | |
282 | } | |
283 | ||
284 | static inline void | |
603fff54 | 285 | local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) |
1da177e4 LT |
286 | { |
287 | const int zero = 0; | |
288 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
289 | ||
290 | uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); | |
291 | ||
292 | if (tlb_flag(TLB_WB)) | |
293 | asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); | |
294 | ||
295 | if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { | |
296 | if (tlb_flag(TLB_V3_PAGE)) | |
297 | asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr)); | |
298 | if (tlb_flag(TLB_V4_U_PAGE)) | |
299 | asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr)); | |
300 | if (tlb_flag(TLB_V4_D_PAGE)) | |
301 | asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr)); | |
302 | if (tlb_flag(TLB_V4_I_PAGE)) | |
303 | asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr)); | |
304 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) | |
305 | asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); | |
306 | } | |
307 | ||
308 | if (tlb_flag(TLB_V6_U_PAGE)) | |
309 | asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr)); | |
310 | if (tlb_flag(TLB_V6_D_PAGE)) | |
311 | asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr)); | |
312 | if (tlb_flag(TLB_V6_I_PAGE)) | |
313 | asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr)); | |
314 | } | |
315 | ||
603fff54 | 316 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) |
1da177e4 LT |
317 | { |
318 | const int zero = 0; | |
319 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
320 | ||
321 | kaddr &= PAGE_MASK; | |
322 | ||
323 | if (tlb_flag(TLB_WB)) | |
324 | asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); | |
325 | ||
326 | if (tlb_flag(TLB_V3_PAGE)) | |
327 | asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr)); | |
328 | if (tlb_flag(TLB_V4_U_PAGE)) | |
329 | asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr)); | |
330 | if (tlb_flag(TLB_V4_D_PAGE)) | |
331 | asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr)); | |
332 | if (tlb_flag(TLB_V4_I_PAGE)) | |
333 | asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr)); | |
334 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) | |
335 | asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); | |
336 | ||
337 | if (tlb_flag(TLB_V6_U_PAGE)) | |
338 | asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr)); | |
339 | if (tlb_flag(TLB_V6_D_PAGE)) | |
340 | asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr)); | |
341 | if (tlb_flag(TLB_V6_I_PAGE)) | |
342 | asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr)); | |
343 | } | |
344 | ||
345 | /* | |
346 | * flush_pmd_entry | |
347 | * | |
348 | * Flush a PMD entry (word aligned, or double-word aligned) to | |
349 | * RAM if the TLB for the CPU we are running on requires this. | |
350 | * This is typically used when we are creating PMD entries. | |
351 | * | |
352 | * clean_pmd_entry | |
353 | * | |
354 | * Clean (but don't drain the write buffer) if the CPU requires | |
355 | * these operations. This is typically used when we are removing | |
356 | * PMD entries. | |
357 | */ | |
358 | static inline void flush_pmd_entry(pmd_t *pmd) | |
359 | { | |
360 | const unsigned int zero = 0; | |
361 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
362 | ||
363 | if (tlb_flag(TLB_DCLEAN)) | |
364 | asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" | |
365 | : : "r" (pmd)); | |
366 | if (tlb_flag(TLB_WB)) | |
367 | asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd" | |
368 | : : "r" (zero)); | |
369 | } | |
370 | ||
371 | static inline void clean_pmd_entry(pmd_t *pmd) | |
372 | { | |
373 | const unsigned int __tlb_flag = __cpu_tlb_flags; | |
374 | ||
375 | if (tlb_flag(TLB_DCLEAN)) | |
376 | asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" | |
377 | : : "r" (pmd)); | |
378 | } | |
379 | ||
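As an illustration of where these fit (hypothetical code — example_set_section is an invented stand-in for the real ARM page-table setup paths, and the descriptor encoding is simplified), a freshly written first-level entry is pushed out to RAM so the hardware table walk, which reads memory directly, can see it:

```c
static void example_set_section(pmd_t *pmdp, unsigned long phys,
                                pgprot_t prot)
{
        *pmdp = __pmd(phys | pgprot_val(prot)); /* write the descriptor    */
        flush_pmd_entry(pmdp);                  /* clean the cache line and
                                                   drain the write buffer  */
}
```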
380 | #undef tlb_flag | |
381 | #undef always_tlb_flags | |
382 | #undef possible_tlb_flags | |
383 | ||
384 | /* | |
385 | * Convert calls to our calling convention. | |
386 | */ | |
603fff54 RK |
387 | #define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) |
388 | #define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) | |
389 | ||
390 | #ifndef CONFIG_SMP | |
391 | #define flush_tlb_all local_flush_tlb_all | |
392 | #define flush_tlb_mm local_flush_tlb_mm | |
393 | #define flush_tlb_page local_flush_tlb_page | |
394 | #define flush_tlb_kernel_page local_flush_tlb_kernel_page | |
395 | #define flush_tlb_range local_flush_tlb_range | |
396 | #define flush_tlb_kernel_range local_flush_tlb_kernel_range | |
397 | #else | |
398 | extern void flush_tlb_all(void); | |
399 | extern void flush_tlb_mm(struct mm_struct *mm); | |
400 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); | |
401 | extern void flush_tlb_kernel_page(unsigned long kaddr); | |
402 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | |
403 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | |
404 | #endif | |
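On SMP, the local_* functions above act only on the calling CPU, so the flush_tlb_* entry points become real functions that broadcast the flush to every CPU. A rough sketch of the pattern, assuming the four-argument on_each_cpu() of this kernel generation (the real implementations live in the ARM SMP support code, not here):

```c
/* Sketch only -- not the in-tree implementation. */
static void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        /* run the local flush on each online CPU and wait for completion */
        on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}
```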
1da177e4 LT |
405 | |
406 | /* | |
407 | * if PG_dcache_dirty is set for the page, we need to ensure that any | |
408 | * cache entries for the kernel's virtual memory range are written | |
409 | * back to the page. | |
410 | */ | |
411 | extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); | |
412 | ||
413 | /* | |
414 | * ARM processors do not cache TLB tables in RAM. | |
415 | */ | |
416 | #define flush_tlb_pgtables(mm,start,end) do { } while (0) | |
417 | ||
418 | #endif | |
419 | ||
420 | #endif |