/*
 * linux/arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
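/*
 * arm_heavy_mb() backs the heavyweight mb() barrier on platforms that
 * select CONFIG_ARM_HEAVY_MB, where a DSB alone is not sufficient: it
 * synchronises any outer cache and then calls an optional SoC-specific
 * hook registered in soc_mb.
 */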
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
38 | ||
8d802d28 | 39 | #ifdef CONFIG_CPU_CACHE_VIPT |
d7b6b358 | 40 | |
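
/*
 * Clean and invalidate one page of data cache through a temporary
 * kernel alias placed at the same cache colour as the given user
 * address, so that an aliasing VIPT D-cache hits the user data.
 */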
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D-cache range */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer (DSB) */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}
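
/*
 * Map the page at a kernel alias congruent with the user address and
 * make the I-cache coherent over [vaddr, vaddr + len) via that alias.
 */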
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}
65 | ||
d7b6b358 RK |
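
/*
 * flush_cache_mm() is called when a whole address space is about to go
 * away or be rearranged (see Documentation/cachetlb.txt).  A VIVT cache
 * must flush everything it holds for the mm; an aliasing VIPT D-cache
 * is simply cleaned and invalidated in its entirety.
 */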
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"	/* drain write buffer (DSB) */
		    :
		    : "r" (0)
		    : "cc");
	}
}
81 | ||
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"	/* drain write buffer (DSB) */
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
100 | ||
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC		1
#define FLAG_PA_CORE_IN_MM	2
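
/*
 * IPI handler: invalidate the I-cache on a remote CPU when cache
 * maintenance operations are not broadcast in hardware.
 */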
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
2ef7f3db | 160 | |
72e6ae28 VK |
161 | static |
162 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |
163 | unsigned long uaddr, void *kaddr, unsigned long len) | |
164 | { | |
165 | unsigned int flags = 0; | |
166 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) | |
167 | flags |= FLAG_PA_CORE_IN_MM; | |
168 | if (vma->vm_flags & VM_EXEC) | |
169 | flags |= FLAG_PA_IS_EXEC; | |
170 | __flush_ptrace_access(page, uaddr, kaddr, len, flags); | |
171 | } | |
172 | ||
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}
240 | ||
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
271 | ||
6012191a CM |
272 | #if __LINUX_ARM_ARCH__ >= 6 |
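/*
 * Called when a user PTE is being established (on ARMv6+ this is hooked
 * from set_pte_at()) to keep the I-cache and D-cache coherent for
 * executable mappings.
 */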
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
299 | ||
1da177e4 LT |
300 | /* |
301 | * Ensure cache coherency between kernel mapping and userspace mapping | |
302 | * of this page. | |
303 | * | |
304 | * We have three cases to consider: | |
305 | * - VIPT non-aliasing cache: fully coherent so nothing required. | |
306 | * - VIVT: fully aliasing, so we need to handle every alias in our | |
307 | * current VM view. | |
308 | * - VIPT aliasing: need to handle one alias in our current VM view. | |
309 | * | |
310 | * If we need to handle aliasing: | |
311 | * If the page only exists in the page cache and there are no user | |
312 | * space mappings, we can be lazy and remember that we may have dirty | |
313 | * kernel cache lines for later. Otherwise, we assume we have | |
314 | * aliasing mappings. | |
df2f5e72 | 315 | * |
31bee4cf | 316 | * Note that we disable the lazy flush for SMP configurations where |
317 | * the cache maintenance operations are not automatically broadcasted. | |
1da177e4 LT |
318 | */ |
319 | void flush_dcache_page(struct page *page) | |
320 | { | |
421fe93c RK |
321 | struct address_space *mapping; |
322 | ||
323 | /* | |
324 | * The zero page is never written to, so never has any dirty | |
325 | * cache lines, and therefore never needs to be flushed. | |
326 | */ | |
327 | if (page == ZERO_PAGE(0)) | |
328 | return; | |
329 | ||
330 | mapping = page_mapping(page); | |
1da177e4 | 331 | |
85848dd7 | 332 | if (!cache_ops_need_broadcast() && |
e1534ae9 | 333 | mapping && !page_mapcount(page)) |
c0177800 | 334 | clear_bit(PG_dcache_clean, &page->flags); |
85848dd7 | 335 | else { |
1da177e4 | 336 | __flush_dcache_page(mapping, page); |
8830f04a RK |
337 | if (mapping && cache_is_vivt()) |
338 | __flush_dcache_aliases(mapping, page); | |
826cbdaf CM |
339 | else if (mapping) |
340 | __flush_icache_all(); | |
c0177800 | 341 | set_bit(PG_dcache_clean, &page->flags); |
8830f04a | 342 | } |
1da177e4 LT |
343 | } |
344 | EXPORT_SYMBOL(flush_dcache_page); | |

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}