Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/flush.c | |
3 | * | |
4 | * Copyright (C) 1995-2002 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/module.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/pagemap.h> | |
39af22a7 | 13 | #include <linux/highmem.h> |
1da177e4 LT |
14 | |
15 | #include <asm/cacheflush.h> | |
46097c7d | 16 | #include <asm/cachetype.h> |
7e5a69e8 | 17 | #include <asm/highmem.h> |
2ef7f3db | 18 | #include <asm/smp_plat.h> |
8d802d28 | 19 | #include <asm/tlbflush.h> |
0b19f933 | 20 | #include <linux/hugetlb.h> |
8d802d28 | 21 | |
1b2e2b73 RK |
22 | #include "mm.h" |
23 | ||
8d802d28 | 24 | #ifdef CONFIG_CPU_CACHE_VIPT |
d7b6b358 | 25 | |
/*
 * flush_pfn_alias() - write back and invalidate the D-cache lines for one
 * physical page through a temporary kernel alias.
 *
 * On an aliasing VIPT D-cache, cache lines are indexed by virtual address,
 * so the page must be flushed at a kernel address that is congruent
 * (same cache colour) with the userspace address @vaddr.  A dedicated
 * flush window at FLUSH_ALIAS_START is remapped onto @pfn for this.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	/* Pick the flush-window slot with the same cache colour as vaddr. */
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/* Point the top-level flush alias PTE at the target page. */
	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * MCRR p15,0,<end>,<start>,c14: clean+invalidate D-cache address
	 * range (ARMv6 block cache op); the trailing MCR c7,c10,4 drains
	 * the write buffer so the flush is complete before we return.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
39 | ||
/*
 * flush_icache_alias() - make @len bytes of a page coherent between the
 * D-cache and an aliasing VIPT I-cache.
 *
 * Maps @pfn at a kernel alias congruent with the user address @vaddr
 * (so the I-cache lines are indexed identically), then performs a
 * combined D-clean/I-invalidate over just the affected byte range.
 */
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	/* Byte offset of the range within the page. */
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}
50 | ||
/*
 * flush_cache_mm() - flush caches when an entire address space changes
 * (e.g. on fork/exit).
 *
 * VIVT: delegate to the VIVT implementation, which must flush everything
 * belonging to this mm.  Aliasing VIPT: the colour of any alias is
 * unknown here, so clean+invalidate the whole D-cache.  Non-aliasing
 * VIPT needs nothing.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * MCR c7,c14,0: clean+invalidate entire D-cache;
		 * MCR c7,c10,4: drain write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
66 | ||
67 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | |
68 | { | |
69 | if (cache_is_vivt()) { | |
2f0b1926 | 70 | vivt_flush_cache_range(vma, start, end); |
d7b6b358 RK |
71 | return; |
72 | } | |
73 | ||
74 | if (cache_is_vipt_aliasing()) { | |
75 | asm( "mcr p15, 0, %0, c7, c14, 0\n" | |
df71dfd4 | 76 | " mcr p15, 0, %0, c7, c10, 4" |
d7b6b358 RK |
77 | : |
78 | : "r" (0) | |
79 | : "cc"); | |
80 | } | |
9e95922b | 81 | |
6060e8df | 82 | if (vma->vm_flags & VM_EXEC) |
9e95922b | 83 | __flush_icache_all(); |
d7b6b358 RK |
84 | } |
85 | ||
86 | void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) | |
87 | { | |
88 | if (cache_is_vivt()) { | |
2f0b1926 | 89 | vivt_flush_cache_page(vma, user_addr, pfn); |
d7b6b358 RK |
90 | return; |
91 | } | |
92 | ||
2df341ed | 93 | if (cache_is_vipt_aliasing()) { |
d7b6b358 | 94 | flush_pfn_alias(pfn, user_addr); |
2df341ed RK |
95 | __flush_icache_all(); |
96 | } | |
9e95922b RK |
97 | |
98 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) | |
99 | __flush_icache_all(); | |
d7b6b358 | 100 | } |
c4e259c8 | 101 | |
2ef7f3db | 102 | #else |
c4e259c8 WD |
103 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) |
104 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) | |
2ef7f3db | 105 | #endif |
a188ad2b | 106 | |
72e6ae28 VK |
107 | #define FLAG_PA_IS_EXEC 1 |
108 | #define FLAG_PA_CORE_IN_MM 2 | |
109 | ||
/*
 * IPI handler: invalidate this CPU's entire I-cache.  Used when cache
 * maintenance operations are not broadcast in hardware and other cores
 * may hold stale instructions (see __flush_ptrace_access()).
 */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
2ef7f3db | 114 | |
/*
 * __flush_ptrace_access() - make data written through a kernel mapping
 * (@kaddr) visible/coherent at the user mapping (@uaddr) of @page.
 *
 * @flags: FLAG_PA_CORE_IN_MM - this CPU currently runs the target mm;
 *         FLAG_PA_IS_EXEC    - the user mapping is executable.
 *
 * Strategy depends on the cache type:
 *  - VIVT: only useful if this core is in the target mm (the user alias
 *    lives in this CPU's cache); make the kernel range coherent.
 *  - aliasing VIPT: flush via a colour-congruent alias and invalidate
 *    the I-cache.
 *  - non-aliasing VIPT D-cache: D-side is coherent; only executable
 *    mappings need I-cache maintenance, either through an alias (for an
 *    aliasing VIPT I-cache) or a kernel coherent range op.  If cache
 *    ops are not broadcast in hardware, tell the other CPUs too.
 */
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
2ef7f3db | 145 | |
72e6ae28 VK |
146 | static |
147 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |
148 | unsigned long uaddr, void *kaddr, unsigned long len) | |
149 | { | |
150 | unsigned int flags = 0; | |
151 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) | |
152 | flags |= FLAG_PA_CORE_IN_MM; | |
153 | if (vma->vm_flags & VM_EXEC) | |
154 | flags |= FLAG_PA_IS_EXEC; | |
155 | __flush_ptrace_access(page, uaddr, kaddr, len, flags); | |
156 | } | |
157 | ||
158 | void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, | |
159 | void *kaddr, unsigned long len) | |
160 | { | |
161 | unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC; | |
162 | ||
163 | __flush_ptrace_access(page, uaddr, kaddr, len, flags); | |
164 | } | |
165 | ||
2ef7f3db RK |
166 | /* |
167 | * Copy user data from/to a page which is mapped into a different | |
168 | * processes address space. Really, we want to allow our "user | |
169 | * space" model to handle this. | |
170 | * | |
171 | * Note that this code needs to run on the current CPU. | |
172 | */ | |
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/*
	 * flush_ptrace_access() consults smp_processor_id()/mm_cpumask(),
	 * so the copy and the flush must happen on the same CPU without
	 * migration in between.
	 */
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
1da177e4 | 186 | |
/*
 * __flush_dcache_page() - write back the kernel-mapping view of @page
 * (which may be a compound/huge page) and, for an aliasing VIPT cache,
 * the one page-cache colour userspace would map it at.
 */
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernels mapping.
	 */
	if (!PageHighMem(page)) {
		/* Lowmem: permanently mapped, flush all subpages in one go. */
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			/* Any temporary mapping works: cache is physically indexed. */
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			/*
			 * Aliasing cache: only flush via an existing kmap
			 * (kmap_high_get() returns NULL when there is none,
			 * i.e. no kernel alias can hold dirty lines).
			 */
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
225 | ||
/*
 * __flush_dcache_aliases() - flush every user mapping of @page that
 * belongs to the current address space.
 *
 * Walks @mapping's VMA interval tree at the page's file offset and
 * calls flush_cache_page() on each shared mapping in current->active_mm.
 */
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		/* Private mappings have their own copy; nothing to flush. */
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		/* User virtual address of the page inside this VMA. */
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
256 | ||
#if __LINUX_ARM_ARCH__ >= 6
/*
 * __sync_icache_dcache() - called at set_pte time to make the page
 * behind @pteval coherent for its new mapping.
 *
 * Flushes the D-cache at most once per page (PG_dcache_clean tracks
 * this) and invalidates the I-cache for executable mappings.
 */
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	/* Only an aliasing cache cares about the userspace colour (mapping). */
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	/* First mapping since the page was dirtied: flush it once. */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
284 | ||
1da177e4 LT |
285 | /* |
286 | * Ensure cache coherency between kernel mapping and userspace mapping | |
287 | * of this page. | |
288 | * | |
289 | * We have three cases to consider: | |
290 | * - VIPT non-aliasing cache: fully coherent so nothing required. | |
291 | * - VIVT: fully aliasing, so we need to handle every alias in our | |
292 | * current VM view. | |
293 | * - VIPT aliasing: need to handle one alias in our current VM view. | |
294 | * | |
295 | * If we need to handle aliasing: | |
296 | * If the page only exists in the page cache and there are no user | |
297 | * space mappings, we can be lazy and remember that we may have dirty | |
298 | * kernel cache lines for later. Otherwise, we assume we have | |
299 | * aliasing mappings. | |
df2f5e72 | 300 | * |
31bee4cf | 301 | * Note that we disable the lazy flush for SMP configurations where |
302 | * the cache maintenance operations are not automatically broadcasted. | |
1da177e4 LT |
303 | */ |
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/*
	 * Page-cache page with no user mappings: defer the flush by
	 * clearing PG_dcache_clean; __sync_icache_dcache() will do it
	 * when the page is first mapped.  Lazy flushing is disabled when
	 * cache ops need software broadcast (see comment above).
	 */
	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
1bc39742 SB |
330 | |
331 | /* | |
332 | * Ensure cache coherency for the kernel mapping of this page. We can | |
333 | * assume that the page is pinned via kmap. | |
334 | * | |
335 | * If the page only exists in the page cache and there are no user | |
336 | * space mappings, this is a no-op since the page was already marked | |
337 | * dirty at creation. Otherwise, we need to flush the dirty kernel | |
338 | * cache lines directly. | |
339 | */ | |
void flush_kernel_dcache_page(struct page *page)
{
	/* Non-aliasing VIPT caches need nothing: kernel writes are coherent. */
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		/* Anonymous page, or page-cache page with user mappings. */
		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
6020dff0 RK |
363 | |
364 | /* | |
365 | * Flush an anonymous page so that users of get_user_pages() | |
366 | * can safely access the data. The expected sequence is: | |
367 | * | |
368 | * get_user_pages() | |
369 | * -> flush_anon_page | |
370 | * memcpy() to/from page | |
371 | * if written to page, flush_dcache_page() | |
372 | */ | |
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}