Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/flush.c | |
3 | * | |
4 | * Copyright (C) 1995-2002 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/module.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/pagemap.h> | |
39af22a7 | 13 | #include <linux/highmem.h> |
1da177e4 LT |
14 | |
15 | #include <asm/cacheflush.h> | |
46097c7d | 16 | #include <asm/cachetype.h> |
7e5a69e8 | 17 | #include <asm/highmem.h> |
2ef7f3db | 18 | #include <asm/smp_plat.h> |
8d802d28 RK |
19 | #include <asm/tlbflush.h> |
20 | ||
1b2e2b73 RK |
21 | #include "mm.h" |
22 | ||
8d802d28 | 23 | #ifdef CONFIG_CPU_CACHE_VIPT |
d7b6b358 | 24 | |
481467d6 CM |
/*
 * Clean+invalidate the D-cache lines for the physical page 'pfn' through
 * a temporary kernel alias that has the same cache colour as the user
 * address 'vaddr'.  Only meaningful on aliasing VIPT D-caches, where the
 * cache is indexed by virtual address.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	/* Colour-matched alias address inside the FLUSH_ALIAS window. */
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/* Map the page at the alias before operating on it. */
	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * mcrr p15,0,<end>,<start>,c14: clean+invalidate the D-cache over
	 * [to, to + PAGE_SIZE - L1_CACHE_BYTES]; the following
	 * mcr p15,0,<zero>,c7,c10,4 drains the write buffer.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
38 | ||
c4e259c8 WD |
39 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) |
40 | { | |
67ece144 | 41 | unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
c4e259c8 WD |
42 | unsigned long offset = vaddr & (PAGE_SIZE - 1); |
43 | unsigned long to; | |
44 | ||
67ece144 RK |
45 | set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL)); |
46 | to = va + offset; | |
c4e259c8 WD |
47 | flush_icache_range(to, to + len); |
48 | } | |
49 | ||
d7b6b358 RK |
/*
 * Flush cached data belonging to the address space 'mm'.
 * VIVT caches take the per-mm path; non-aliasing VIPT caches need
 * nothing at all.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Aliasing VIPT: clean+invalidate the entire D-cache
		 * (c7,c14,0), then drain the write buffer (c7,c10,4).
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
65 | ||
/*
 * Flush caches for the user address range [start, end) within 'vma'.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Aliasing VIPT: clean+invalidate the entire D-cache,
		 * then drain the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	/* Keep the I-cache coherent for executable mappings. */
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
84 | ||
/*
 * Flush caches for the single user page 'pfn' mapped at 'user_addr'
 * within 'vma'.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* Flush via a colour-matched kernel alias of the page. */
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	/* ASID-tagged VIVT I-caches need an explicit I-cache flush too. */
	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
c4e259c8 | 100 | |
2ef7f3db | 101 | #else |
c4e259c8 WD |
102 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) |
103 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) | |
2ef7f3db | 104 | #endif |
a188ad2b | 105 | |
2ef7f3db RK |
/*
 * Cross-call helper: invalidate the whole I-cache on a remote CPU.
 * Invoked via smp_call_function() from flush_ptrace_access(); 'args'
 * is unused.
 */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
2ef7f3db RK |
110 | |
/*
 * Make caches coherent after the kernel has written into a user page
 * through the kernel alias 'kaddr' (used by copy_to_user_page()).
 * 'uaddr' is the user virtual address of the same data and 'len' the
 * number of bytes written.
 */
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		/* Only flush if this CPU may hold lines for vma's mm. */
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* Flush through a colour-matched alias of the user address. */
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		/* Other CPUs do not snoop our maintenance ops: IPI them. */
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
2ef7f3db RK |
141 | |
142 | /* | |
143 | * Copy user data from/to a page which is mapped into a different | |
144 | * processes address space. Really, we want to allow our "user | |
145 | * space" model to handle this. | |
146 | * | |
147 | * Note that this code needs to run on the current CPU. | |
148 | */ | |
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	/*
	 * The cache maintenance in flush_ptrace_access() must run on
	 * the same CPU that performed the copy.
	 */
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
1da177e4 | 162 | |
8830f04a | 163 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
1da177e4 | 164 | { |
1da177e4 LT |
165 | /* |
166 | * Writeback any data associated with the kernel mapping of this | |
167 | * page. This ensures that data in the physical page is mutually | |
168 | * coherent with the kernels mapping. | |
169 | */ | |
7e5a69e8 NP |
170 | if (!PageHighMem(page)) { |
171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); | |
172 | } else { | |
dd0f67f4 JK |
173 | void *addr; |
174 | ||
175 | if (cache_is_vipt_nonaliasing()) { | |
39af22a7 | 176 | addr = kmap_atomic(page); |
7e5a69e8 | 177 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
39af22a7 | 178 | kunmap_atomic(addr); |
dd0f67f4 JK |
179 | } else { |
180 | addr = kmap_high_get(page); | |
181 | if (addr) { | |
182 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | |
183 | kunmap_high(page); | |
184 | } | |
7e5a69e8 NP |
185 | } |
186 | } | |
1da177e4 LT |
187 | |
188 | /* | |
8830f04a RK |
189 | * If this is a page cache page, and we have an aliasing VIPT cache, |
190 | * we only need to do one flush - which would be at the relevant | |
8d802d28 RK |
191 | * userspace colour, which is congruent with page->index. |
192 | */ | |
f91fb05d | 193 | if (mapping && cache_is_vipt_aliasing()) |
8830f04a RK |
194 | flush_pfn_alias(page_to_pfn(page), |
195 | page->index << PAGE_CACHE_SHIFT); | |
196 | } | |
197 | ||
198 | static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) | |
199 | { | |
200 | struct mm_struct *mm = current->active_mm; | |
201 | struct vm_area_struct *mpnt; | |
8830f04a | 202 | pgoff_t pgoff; |
8d802d28 | 203 | |
1da177e4 LT |
204 | /* |
205 | * There are possible user space mappings of this page: | |
206 | * - VIVT cache: we need to also write back and invalidate all user | |
207 | * data in the current VM view associated with this page. | |
208 | * - aliasing VIPT: we only need to find one mapping of this page. | |
209 | */ | |
210 | pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | |
211 | ||
212 | flush_dcache_mmap_lock(mapping); | |
6b2dbba8 | 213 | vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { |
1da177e4 LT |
214 | unsigned long offset; |
215 | ||
216 | /* | |
217 | * If this VMA is not in our MM, we can ignore it. | |
218 | */ | |
219 | if (mpnt->vm_mm != mm) | |
220 | continue; | |
221 | if (!(mpnt->vm_flags & VM_MAYSHARE)) | |
222 | continue; | |
223 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; | |
224 | flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); | |
1da177e4 LT |
225 | } |
226 | flush_dcache_mmap_unlock(mapping); | |
227 | } | |
228 | ||
6012191a CM |
#if __LINUX_ARM_ARCH__ >= 6
/*
 * Ensure I-cache/D-cache coherency for the page referenced by 'pteval'.
 * Performs a lazy D-cache flush (tracked via PG_dcache_clean) and, for
 * executable mappings, invalidates the I-cache.
 */
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		/* Needed so __flush_dcache_page() can pick the colour. */
		mapping = page_mapping(page);
	else
		mapping = NULL;

	/* Flush only if a lazy flush was pending (bit was clear). */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
256 | ||
1da177e4 LT |
257 | /* |
258 | * Ensure cache coherency between kernel mapping and userspace mapping | |
259 | * of this page. | |
260 | * | |
261 | * We have three cases to consider: | |
262 | * - VIPT non-aliasing cache: fully coherent so nothing required. | |
263 | * - VIVT: fully aliasing, so we need to handle every alias in our | |
264 | * current VM view. | |
265 | * - VIPT aliasing: need to handle one alias in our current VM view. | |
266 | * | |
267 | * If we need to handle aliasing: | |
268 | * If the page only exists in the page cache and there are no user | |
269 | * space mappings, we can be lazy and remember that we may have dirty | |
270 | * kernel cache lines for later. Otherwise, we assume we have | |
271 | * aliasing mappings. | |
df2f5e72 | 272 | * |
31bee4cf | 273 | * Note that we disable the lazy flush for SMP configurations where |
274 | * the cache maintenance operations are not automatically broadcasted. | |
1da177e4 LT |
275 | */ |
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/*
	 * Lazy path: an unmapped page-cache page only needs its
	 * PG_dcache_clean bit cleared; the actual flush is deferred to
	 * __sync_icache_dcache() when a user mapping is set up.  This
	 * is only safe when cache maintenance is broadcast automatically
	 * between CPUs.
	 */
	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			/* VIVT: each user mapping is a distinct alias. */
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
6020dff0 RK |
302 | |
303 | /* | |
304 | * Flush an anonymous page so that users of get_user_pages() | |
305 | * can safely access the data. The expected sequence is: | |
306 | * | |
307 | * get_user_pages() | |
308 | * -> flush_anon_page | |
309 | * memcpy() to/from page | |
310 | * if written to page, flush_dcache_page() | |
311 | */ | |
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 *
	 * NOTE(review): page_address() returns NULL for highmem pages
	 * without a persistent kernel mapping — presumably pages seen
	 * here are lowmem or already kmapped; verify against callers.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}