ARM: move heavy barrier support out of line
[deliverable/linux.git] / arch/arm/mm/flush.c
/*
 * linux/arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

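/*
 * "Heavy" barrier: on SoCs with an outer (e.g. L2) cache, a full barrier
 * must also sync the outer cache.  Keeping this path out of line means
 * <asm/barrier.h> need not drag the outer-cache definitions into every
 * mb() user.  Roughly (a sketch, not the verbatim barrier.h macro):
 *
 *	#define mb()	do { dsb(); arm_heavy_mb(); } while (0)
 */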
#ifdef CONFIG_ARM_HEAVY_MB
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

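/*
 * An aliasing VIPT D-cache indexes by virtual address, so the kernel's
 * own mapping of a page does not reach the cache lines that a user
 * mapping dirtied.  The helpers below therefore map the page at a
 * kernel alias with the same cache colour as the user address and
 * flush through that alias instead.
 */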
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D-cache range %0..%1 */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer (DSB) */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

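/*
 * Same alias trick for the I-cache: map the page at the user's cache
 * colour, then let flush_icache_range() clean the D-cache and
 * invalidate the I-cache over just the affected bytes.
 */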
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

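/*
 * For an aliasing VIPT cache a per-mm or per-range flush is not
 * practical, so flush_cache_mm() and flush_cache_range() simply clean
 * and invalidate the entire D-cache and drain the write buffer.
 */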
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer (DSB) */
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer (DSB) */
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

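/*
 * Flags for __flush_ptrace_access(): FLAG_PA_IS_EXEC means the user
 * mapping is executable, so the I-cache must be made coherent too;
 * FLAG_PA_CORE_IN_MM means the calling CPU currently has the target
 * mm live in its address space.
 */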
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

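/*
 * Make data the kernel just wrote at kaddr visible at the user address
 * uaddr.  VIVT: flush the kernel range (only useful when this core has
 * the target mm); aliasing VIPT: flush via a congruent alias; otherwise
 * the D-cache is coherent and only executable mappings need I-cache
 * maintenance, broadcast by IPI where the hardware does not propagate
 * cache operations between cores.
 */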
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

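/*
 * Note that compound (huge) pages are covered here as well: the lowmem
 * path flushes PAGE_SIZE << compound_order(page) bytes in one call,
 * while the highmem paths walk the constituent pages one kmap at a time.
 */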
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

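/*
 * Walk every shared VMA in the current mm which maps this page-cache
 * page, and flush the user-visible alias at each mapping.  Only needed
 * for VIVT caches; aliasing VIPT gets a single congruent flush in
 * __flush_dcache_page().
 */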
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

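/*
 * Called on ARMv6+ when a new user pte is installed (via set_pte_at()).
 * PG_dcache_clean makes the D-cache writeback lazy, so the flush happens
 * at most once per page until the page is dirtied again.
 */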
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

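/*
 * The dummy IPI below is what serialises the pmd split against the
 * interrupt-disabled fast_gup walker: kick_all_cpus_sync() cannot
 * return until every CPU has taken the IPI, and a CPU inside fast_gup
 * (IRQs off) will not take it until its walk is finished.
 */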
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);

	/* dummy IPI to serialise against fast_gup */
	kick_all_cpus_sync();
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */