ARM: Fix errata 411920 workarounds
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

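/*
 * flush_pfn_alias() handles aliasing VIPT caches: the same physical
 * page can sit in the cache at several virtual addresses differing in
 * cache colour (CACHE_COLOUR() typically takes bits 13:12 of the
 * address, since SHMLBA spans four pages).  The page is temporarily
 * mapped at a reserved kernel address with the same colour as the
 * user address, and that alias is flushed instead.  In the asm below,
 * "mcrr p15, 0, <end>, <start>, c14" is the ARMv6 clean-and-invalidate
 * D-cache range operation, and "mcr p15, 0, %2, c7, c10, 4" drains
 * the write buffer.
 */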
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
	__flush_icache_all();
}
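
/*
 * Worked example (assuming 4K pages and an SHMLBA of four pages, so
 * that CACHE_COLOUR() extracts virtual address bits 13:12): for a
 * hypothetical user address 0xb7f0b000 the colour is 0x3000 >> 12 = 3,
 * so the page is flushed through the alias at
 * 0xffff4000 + (3 << PAGE_SHIFT) = 0xffff7000, which is congruent
 * with the user mapping.
 */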

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
		__flush_icache_all();
	}
}

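/*
 * Neither flush_cache_mm() above nor flush_cache_range() below can
 * pick out a single alias, so on an aliasing VIPT cache both fall
 * back to flushing everything: "mcr p15, 0, %0, c7, c14, 0" cleans
 * and invalidates the entire D-cache, and "mcr p15, 0, %0, c7, c10, 4"
 * drains the write buffer.
 */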
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
		__flush_icache_all();
	}
}

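/*
 * flush_cache_page() is the single-page variant: on VIVT the user
 * range covering the page is written back and invalidated directly,
 * while on an aliasing VIPT cache it is enough to flush one congruent
 * alias via flush_pfn_alias().
 */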
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}
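
/*
 * flush_ptrace_access() keeps a traced task's view of memory coherent
 * with the kernel-side copy that ptrace writes through.  A sketch of
 * the expected call sequence (the usual ARM entry point is the
 * copy_to_user_page() wrapper, assuming this kernel follows that
 * arrangement):
 *
 *	get_user_pages()
 *	-> kmap() the page and memcpy() the new data into it
 *	-> flush_ptrace_access()
 *
 * so that, for example, a breakpoint poked into another task's text
 * becomes visible to that task's instruction stream.
 */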
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Write back any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(page))
#endif
		__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
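
/*
 * Note: page->index << PAGE_CACHE_SHIFT recovers the page's byte
 * offset in the file, and shared user mappings are expected to be
 * colour-aligned (mmap honours SHMLBA for shared mappings), so every
 * user mapping of a page cache page sees it at the same cache colour.
 * Flushing one alias at that colour is therefore enough on an
 * aliasing VIPT cache.
 */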

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
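
/*
 * The arithmetic above: pgoff is the page's offset in the file in
 * PAGE_SIZE units, mpnt->vm_pgoff is the file offset at which the VMA
 * starts, so (pgoff - vm_pgoff) << PAGE_SHIFT is the byte offset of
 * the page within the VMA, and vm_start + offset is its user virtual
 * address in that particular mapping.
 */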

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
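
/*
 * Sketch of the deferred path, for orientation: the PG_dcache_dirty
 * bit set below is tested again when the page is finally mapped into
 * user space - in kernels of this vintage, from update_mmu_cache() in
 * arch/arm/mm/fault-armv.c - and the postponed flush is carried out
 * there.
 */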
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
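
/*
 * Typical caller's view (a sketch, not taken from this file): a
 * driver or filesystem that writes to a page cache page through the
 * kernel mapping is expected to call flush_dcache_page() afterwards:
 *
 *	addr = kmap(page);
 *	memcpy(addr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 *
 * so that user space mappings of the page observe the new data.
 */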

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}
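
/*
 * Callers are expected to reach this through the flush_anon_page()
 * wrapper (see asm/cacheflush.h), which normally calls
 * __flush_anon_page() only for anonymous (PageAnon()) pages; page
 * cache pages are handled via flush_dcache_page() instead.
 */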