Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
f26b2a56 | 2 | * arch/sh/mm/cache.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | |
a6198a23 | 5 | * Copyright (C) 2002 - 2010 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | |
8 | */ | |
1da177e4 | 9 | #include <linux/mm.h> |
acca4f4d | 10 | #include <linux/init.h> |
52e27782 | 11 | #include <linux/mutex.h> |
e06c4e57 | 12 | #include <linux/fs.h> |
f26b2a56 | 13 | #include <linux/smp.h> |
7747b9a4 PM |
14 | #include <linux/highmem.h> |
15 | #include <linux/module.h> | |
1da177e4 LT |
16 | #include <asm/mmu_context.h> |
17 | #include <asm/cacheflush.h> | |
18 | ||
/*
 * Per-CPU cache operation hooks.  Each pointer starts out as cache_noop
 * and is overridden by the CPU-family specific init code invoked from
 * cpu_cache_init(); when the cache is disabled they deliberately stay
 * as no-ops.  The local_ prefixed variants operate on the calling CPU
 * only — the SMP-aware wrappers below fan them out via
 * cacheop_on_each_cpu().
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

/*
 * Region-based flush primitives, likewise filled in per CPU family and
 * exported for module use.  Defaulted to noop__flush_region below.
 */
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
/*
 * Default no-op implementation for the __flush_*_region() hooks, used
 * until a CPU-specific implementation is installed (or permanently when
 * the cache is disabled).
 */
static inline void noop__flush_region(void *start, int size)
{
}
/*
 * Run a local cache operation on every online CPU.
 *
 * @func: local flush routine to invoke
 * @info: opaque argument handed through to @func
 * @wait: whether smp_call_function() should wait for remote CPUs
 *
 * The routine always runs on the calling CPU (with preemption disabled
 * so we do not migrate mid-operation); on SHX3 parts it is additionally
 * broadcast to the other CPUs via IPI, since cross-core flushing there
 * is not handled in hardware.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	/* Always perform the operation locally as well. */
	func(info);

	preempt_enable();
}
/*
 * Write data into a page that may also be mapped in userspace at
 * @vaddr, keeping virtually-aliased dcaches coherent.
 *
 * If the dcache has aliases and the page is mapped and known clean
 * (PG_dcache_clean set), write through a kernel mapping that shares the
 * user cache colour (kmap_coherent) so user-visible lines are updated
 * directly.  Otherwise, write through the plain kernel mapping and mark
 * the page as needing a flush before the next user-side access.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		/* Offset the coherent mapping to the in-page position. */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	/* Executable mappings must also see coherent icache contents. */
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
/*
 * Read data from a page that may also be mapped in userspace at
 * @vaddr, the mirror image of copy_to_user_page().
 *
 * When dcache aliases exist and the page is mapped and clean, read via
 * a colour-matched kernel mapping so the most recent user-side lines
 * are observed.  Otherwise fall back to a plain copy and clear
 * PG_dcache_clean so a later fault path knows the kernel view may be
 * stale with respect to user aliases.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}
/*
 * Copy a user page @from to @to for a mapping at user address @vaddr
 * (e.g. on copy-on-write), maintaining dcache/icache coherency on
 * aliasing caches.
 *
 * The source is read through a colour-matched coherent mapping when the
 * page is mapped and clean; otherwise through a plain atomic mapping.
 * The destination is purged if its kernel address aliases @vaddr, or
 * unconditionally for executable VMAs so stale icache lines cannot be
 * executed.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
dfff0fa6 PM |
123 | |
124 | void clear_user_highpage(struct page *page, unsigned long vaddr) | |
125 | { | |
bc3e11be | 126 | void *kaddr = kmap_atomic(page); |
dfff0fa6 | 127 | |
7e01c949 | 128 | clear_page(kaddr); |
dfff0fa6 | 129 | |
7e01c949 PM |
130 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) |
131 | __flush_purge_region(kaddr, PAGE_SIZE); | |
dfff0fa6 | 132 | |
bc3e11be | 133 | kunmap_atomic(kaddr); |
dfff0fa6 PM |
134 | } |
135 | EXPORT_SYMBOL(clear_user_highpage); | |
9cef7492 PM |
136 | |
137 | void __update_cache(struct vm_area_struct *vma, | |
138 | unsigned long address, pte_t pte) | |
139 | { | |
140 | struct page *page; | |
141 | unsigned long pfn = pte_pfn(pte); | |
142 | ||
143 | if (!boot_cpu_data.dcache.n_aliases) | |
144 | return; | |
145 | ||
146 | page = pfn_to_page(pfn); | |
964f7e5a | 147 | if (pfn_valid(pfn)) { |
55661fc1 | 148 | int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags); |
76382b5b MP |
149 | if (dirty) |
150 | __flush_purge_region(page_address(page), PAGE_SIZE); | |
9cef7492 PM |
151 | } |
152 | } | |
/*
 * Flush an anonymous page mapped at user address @vmaddr before the
 * kernel reads it through its linear mapping (flush_anon_page() path).
 * Only needed when the kernel and user addresses land in different
 * cache colours.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
/* Flush the entire cache on every online CPU. */
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);
f26b2a56 PM |
177 | |
178 | void flush_cache_mm(struct mm_struct *mm) | |
179 | { | |
654d364e PM |
180 | if (boot_cpu_data.dcache.n_aliases == 0) |
181 | return; | |
182 | ||
6f379578 | 183 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); |
f26b2a56 PM |
184 | } |
185 | ||
186 | void flush_cache_dup_mm(struct mm_struct *mm) | |
187 | { | |
654d364e PM |
188 | if (boot_cpu_data.dcache.n_aliases == 0) |
189 | return; | |
190 | ||
6f379578 | 191 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); |
f26b2a56 PM |
192 | } |
193 | ||
194 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, | |
195 | unsigned long pfn) | |
196 | { | |
197 | struct flusher_data data; | |
198 | ||
199 | data.vma = vma; | |
200 | data.addr1 = addr; | |
201 | data.addr2 = pfn; | |
202 | ||
6f379578 | 203 | cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1); |
f26b2a56 PM |
204 | } |
205 | ||
206 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |
207 | unsigned long end) | |
208 | { | |
209 | struct flusher_data data; | |
210 | ||
211 | data.vma = vma; | |
212 | data.addr1 = start; | |
213 | data.addr2 = end; | |
214 | ||
6f379578 | 215 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); |
f26b2a56 | 216 | } |
0a993b0a | 217 | EXPORT_SYMBOL(flush_cache_range); |
/* Write back and invalidate @page's dcache lines on every online CPU. */
void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);
f26b2a56 PM |
224 | |
225 | void flush_icache_range(unsigned long start, unsigned long end) | |
226 | { | |
227 | struct flusher_data data; | |
228 | ||
229 | data.vma = NULL; | |
230 | data.addr1 = start; | |
231 | data.addr2 = end; | |
232 | ||
6f379578 | 233 | cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1); |
f26b2a56 | 234 | } |
e3560305 | 235 | EXPORT_SYMBOL(flush_icache_range); |
/* Flush @page's icache lines on every online CPU. */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}
/*
 * Flush the cache line containing a signal trampoline at @address on
 * every online CPU, so the just-written trampoline instructions are
 * visible to the icache.  The address is smuggled through the void *
 * argument rather than a flusher_data.
 */
void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
/*
 * Derive the aliasing properties of a cache from its probed geometry.
 *
 * alias_mask covers the index bits that lie above the page offset —
 * i.e. the bits of a virtual address that select the cache colour.
 * Without an MMU there are no virtual aliases, so the mask is zero.
 * n_aliases is the resulting number of distinct page colours (0 when
 * the cache cannot alias).
 */
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
	c->alias_mask = 0;
#endif
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
257 | ||
258 | static void __init emit_cache_params(void) | |
259 | { | |
260 | printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
261 | boot_cpu_data.icache.ways, | |
262 | boot_cpu_data.icache.sets, | |
263 | boot_cpu_data.icache.way_incr); | |
264 | printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
265 | boot_cpu_data.icache.entry_mask, | |
266 | boot_cpu_data.icache.alias_mask, | |
267 | boot_cpu_data.icache.n_aliases); | |
268 | printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
269 | boot_cpu_data.dcache.ways, | |
270 | boot_cpu_data.dcache.sets, | |
271 | boot_cpu_data.dcache.way_incr); | |
272 | printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
273 | boot_cpu_data.dcache.entry_mask, | |
274 | boot_cpu_data.dcache.alias_mask, | |
275 | boot_cpu_data.dcache.n_aliases); | |
276 | ||
277 | /* | |
278 | * Emit Secondary Cache parameters if the CPU has a probed L2. | |
279 | */ | |
280 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | |
281 | printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
282 | boot_cpu_data.scache.ways, | |
283 | boot_cpu_data.scache.sets, | |
284 | boot_cpu_data.scache.way_incr); | |
285 | printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
286 | boot_cpu_data.scache.entry_mask, | |
287 | boot_cpu_data.scache.alias_mask, | |
288 | boot_cpu_data.scache.n_aliases); | |
289 | } | |
290 | } | |
/*
 * Boot-time cache setup for the current CPU family.
 *
 * Computes alias information for all probed caches, installs no-op
 * region flushers as a safe default, and then hands off to the
 * family/type specific initializer (declared __weak so configurations
 * that do not build a given variant still link).  When the cache is
 * disabled in CCR everything is left as no-ops.  Finally the probed
 * parameters are logged.
 */
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	/* Cache may have been left disabled by the bootloader. */
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	/* Safe defaults until a CPU-specific implementation is chosen. */
	__flush_wback_region = noop__flush_region;
	__flush_purge_region = noop__flush_region;
	__flush_invalidate_region = noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		extern void __weak j2_cache_init(void);

		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		/* SH7705 with a 32KB (512-set) dcache needs its own handling. */
		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		/* SHX3/SH7786 layer additional (SMP) handling on top of SH4. */
		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}