/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

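/*
 * Per-CPU cache flush primitives. Each defaults to cache_noop and is
 * repointed at a real implementation by the family-specific cache init
 * code selected in cpu_cache_init() below.
 */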
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

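/*
 * Range-based flush primitives over kernel virtual addresses:
 * write-back dirty lines, purge (write-back and invalidate), and
 * invalidate without write-back, respectively.
 */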
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

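/*
 * Run a cache operation on every CPU: broadcast it via an IPI to the
 * other processors, then perform it locally as well.
 */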
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();
        smp_call_function(func, info, wait);
        func(info);
        preempt_enable();
}

/*
 * copy_to_user_page()
 * @vma: vm_area_struct holding the pages
 * @page: page being written to
 * @vaddr: user space address
 * @dst: address of page in kernel space (possibly from kmap)
 * @src: source address in kernel logical memory
 * @len: length of data in bytes (may be less than PAGE_SIZE)
 *
 * Copy data into the address space of a process other than the current
 * process (eg for ptrace).
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

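/*
 * copy_from_user_page() is the read-side analogue of the above: pull
 * data out of another process' address space (eg for ptrace), going
 * through a coherent kernel mapping when the page is mapped and clean
 * so that the user's cache colour is the one actually read.
 */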
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

/*
 * copy_user_highpage()
 * @to: destination page
 * @from: source page
 * @vaddr: address of pages in user address space
 * @vma: vm_area_struct holding the pages
 *
 * This is used in COW implementation to copy data from page @from to
 * page @to. @from was previously mapped at @vaddr, and @to will be.
 * As this is used only in the COW implementation, this means that the
 * source is unmodified, and so we don't have to worry about cache
 * aliasing on that side.
 */
#ifdef CONFIG_HIGHMEM
/*
 * If we ever have a real highmem system, this code will need fixing
 * (as will clear_user/clear_user_highmem), because the kmap potentially
 * creates another alias risk.
 */
#error This code is broken with real HIGHMEM
#endif

void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);
        vfrom = kmap_atomic(from, KM_USER0);

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_invalidate_region(vto, PAGE_SIZE);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                void *vto_coloured = kmap_coherent(to, vaddr);
                copy_page(vto_coloured, vfrom);
                kunmap_coherent(vto_coloured);
        } else
                copy_page(vto, vfrom);

        kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);

        /* Make sure this page is copied on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

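/*
 * Zero a page that will be mapped at user address @vaddr, clearing
 * through a coherent mapping when the plain kernel mapping would alias
 * with the user's cache colour.
 */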
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) {
                void *vto;

                /* Kernel alias may have modified data in the cache. */
                __flush_invalidate_region(kaddr, PAGE_SIZE);

                vto = kmap_coherent(page, vaddr);
                clear_page(vto);
                kunmap_coherent(vto);
        } else
                clear_page(kaddr);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

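/*
 * Called on a PTE update: if the page was left with a deferred
 * (PG_dcache_dirty) kernel-side write and the new user mapping would
 * alias with the kernel address, purge the kernel alias now.
 */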
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        unsigned long addr = (unsigned long)page_address(page);

                        if (pages_do_alias(addr, address & PAGE_MASK))
                                __flush_purge_region((void *)addr, PAGE_SIZE);
                }
        }
}

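/*
 * Flush an anonymous page seen through a user mapping at @vmaddr that
 * aliases with its kernel address.
 */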
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

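/*
 * The global flush_*() entry points below simply fan the corresponding
 * local_flush_*() operation out to every CPU via cacheop_on_each_cpu().
 */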
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

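/*
 * Operations taking more than one argument marshal them through a
 * flusher_data block, since the cross-CPU call only carries a single
 * void * of context.
 */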
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

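/*
 * Derive, for one cache, the virtual-index bits that lie above the
 * page offset (alias_mask) and how many distinct page colours
 * (n_aliases) they produce.
 */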
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

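/*
 * Worked example (assumed figures, SH-4 style): 512 sets with an
 * entry_shift of 5 (32-byte lines) give a 16KB way. With 4KB pages,
 * alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000, and
 * n_aliases = (0x3000 >> PAGE_SHIFT) + 1 = 4 possible colours.
 */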
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

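/*
 * Boot-time entry point: size up the caches, install the noop
 * defaults, and hand off to the appropriate CPU family's cache init.
 */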
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef CCR
        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}