sh64: Kill off dead i/d-cache disabled bits.
[deliverable/linux.git] / arch / sh / mm / cache.c
CommitLineData
1da177e4 1/*
 * arch/sh/mm/cache.c
1da177e4
LT
3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
dfff0fa6 5 * Copyright (C) 2002 - 2009 Paul Mundt
1da177e4
LT
6 *
7 * Released under the terms of the GNU GPL v2.0.
8 */
1da177e4 9#include <linux/mm.h>
acca4f4d 10#include <linux/init.h>
52e27782 11#include <linux/mutex.h>
e06c4e57 12#include <linux/fs.h>
7747b9a4
PM
13#include <linux/highmem.h>
14#include <linux/module.h>
1da177e4
LT
15#include <asm/mmu_context.h>
16#include <asm/cacheflush.h>
17
37443ef3
PM
/*
 * Cache maintenance operation pointers. All of them are pointed at
 * no-op stubs by cpu_cache_init() and then overridden by the
 * CPU-family-specific cache init code (sh2/sh2a/sh3/sh4/sh7705)
 * for the operations the hardware actually requires.
 */
void (*flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_dup_mm)(struct mm_struct *mm);
void (*flush_cache_page)(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn);
void (*flush_cache_range)(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end);
void (*flush_dcache_page)(struct page *page);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma,
			  struct page *page);
void (*flush_cache_sigtramp)(unsigned long address);

/* Region-based writeback/purge/invalidate primitives, same scheme. */
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);
33
/*
 * Default no-op implementations for the cache operation pointers
 * above, used when no CPU-family init overrides them (e.g. when
 * the caches need no software maintenance).
 */
static inline void noop_flush_cache_all(void)
{
}

static inline void noop_flush_cache_mm(struct mm_struct *mm)
{
}

static inline void noop_flush_cache_page(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
}

static inline void noop_flush_cache_range(struct vm_area_struct *vma,
					  unsigned long start,
					  unsigned long end)
{
}

static inline void noop_flush_dcache_page(struct page *page)
{
}

static inline void noop_flush_icache_range(unsigned long start,
					   unsigned long end)
{
}

static inline void noop_flush_icache_page(struct vm_area_struct *vma,
					  struct page *page)
{
}

static inline void noop_flush_cache_sigtramp(unsigned long address)
{
}

/* Shared stub for all three __flush_*_region pointers. */
static inline void noop__flush_region(void *start, int size)
{
}
73
ba1789ef
PM
74void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
75 unsigned long vaddr, void *dst, const void *src,
76 unsigned long len)
1da177e4 77{
0dfae7d5
PM
78 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
79 !test_bit(PG_dcache_dirty, &page->flags)) {
2277ab4a
PM
80 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
81 memcpy(vto, src, len);
b5eb10ae 82 kunmap_coherent();
2277ab4a
PM
83 } else {
84 memcpy(dst, src, len);
0dfae7d5
PM
85 if (boot_cpu_data.dcache.n_aliases)
86 set_bit(PG_dcache_dirty, &page->flags);
2277ab4a 87 }
ba1789ef
PM
88
89 if (vma->vm_flags & VM_EXEC)
90 flush_cache_page(vma, vaddr, page_to_pfn(page));
91}
92
93void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
94 unsigned long vaddr, void *dst, const void *src,
95 unsigned long len)
96{
0dfae7d5
PM
97 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
98 !test_bit(PG_dcache_dirty, &page->flags)) {
2277ab4a
PM
99 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
100 memcpy(dst, vfrom, len);
b5eb10ae 101 kunmap_coherent();
2277ab4a
PM
102 } else {
103 memcpy(dst, src, len);
0dfae7d5
PM
104 if (boot_cpu_data.dcache.n_aliases)
105 set_bit(PG_dcache_dirty, &page->flags);
2277ab4a 106 }
1da177e4 107}
39e688a9 108
7747b9a4
PM
109void copy_user_highpage(struct page *to, struct page *from,
110 unsigned long vaddr, struct vm_area_struct *vma)
111{
112 void *vfrom, *vto;
113
7747b9a4 114 vto = kmap_atomic(to, KM_USER1);
7747b9a4 115
0dfae7d5
PM
116 if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
117 !test_bit(PG_dcache_dirty, &from->flags)) {
2277ab4a
PM
118 vfrom = kmap_coherent(from, vaddr);
119 copy_page(vto, vfrom);
b5eb10ae 120 kunmap_coherent();
2277ab4a
PM
121 } else {
122 vfrom = kmap_atomic(from, KM_USER0);
123 copy_page(vto, vfrom);
124 kunmap_atomic(vfrom, KM_USER0);
125 }
126
127 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
7747b9a4
PM
128 __flush_wback_region(vto, PAGE_SIZE);
129
130 kunmap_atomic(vto, KM_USER1);
131 /* Make sure this page is cleared on other CPU's too before using it */
132 smp_wmb();
133}
134EXPORT_SYMBOL(copy_user_highpage);
dfff0fa6
PM
135
136void clear_user_highpage(struct page *page, unsigned long vaddr)
137{
138 void *kaddr = kmap_atomic(page, KM_USER0);
139
140 clear_page(kaddr);
141
142 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
143 __flush_wback_region(kaddr, PAGE_SIZE);
144
145 kunmap_atomic(kaddr, KM_USER0);
146}
147EXPORT_SYMBOL(clear_user_highpage);
9cef7492
PM
148
149void __update_cache(struct vm_area_struct *vma,
150 unsigned long address, pte_t pte)
151{
152 struct page *page;
153 unsigned long pfn = pte_pfn(pte);
154
155 if (!boot_cpu_data.dcache.n_aliases)
156 return;
157
158 page = pfn_to_page(pfn);
159 if (pfn_valid(pfn) && page_mapping(page)) {
160 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
161 if (dirty) {
162 unsigned long addr = (unsigned long)page_address(page);
163
164 if (pages_do_alias(addr, address & PAGE_MASK))
165 __flush_wback_region((void *)addr, PAGE_SIZE);
166 }
167 }
168}
c0fe478d
PM
169
170void __flush_anon_page(struct page *page, unsigned long vmaddr)
171{
172 unsigned long addr = (unsigned long) page_address(page);
173
174 if (pages_do_alias(addr, vmaddr)) {
175 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
176 !test_bit(PG_dcache_dirty, &page->flags)) {
177 void *kaddr;
178
179 kaddr = kmap_coherent(page, vmaddr);
180 __flush_wback_region((void *)kaddr, PAGE_SIZE);
181 kunmap_coherent();
182 } else
183 __flush_wback_region((void *)addr, PAGE_SIZE);
184 }
185}
ecba1060 186
27d59ec1
PM
187static void compute_alias(struct cache_info *c)
188{
189 c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
190 c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
191}
192
/*
 * Log the probed I-cache/D-cache geometry at boot, plus the L2
 * (S-cache) parameters when the CPU reports one via CPU_HAS_L2_CACHE.
 */
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
226
ecba1060
PM
227void __init cpu_cache_init(void)
228{
27d59ec1
PM
229 compute_alias(&boot_cpu_data.icache);
230 compute_alias(&boot_cpu_data.dcache);
231 compute_alias(&boot_cpu_data.scache);
232
37443ef3
PM
233 flush_cache_all = noop_flush_cache_all;
234 flush_cache_mm = noop_flush_cache_mm;
235 flush_cache_dup_mm = noop_flush_cache_mm;
236 flush_cache_page = noop_flush_cache_page;
237 flush_cache_range = noop_flush_cache_range;
238 flush_dcache_page = noop_flush_dcache_page;
239 flush_icache_range = noop_flush_icache_range;
240 flush_icache_page = noop_flush_icache_page;
241 flush_cache_sigtramp = noop_flush_cache_sigtramp;
242
243 __flush_wback_region = noop__flush_region;
244 __flush_purge_region = noop__flush_region;
245 __flush_invalidate_region = noop__flush_region;
246
109b44a8
PM
247 if (boot_cpu_data.family == CPU_FAMILY_SH2) {
248 extern void __weak sh2_cache_init(void);
249
250 sh2_cache_init();
251 }
252
a58e1a2a
PM
253 if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
254 extern void __weak sh2a_cache_init(void);
255
256 sh2a_cache_init();
257 }
258
79f1c9da
PM
259 if (boot_cpu_data.family == CPU_FAMILY_SH3) {
260 extern void __weak sh3_cache_init(void);
261
262 sh3_cache_init();
0d051d90
PM
263
264 if ((boot_cpu_data.type == CPU_SH7705) &&
265 (boot_cpu_data.dcache.sets == 512)) {
266 extern void __weak sh7705_cache_init(void);
267
268 sh7705_cache_init();
269 }
79f1c9da
PM
270 }
271
ecba1060
PM
272 if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
273 (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
274 (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
275 extern void __weak sh4_cache_init(void);
276
277 sh4_cache_init();
278 }
27d59ec1
PM
279
280 emit_cache_params();
ecba1060 281}
This page took 0.426022 seconds and 5 git commands to generate.