/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001, 2002, 2003, 2004, 2005 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
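
/*
 * For example, a hypothetical 16 KiB way built from 512 sets of 32-byte
 * lines (entry_shift = 5) with 4 KiB pages gives alias_mask = 0x3000 and
 * n_aliases = 4, i.e. a physical page can show up at any of four cache
 * colours.
 */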

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->icache.ways,
		cpu_data->icache.sets,
		cpu_data->icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->icache.entry_mask,
		cpu_data->icache.alias_mask,
		cpu_data->icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->dcache.ways,
		cpu_data->dcache.sets,
		cpu_data->dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->dcache.entry_mask,
		cpu_data->dcache.alias_mask,
		cpu_data->dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */

/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_SEMAPHORES 16

struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
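
/*
 * A 64k direct-mapped cache with 4 KiB pages means at most 2^4 = 16 page
 * colours, hence 16 semaphores; only the first n_aliases of them are
 * actually initialised in p3_cache_init() below.
 */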

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&cpu_data->icache);
	compute_alias(&cpu_data->dcache);

	switch (cpu_data->dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
		panic("%s failed.", __FUNCTION__);

	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
		sema_init(&p3map_sem[i], 1);
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

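/*
 * The three region primitives above differ only in the cache operation
 * applied to each line: ocbwb writes a dirty line back without invalidating
 * it, ocbp writes it back and invalidates it, and ocbi invalidates it
 * without writing it back.
 */
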
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	    || start < CACHE_OC_ADDRESS_ARRAY) {
		local_irq_save(flags);
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0x20000000);
		local_irq_restore(flags);
	} else {
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0);
	}
}
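
/*
 * SH_CACHE_ASSOC sets the 'A' bit in the memory-mapped cache-array address,
 * selecting an associative write (see __flush_cache_4096() below), and
 * 0x20000000 is the P1-to-P2 offset, so the flush routine runs from the
 * uncached P2 mirror when that is required.
 */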

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = cpu_data->dcache.n_aliases;
		for (i = 0; i < n; i++, addr += PAGE_SIZE)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
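
/*
 * The loop above writes to each of the n_aliases colours of the operand
 * cache address array, so any line that could alias this page, whichever
 * virtual address it was brought in through, gets written back and
 * invalidated.
 */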

static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * Note : (RPC) since the caches are physically tagged, the only point
	 * of flush_cache_mm for SH-4 is to get rid of aliases from the
	 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
	 * lines can stay resident so long as the virtual address they were
	 * accessed with (hence cache set) is in accord with the physical
	 * address (i.e. tag). It's no different here. So I reckon we don't
	 * need to flush the I-cache, since aliases don't matter for that. We
	 * should try that.
	 */
	flush_cache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = cpu_data->dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = cpu_data->icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual). There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
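
/*
 * Note that the D-cache is flushed at two colours above: the one the user
 * virtual address maps to and the one the physical (P1 identity mapping)
 * address maps to, matching the NOTE in flush_cache_range() below about
 * P1 aliases.
 */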

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = cpu_data->dcache.alias_mask;
	unsigned long n_aliases = cpu_data->dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	unsigned long phys;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	pte_t entry;
	int i;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (n_aliases == 0)
		return;

	all_aliases_mask = (1 << n_aliases) - 1;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= 64) {
		flush_dcache_all();

		if (vma->vm_flags & VM_EXEC)
			flush_icache_all();

		return;
	}

	dir = pgd_offset(vma->vm_mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			p &= ~((1 << PMD_SHIFT) - 1);
			p += (1 << PMD_SHIFT);
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			entry = *pte;

			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry) & PTE_PHYS_MASK;

				if ((p ^ phys) & alias_mask) {
					d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
					d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

					if (d == all_aliases_mask)
						goto loop_exit;
				}
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	for (i = 0, select_bit = 0x1, addr_offset = 0x0; i < n_aliases;
	     i++, select_bit <<= 1, addr_offset += PAGE_SIZE)
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required??? Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}
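
/*
 * In the page table walk above, 'd' collects one bit per D-cache colour
 * found to hold a potential alias; loop_exit then flushes just those
 * colours, one page-sized slice each, through __flush_dcache_segment_fn,
 * and the walk bails out early once every colour has been marked.
 */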

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &cpu_data->dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6 hence
	 * trashing exec_offset before it's been added on - why? Hence
	 * "=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add %1, %0\n\t"
		     "jmp @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1: .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
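/*
 * The flush itself relies on movca.l: storing with movca.l allocates the
 * target line in the cache without fetching it from memory, which forces
 * write-back of whatever dirty line previously occupied that slot, and the
 * ocbi that follows immediately invalidates the freshly allocated line.
 * SR.BL (bit 28) is set around each group, presumably so that no exception
 * or interrupt can slip in between a movca.l and its ocbi.
 */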
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches. Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}