/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2007 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

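/*
 * With 4 KiB pages (an assumption; PAGE_SHIFT is configuration-dependent),
 * these thresholds put the cutover to a whole-cache flush at 256 KiB for
 * the dcache and 128 KiB for the icache.
 */
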
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routines,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long icacheaddr;
	unsigned long start, end;
	unsigned long v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(args);
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES - 1);
		end += L1_CACHE_BYTES - 1;
		end &= ~(L1_CACHE_BYTES - 1);

		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			__ocbwb(v);

			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
					(v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
			     i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
	}
}
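
/*
 * A worked example of the address-array write above, using illustrative
 * numbers rather than any one part's geometry: with two ways,
 * entry_mask = 0x1fe0, way_incr = 0x2000 and CACHE_IC_ADDRESS_ARRAY
 * assumed at 0xf0000000, flushing v = 0x8c0012a4 stores 0 to 0xf00012a0
 * and 0xf00032a0 -- one address-array slot per way -- clearing the valid
 * bit of every icache line that could hold that virtual address.
 */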

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;
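	/*
	 * 0x20000000 is the distance from a P1 (cached) kernel-text address
	 * to its P2 (uncached) alias in the SH-4 address map -- an inference
	 * from the segment layout, not spelled out here -- so that
	 * __flush_cache_4096() executes uncached as required above.
	 */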

	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
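
/*
 * Illustrative numbers (not taken from this file): a 16 KiB single-way
 * dcache with 4 KiB pages gives n_aliases = 4, so the loop above touches
 * the OC address array at colour offsets 0x0000, 0x1000, 0x2000 and
 * 0x3000, evicting every cache set the page's data could occupy
 * regardless of the virtual address it was written through.
 */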

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long ccr;

	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
}
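
/*
 * Note (from the SH-4 manuals rather than this file): CCR may only be
 * written while executing from the uncached P2 region, which is why the
 * CCR_CACHE_ICI write above is bracketed by jump_to_uncached() and
 * back_to_cached().
 */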

static inline void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

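/*
 * Walk the page tables for [start, end) and record in the bit-vector 'd'
 * which dcache colours are touched by pages whose virtual and physical
 * addresses disagree in the alias bits; afterwards, flush only the cache
 * segments whose colour bit is set (bailing out of the walk early once
 * every colour has been seen).
 */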
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long address, pfn, phys;
	unsigned int alias_mask;

	vma = data->vma;
	address = data->addr1;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;

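	/*
	 * Illustrative alias check (the numbers are examples, not any fixed
	 * geometry): with alias_mask = 0x3000, a U0 address of 0x00401000
	 * backed by phys 0x0c003000 differs in the alias bits
	 * ((address ^ phys) & alias_mask = 0x2000), so both the virtual
	 * colour slot (0x1000) and the physical colour slot (0x3000) of
	 * the OC address array must be flushed below.
	 */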
	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual). There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required??? Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * __flush_cache_4096
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6 hence
	 * trashing exec_offset before it's been added on - why? Hence
	 * "=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
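	/*
	 * (Answering the FIXME above: the asm writes %0 with the mov.l
	 * before it reads %1 in the add, so without the "&" early-clobber
	 * modifier the compiler is entitled to assign both operands the
	 * same register. "=&r" is the documented constraint for an output
	 * written before all inputs are consumed, not merely a workaround.)
	 */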

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache-way-sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory. However
 * this has the side effect that the cache is now caching that memory
 * location. So follow this with a cache invalidate to mark the cache line
 * invalid. And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page. So the
 *   movca.l hits the cache (it is assumed that it misses, or at least
 *   isn't dirty), modifies the line and then invalidates it, losing the
 *   required change.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */
static void __flush_dcache_segment_writethrough(unsigned long start,
						unsigned long extent_per_way)
{
	unsigned long addr;
	int i;

	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

	while (extent_per_way) {
		for (i = 0; i < cpu_data->dcache.ways; i++)
			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);

		addr += cpu_data->dcache.linesz;
		extent_per_way -= cpu_data->dcache.linesz;
	}
}
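
/*
 * In write-through mode no dcache line is ever dirty, so the address-array
 * stores above simply clear the valid bits; the movca.l technique below is
 * then both unnecessary and, per the comment above, actively unsafe.
 */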

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
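	/* Bit 28 of SR is BL, the interrupt-block bit on SH-4. */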
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches. Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);

	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));

	if (wt_enabled)
		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
	else {
		switch (boot_cpu_data.dcache.ways) {
		case 1:
			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
			break;
		case 2:
			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
			break;
		case 4:
			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
			break;
		default:
			panic("unknown number of cache ways\n");
			break;
		}
	}

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}