/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
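
/*
 * Worked example (hypothetical figures, not tied to any one part): for a
 * 16KB direct-mapped D-cache with 512 sets, 32-byte lines (entry_shift of
 * 5) and 4KB pages, alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000,
 * so n_aliases = (0x3000 >> 12) + 1 = 4: a physical page may be resident
 * in the cache under any of four different "colours".
 */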

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
56 printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
57 current_cpu_data
.icache
.ways
,
58 current_cpu_data
.icache
.sets
,
59 current_cpu_data
.icache
.way_incr
);
60 printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
61 current_cpu_data
.icache
.entry_mask
,
62 current_cpu_data
.icache
.alias_mask
,
63 current_cpu_data
.icache
.n_aliases
);
64 printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
65 current_cpu_data
.dcache
.ways
,
66 current_cpu_data
.dcache
.sets
,
67 current_cpu_data
.dcache
.way_incr
);
68 printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
69 current_cpu_data
.dcache
.entry_mask
,
70 current_cpu_data
.dcache
.alias_mask
,
71 current_cpu_data
.dcache
.n_aliases
);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}

/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (*(volatile unsigned long *)v));
	}
}

/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (*(volatile unsigned long *)v));
	}
}

/*
 * No write back, please: just invalidate the region's D-cache lines.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (*(volatile unsigned long *)v));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (*(volatile unsigned long *)v));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
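
/*
 * Note that or'ing SH_CACHE_ASSOC into the array address above selects an
 * associative write: __flush_cache_4096() will then only operate on lines
 * whose tags match 'phys' (see the 'A' bit description in the
 * __flush_cache_4096() comment further down).
 */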

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop through all the D-cache aliases */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
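
/*
 * With n_aliases = 4, for example, the loop above issues associative
 * flushes at array offsets 0x0000, 0x1000, 0x2000 and 0x3000, covering
 * every cache colour the physical page could be resident under.
 */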

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_P1();

	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
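
/*
 * The bitmap 'd' above records which page colours are known dirty: if a
 * virtual page of colour 1 maps a physical page of colour 3, bits 1 and 3
 * are both set. Once d == all_aliases_mask every colour needs flushing
 * anyway, so the page-table walk can stop early.
 */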

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page to flush
 * @addr: U0 address within the page
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
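
/*
 * An explanatory note on the loop above: the plain stores through 'a' hit
 * the memory mapped cache array with the associative ('A') bit set in the
 * address (or'd in by the flush_cache_4096() wrapper), so each store only
 * acts on a line whose tag matches 'p', performing the purge or write-back
 * selected by the low bits of 'p' (see the comment above this function).
 */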

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr += start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
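
/*
 * How the segment flushers work: movca.l r0, @Rn allocates the cache line
 * for Rn without fetching it from memory, which evicts (writing back, if
 * dirty) whatever the line previously held; the ocbi that follows then
 * invalidates the newly allocated line.  Setting SR.BL (bit 28) around
 * each movca.l/ocbi pair keeps interrupts from touching the line in
 * between.
 */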

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr += start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr += start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}