/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES        64      /* XXX: Tune for ways */
#define MAX_ICACHE_PAGES        32
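
/*
 * Illustrative sizing, assuming 4 KiB pages: MAX_DCACHE_PAGES caps ranged
 * flushing at 64 * 4 KiB = 256 KiB. SH-4 dcaches are far smaller (commonly
 * 16 or 32 KiB), so beyond this point walking the page tables is expected
 * to cost more than simply flushing the dcache in its entirety.
 */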

static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
        (void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, from the a.out format
 * routines, and from signal handler and kprobes code.
 */
static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;
        unsigned long flags, v;
        int i;

        start = data->addr1;
        end = data->addr2;

        /* If there are too many pages then just blow away the caches */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                local_flush_cache_all(NULL);
                return;
        }

        /*
         * Selectively flush d-cache then invalidate the i-cache.
         * This is inefficient, so only use this for small ranges.
         */
        start &= ~(L1_CACHE_BYTES-1);
        end += L1_CACHE_BYTES-1;
        end &= ~(L1_CACHE_BYTES-1);

        local_irq_save(flags);
        jump_to_uncached();

        for (v = start; v < end; v += L1_CACHE_BYTES) {
                unsigned long icacheaddr;

                __ocbwb(v);

                icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
                                cpu_data->icache.entry_mask);

                /* Clear i-cache line valid-bit */
                for (i = 0; i < cpu_data->icache.ways; i++) {
                        __raw_writel(0, icacheaddr);
                        icacheaddr += cpu_data->icache.way_incr;
                }
        }

        back_to_cached();
        local_irq_restore(flags);
}
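
/*
 * Note on the address-array trick above (a sketch of the hardware
 * mechanism, per the SH-4 manuals): the I-cache tag/valid bits are
 * exposed through a memory-mapped window (CACHE_IC_ADDRESS_ARRAY,
 * conventionally at P4 address 0xf0000000). Writing 0 to the entry
 * selected by the entry_mask bits of the virtual address clears that
 * entry's valid bit in one way; stepping by way_incr repeats this for
 * each way, which is why no tag compare is needed here.
 */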

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
        unsigned long flags, exec_offset = 0;

        /*
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
         * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
         */
        if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
                exec_offset = 0x20000000;

        local_irq_save(flags);
        __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
        local_irq_restore(flags);
}
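
/*
 * Worked example of the exec_offset trick (illustrative addresses):
 * P1 and P2 both map the same physical memory, but P1 accesses are
 * cached while P2 accesses are not, and the two regions sit 0x20000000
 * apart (P1 starts at 0x80000000, P2 at 0xa0000000). Adding 0x20000000
 * to the flush code's P1 address, e.g. 0x8c001000 -> 0xac001000, makes
 * the CPU execute the same code uncached, which some SH-4 parts require
 * while the cache arrays are being written.
 */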

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
        struct page *page = arg;
#ifndef CONFIG_SMP
        struct address_space *mapping = page_mapping(page);

        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
#endif
        {
                unsigned long phys = PHYSADDR(page_address(page));
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;

                /* Loop all the D-cache */
                n = boot_cpu_data.dcache.n_aliases;
                for (i = 0; i < n; i++, addr += PAGE_SIZE)
                        flush_cache_one(addr, phys);
        }

        wmb();
}
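
/*
 * Illustrative alias arithmetic, assuming a 16 KiB way and 4 KiB pages
 * (e.g. an SH7750-class dcache): n_aliases = way_size / PAGE_SIZE = 4
 * and alias_mask covers bits 12-13 (0x3000). The loop above therefore
 * issues one flush_cache_one() per possible colour, at OC array offsets
 * 0x0000, 0x1000, 0x2000 and 0x3000, so the page's data is purged no
 * matter which virtual colour it was dirtied through.
 */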

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
        unsigned long flags, ccr;

        local_irq_save(flags);
        jump_to_uncached();

        /* Flush I-cache */
        ccr = ctrl_inl(CCR);
        ccr |= CCR_CACHE_ICI;
        ctrl_outl(ccr, CCR);

        /*
         * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */

        back_to_cached();
        local_irq_restore(flags);
}

static inline void flush_dcache_all(void)
{
        (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
        wmb();
}

static void sh4_flush_cache_all(void *unused)
{
        flush_dcache_all();
        flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        unsigned long d = 0, p = start & PAGE_MASK;
        unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
        unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
        unsigned long select_bit;
        unsigned long all_aliases_mask;
        unsigned long addr_offset;
        pgd_t *dir;
        pmd_t *pmd;
        pud_t *pud;
        pte_t *pte;
        int i;

        dir = pgd_offset(mm, p);
        pud = pud_offset(dir, p);
        pmd = pmd_offset(pud, p);
        end = PAGE_ALIGN(end);

        all_aliases_mask = (1 << n_aliases) - 1;

        do {
                if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
                        p &= PMD_MASK;
                        p += PMD_SIZE;
                        pmd++;

                        continue;
                }

                pte = pte_offset_kernel(pmd, p);

                do {
                        unsigned long phys;
                        pte_t entry = *pte;

                        if (!(pte_val(entry) & _PAGE_PRESENT)) {
                                pte++;
                                p += PAGE_SIZE;
                                continue;
                        }

                        phys = pte_val(entry) & PTE_PHYS_MASK;

                        if ((p ^ phys) & alias_mask) {
                                d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
                                d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

                                if (d == all_aliases_mask)
                                        goto loop_exit;
                        }

                        pte++;
                        p += PAGE_SIZE;
                } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
                pmd++;
        } while (p < end);

loop_exit:
        addr_offset = 0;
        select_bit = 1;

        for (i = 0; i < n_aliases; i++) {
                if (d & select_bit) {
                        (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
                        wmb();
                }

                select_bit <<= 1;
                addr_offset += PAGE_SIZE;
        }
}
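
/*
 * Example of the 'd' colour bitmap above (hypothetical values): with
 * n_aliases = 4, a PTE mapping virtual colour 1 to physical colour 3
 * sets bits 1 and 3 of 'd'. Only the colours actually seen get flushed
 * in the final loop, and once d == all_aliases_mask (0xf here) every
 * colour needs flushing anyway, so the PTE walk bails out early.
 */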

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
        struct mm_struct *mm = arg;

        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        /*
         * Don't bother groveling around the dcache for the VMA ranges
         * if there are too many PTEs to make it worthwhile.
         */
        if (mm->nr_ptes >= MAX_DCACHE_PAGES)
                flush_dcache_all();
        else {
                struct vm_area_struct *vma;

                /*
                 * In this case there are reasonably sized ranges to flush,
                 * iterate through the VMA list and take care of any aliases.
                 */
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        __flush_cache_mm(mm, vma->vm_start, vma->vm_end);
        }

        /* Only touch the icache if one of the VMAs has VM_EXEC set. */
        if (mm->exec_vm)
                flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long address, pfn, phys;
        unsigned int alias_mask;

        vma = data->vma;
        address = data->addr1;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        alias_mask = boot_cpu_data.dcache.alias_mask;

        /* We only need to flush the D-cache when we have an alias */
        if ((address ^ phys) & alias_mask) {
                /* Loop 4K of the D-cache */
                flush_cache_one(
                        CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
                /* Loop another 4K of the D-cache */
                flush_cache_one(
                        CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
                        phys);
        }

        alias_mask = boot_cpu_data.icache.alias_mask;
        if (vma->vm_flags & VM_EXEC) {
                /*
                 * Evict entries from the portion of the cache from which code
                 * may have been executed at this address (virtual). There's
                 * no need to evict from the portion corresponding to the
                 * physical address as for the D-cache, because we know the
                 * kernel has never executed the code through its identity
                 * translation.
                 */
                flush_cache_one(
                        CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
        }
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        /*
         * Don't bother with the lookup and alias check if we have a
         * wide range to cover, just blow away the dcache in its
         * entirety instead. -- PFM.
         */
        if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
                flush_dcache_all();
        else
                __flush_cache_mm(vma->vm_mm, start, end);

        if (vma->vm_flags & VM_EXEC) {
                /*
                 * TODO: Is this required??? Need to look at how I-cache
                 * coherency is assured when new programs are loaded to see if
                 * this matters.
                 */
                flush_icache_all();
        }
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset)
{
        int way_count;
        unsigned long base_addr = addr;
        struct cache_info *dcache;
        unsigned long way_incr;
        unsigned long a, ea, p;
        unsigned long temp_pc;

        dcache = &boot_cpu_data.dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;

        /*
         * Apply exec_offset (i.e. branch to P2 if required).
         *
         * FIXME:
         *
         * If I write "=r" for the (temp_pc), it puts this in r6 hence
         * trashing exec_offset before it's been added on - why? Hence
         * "=&r" as a 'workaround'
         */
        asm volatile("mov.l 1f, %0\n\t"
                     "add   %1, %0\n\t"
                     "jmp   @%0\n\t"
                     "nop\n\t"
                     ".balign 4\n\t"
                     "1:  .long 2f\n\t"
                     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

        /*
         * We know there will be >=1 iteration, so write as do-while to avoid
         * pointless head-of-loop check for 0 iterations.
         */
        do {
                ea = base_addr + PAGE_SIZE;
                a = base_addr;
                p = phys;

                do {
                        *(volatile unsigned long *)a = p;
                        /*
                         * Next line: intentionally not p+32, saves an add, p
                         * will do since only the cache tag bits need to
                         * match.
                         */
                        *(volatile unsigned long *)(a+32) = p;
                        a += 64;
                        p += 64;
                } while (a < ea);

                base_addr += way_incr;
        } while (--way_count != 0);
}
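
/*
 * A note on the stores above (a hedged reading of the SH-4 address-array
 * semantics): because flush_cache_one() passed addr with SH_CACHE_ASSOC
 * set, each write is an associative one - the hardware compares the tag
 * bits of the written value 'p' against the selected entry and, on a
 * match, performs the write-back/purge chosen by p's low bits. A miss
 * is simply a no-op, which is what makes blindly sweeping a whole
 * page's worth of entries safe.
 */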

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache-way-sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory. However,
 * this has the side effect that the cache is now caching that memory
 * location. So follow this with a cache invalidate to mark the cache line
 * invalid. And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page. So the
 *   movca.l hits the cache (it is assumed that it misses, or at least
 *   isn't dirty), modifies the line and then invalidates it, losing the
 *   required change.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */
static void __flush_dcache_segment_writethrough(unsigned long start,
                                                unsigned long extent_per_way)
{
        unsigned long addr;
        int i;

        addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

        while (extent_per_way) {
                for (i = 0; i < cpu_data->dcache.ways; i++)
                        __raw_writel(0, addr + cpu_data->dcache.way_incr * i);

                addr += cpu_data->dcache.linesz;
                extent_per_way -= cpu_data->dcache.linesz;
        }
}
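
/*
 * Design note (an inference from the comment above): in write-through
 * mode no dcache line is ever dirty, so there is nothing to write back
 * and the movca.l/ocbi dance is both unnecessary and unsafe. Writing 0
 * through the OC address array is enough - it just clears each entry's
 * valid bit.
 */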

static void __flush_dcache_segment_1way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /*
         * The previous code aligned base_addr to 16k, i.e. the way_size of all
         * existing SH-4 D-caches. Whilst I don't see a need to have this
         * aligned to any better than the cache line size (which it will be
         * anyway by construction), let's align it to at least the way_size of
         * any existing or conceivable SH-4 D-cache. -- RPC
         */
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
        } while (a0 < a0e);
}
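
/*
 * The ldc/stc pair above brackets each burst of four lines with SR.BL
 * (bit 28) set, which blocks interrupts and exceptions; re-enabling
 * between bursts keeps interrupt latency bounded. This matches the
 * "interrupts disabled" requirement in the big comment above - if an
 * interrupt ran between the movca.l and the ocbi, the garbage line
 * could be evicted to memory.
 */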

static void __flush_dcache_segment_2way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
        } while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a2, a3, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);
        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a2 = a1 + way_incr;
        a3 = a2 + way_incr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
        } while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
        unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);

        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                ctrl_inl(CCN_PVR),
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));

        if (wt_enabled)
                __flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
        else {
                switch (boot_cpu_data.dcache.ways) {
                case 1:
                        __flush_dcache_segment_fn = __flush_dcache_segment_1way;
                        break;
                case 2:
                        __flush_dcache_segment_fn = __flush_dcache_segment_2way;
                        break;
                case 4:
                        __flush_dcache_segment_fn = __flush_dcache_segment_4way;
                        break;
                default:
                        panic("unknown number of cache ways\n");
                        break;
                }
        }

        local_flush_icache_range        = sh4_flush_icache_range;
        local_flush_dcache_page         = sh4_flush_dcache_page;
        local_flush_cache_all           = sh4_flush_cache_all;
        local_flush_cache_mm            = sh4_flush_cache_mm;
        local_flush_cache_dup_mm        = sh4_flush_cache_mm;
        local_flush_cache_page          = sh4_flush_cache_page;
        local_flush_cache_range         = sh4_flush_cache_range;

        sh4__flush_region_init();
}
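
/*
 * Usage sketch (how these hooks are reached; a summary of the generic
 * SH cache code rather than anything defined in this file): the
 * flush_*() entry points in arch/sh/mm/cache.c pack their arguments
 * into a struct flusher_data - vma in ->vma, start/address in ->addr1,
 * end/pfn in ->addr2 - and invoke the local_flush_* function pointers
 * installed by sh4_cache_init() on each CPU, which is why the
 * sh4_flush_*() routines above all take a single void *args.
 */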