/* arch/mips/mm/cache.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
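
/*
 * Every pointer above is filled in at boot by whichever CPU-specific
 * cache_init routine cpu_cache_init() below ends up calling.
 */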

EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */
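
/*
 * A rough sketch of how the noncoherent DMA mapping code is expected to
 * use the hooks above when a buffer is handed to a device (illustrative
 * only -- the real call sites live in the MIPS DMA mapping code; the
 * direction names are the generic <linux/dma-mapping.h> ones):
 *
 *	switch (direction) {
 *	case DMA_TO_DEVICE:
 *		_dma_cache_wback(addr, size);	 // CPU wrote: push dirty lines to RAM
 *		break;
 *	case DMA_FROM_DEVICE:
 *		_dma_cache_inv(addr, size);	 // device will write: drop stale lines
 *		break;
 *	case DMA_BIDIRECTIONAL:
 *		_dma_cache_wback_inv(addr, size);
 *		break;
 *	}
 */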

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems a very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
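
/*
 * Userspace reaches this through the MIPS cacheflush(2) wrapper, e.g.
 * after generating code at runtime.  A minimal sketch (error handling
 * omitted, assuming the usual <sys/cachectl.h> wrapper and flags):
 *
 *	#include <sys/cachectl.h>
 *
 *	memcpy(buf, insns, len);	// write out the new instructions
 *	cacheflush(buf, len, ICACHE);	// make them visible to the I-cache
 *	((void (*)(void))buf)();	// now safe to execute
 *
 * Note that, per the comment above, the kernel currently flushes via
 * flush_icache_range() whatever the cache argument says.
 */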

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
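
/*
 * pages_do_alias() comes from <asm/cacheflush.h>; conceptually it tests
 * whether two virtual addresses can index different lines for the same
 * physical data in a virtually indexed cache, roughly
 *
 *	((addr1 ^ addr2) & shm_align_mask) != 0
 *
 * If the kernel and user mappings alias, or the page may be executed on
 * a CPU whose I-cache doesn't fill from the D-cache, the writeback that
 * __flush_dcache_page() deferred has to happen here.
 */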

#define __weak __attribute__((weak))
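
/*
 * The per-CPU init routines below are declared __weak so that this file
 * still links when a given cache variant isn't built: the matching
 * cpu_has_* test is constant-false in that configuration, so the call
 * through the unresolved weak symbol is optimized away as dead code.
 */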

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}
	if (cpu_has_sb1_cache) {
		extern void __weak sb1_cache_init(void);

		sb1_cache_init();
		return;
	}

	panic(cache_panic);
}
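
/*
 * A minimal sketch of what one of those init routines is expected to do,
 * namely install its own implementations behind the pointers declared at
 * the top of this file (hypothetical function names; the real routines,
 * e.g. r4k_cache_init() in c-r4k.c, also probe cache geometry and pick
 * SMP vs. UP variants):
 *
 *	void __init r4k_cache_init(void)
 *	{
 *		flush_cache_all = r4k_flush_cache_all;
 *		__flush_cache_all = r4k___flush_cache_all;
 *		flush_cache_range = r4k_flush_cache_range;
 *		flush_icache_range = r4k_flush_icache_range;
 *		...
 *	}
 */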