Commit | Line | Data |
---|---|---|
5b3b1688 DD |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2005-2007 Cavium Networks | |
7 | */ | |
f65aad41 | 8 | #include <linux/export.h> |
5b3b1688 DD |
9 | #include <linux/init.h> |
10 | #include <linux/kernel.h> | |
11 | #include <linux/sched.h> | |
631330f5 | 12 | #include <linux/smp.h> |
5b3b1688 DD |
13 | #include <linux/mm.h> |
14 | #include <linux/bitops.h> | |
15 | #include <linux/cpu.h> | |
16 | #include <linux/io.h> | |
17 | ||
18 | #include <asm/bcache.h> | |
19 | #include <asm/bootinfo.h> | |
20 | #include <asm/cacheops.h> | |
21 | #include <asm/cpu-features.h> | |
69f24d17 | 22 | #include <asm/cpu-type.h> |
5b3b1688 DD |
23 | #include <asm/page.h> |
24 | #include <asm/pgtable.h> | |
25 | #include <asm/r4kcache.h> | |
586016eb | 26 | #include <asm/traps.h> |
5b3b1688 DD |
27 | #include <asm/mmu_context.h> |
28 | #include <asm/war.h> | |
29 | ||
30 | #include <asm/octeon/octeon.h> | |
31 | ||
/*
 * Per-core saved Dcache error status, indexed by core id.  Entries are
 * presumably written by the low-level cache error handler
 * (except_vec2_octeon) -- TODO confirm; they are consumed and cleared
 * by co_cache_error_call_notifiers() in this file.
 */
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
5b3b1688 DD |
34 | |
/*
 * Octeon automatically flushes the dcache on TLB changes, so from
 * Linux's viewpoint it acts much like a physically tagged cache and
 * individual data pages never need explicit flushing here.
 *
 * @addr: page address (unused)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Intentionally a no-op. */
}
45 | ||
/*
 * Flush the icache of the local core with a single "synci".  The
 * operand is 0($0); presumably one synci is sufficient to cover the
 * whole icache on Octeon rather than a single line -- confirm against
 * the Octeon hardware manual.
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
50 | ||
/*
 * Flush the local core's icache for the range [start, end).  The range
 * arguments are unused; a single local icache flush is performed
 * regardless of the span requested.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
59 | ||
/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Order prior stores before the flush and before any IPIs below. */
	mb();
	/* This core is always flushed, whether or not a vma was given. */
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	/* Keep smp_processor_id() stable while building the target mask. */
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	/* Already flushed locally above -- don't send an IPI to ourselves. */
	cpumask_clear_cpu(cpu, &mask);
	/* Ask each remaining core to flush its own icache. */
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
95 | ||
96 | ||
97 | /** | |
98 | * Called to flush the icache on all cores | |
99 | */ | |
100 | static void octeon_flush_icache_all(void) | |
101 | { | |
102 | octeon_flush_icache_all_cores(NULL); | |
103 | } | |
104 | ||
105 | ||
/*
 * Flush all memory associated with a memory context.
 *
 * @mm: Memory context to flush
 *
 * As in the generic R4K implementation, CPUs without dcache aliases
 * have nothing to do here, so this is deliberately a no-op.
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
}
119 | ||
120 | ||
121 | /** | |
122 | * Flush a range of kernel addresses out of the icache | |
123 | * | |
124 | */ | |
125 | static void octeon_flush_icache_range(unsigned long start, unsigned long end) | |
126 | { | |
127 | octeon_flush_icache_all_cores(NULL); | |
128 | } | |
129 | ||
130 | ||
/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	/*
	 * NOTE(review): find_vma() normally requires the mm's mmap
	 * semaphore to be held -- confirm that callers of
	 * flush_cache_sigtramp() hold it.
	 */
	vma = find_vma(current->mm, addr);
	/* Flush on every core the vma's mm has run on (all if vma is NULL). */
	octeon_flush_icache_all_cores(vma);
}
144 | ||
145 | ||
146 | /** | |
147 | * Flush a range out of a vma | |
148 | * | |
149 | * @vma: VMA to flush | |
150 | * @start: | |
151 | * @end: | |
152 | */ | |
153 | static void octeon_flush_cache_range(struct vm_area_struct *vma, | |
154 | unsigned long start, unsigned long end) | |
155 | { | |
156 | if (vma->vm_flags & VM_EXEC) | |
157 | octeon_flush_icache_all_cores(vma); | |
158 | } | |
159 | ||
160 | ||
161 | /** | |
162 | * Flush a specific page of a vma | |
163 | * | |
164 | * @vma: VMA to flush page for | |
165 | * @page: Page to flush | |
166 | * @pfn: | |
167 | */ | |
168 | static void octeon_flush_cache_page(struct vm_area_struct *vma, | |
169 | unsigned long page, unsigned long pfn) | |
170 | { | |
171 | if (vma->vm_flags & VM_EXEC) | |
172 | octeon_flush_icache_all_cores(vma); | |
173 | } | |
174 | ||
d9cdc901 RB |
/*
 * Flushing of kernel vmap ranges is not implemented for Octeon; any
 * call is treated as a fatal kernel bug.
 */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
5b3b1688 DD |
179 | |
/**
 * Probe Octeon's caches
 *
 * Fills in current_cpu_data's icache/dcache geometry.  Some fields are
 * decoded from the COP0 Config1 register, others are hard-coded per
 * CPU model; unsupported models panic.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		/*
		 * Icache geometry decoded from Config1 (presumably the
		 * architectural IL/IS/IA fields at bits 19/22/16 --
		 * confirm against the MIPS PRA).
		 */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		/* Dcache geometry is fixed for these models, not probed. */
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		/* Only the icache line size is read from Config1 here. */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* OCTEON III: all geometry hard-coded. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/* Only the boot CPU logs the discovered geometry. */
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
			  "virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}
269 | ||
/*
 * Install the Octeon-specific cache error exception vector
 * (except_vec2_octeon) at vector offset 0x100, copying at most
 * 0x80 bytes of handler code.
 */
static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}
5b3b1688 DD |
275 | |
/**
 * Setup the Octeon cache flush routines
 *
 * Probes the cache geometry, then points the generic MIPS cache
 * function pointers at the Octeon implementations and installs the
 * board cache error handler hook.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	/* Dcache has no aliases, so only page alignment is needed for shm. */
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_cache_sigtramp = octeon_flush_cache_sigtramp;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	/* Generate the optimized clear_page/copy_page implementations. */
	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}
304 | ||
/*
 * Handle a cache error exception: interested parties subscribe to the
 * raw notifier chain below and are called from the error path.
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

/* Register a notifier to be invoked on cache error exceptions. */
int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
5b3b1688 | 315 | |
/* Remove a notifier added with register_co_cache_error_notifier(). */
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
321 | ||
/*
 * Run the cache error notifier chain.  If no notifier handles the
 * event (the chain does not return NOTIFY_OK), fall back to dumping
 * the raw CacheErr state to the console.
 *
 * @val: non-zero when the Dcache error word was already saved in
 *       cache_err_dcache[] (presumably by the low-level handler --
 *       confirm against except_vec2_octeon); zero to read it from
 *       the COP0 register directly.
 */
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			/* Consume and clear the saved Dcache error word. */
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		/* Bit 0 presumably marks the error word valid -- confirm. */
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}
350 | ||
/*
 * Called when the exception is recoverable: notify listeners with the
 * "Dcache error not yet saved" (0) convention and return to the
 * interrupted context.
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}
359 | ||
/**
 * Called when the exception is not recoverable: notify listeners with
 * the "Dcache error already saved" (1) convention, then panic since
 * execution cannot safely continue.
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}