arch/mips/mm/c-tx39.c
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

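/*
 * The TX39 apparently streams instructions into the pipeline ahead of
 * execution; the taken branch in the macro below seems intended to flush
 * that stream, so the CONF write clearing ICE is already in effect before
 * any cache operation that follows.
 */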
/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set push\n\t" \
	".set noreorder\n\t" \
	"b 1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)

/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
	unsigned long flags, config;

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	blast_inv_dcache_range(addr, addr + size);
}

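/*
 * The TX3912 dcache is a write-through, direct-mapped cache with 4-byte
 * lines (see tx39_probe_cache()), so the 16-byte-line blast helper below
 * is not suitable for it. A TX3912 normally has the tx39h_* handlers
 * installed by tx39_cache_init() anyway, so the CPU type check here is a
 * run-time safeguard.
 */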
/* TX39H2, TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
	if (current_cpu_type() != CPU_TX3912)
		blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}

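/*
 * Every icache operation below follows the same pattern: with interrupts
 * off, disable the icache (clear ICE in CONF), stop instruction streaming,
 * run the blast/flush, then restore CONF.
 */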
static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

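/*
 * vmap/vunmap can create new kernel-virtual aliases of pages that are
 * already in the dcache; blasting the whole dcache is the simple safe
 * answer on these parts.
 */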
static void tx39__flush_cache_vmap(void)
{
	tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
	tx39_blast_dcache();
}

static inline void tx39_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}

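/*
 * The mm/range flushes only matter when the dcache can alias, i.e. when a
 * way is larger than a page (see the MIPS_CACHE_ALIASES check in
 * tx39_cache_init()); otherwise they are no-ops.
 */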
static void tx39_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (cpu_context(smp_processor_id(), mm) != 0)
		tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;
	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	tx39_blast_dcache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have
	 * gotten this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is too
	 * difficult, since these R4k-style caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do an indexed flush; it is too much work to get the (possible)
	 * TLB refills to work correctly.
	 */
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void *addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}

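/*
 * After new instructions have been written (through the dcache), the
 * dcache copy must be pushed out before the stale icache lines are
 * invalidated; if the range is bigger than a cache, blasting the whole
 * cache is cheaper than walking it line by line.
 */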
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
	if (end - start > dcache_size)
		tx39_blast_dcache();
	else
		protected_blast_dcache_range(start, end);

	if (end - start > icache_size)
		tx39_blast_icache();
	else {
		unsigned long flags, config;
		/* disable icache (set ICE#) */
		local_irq_save(flags);
		config = read_c0_conf();
		write_c0_conf(config & ~TX39_CONF_ICE);
		TX39_STOP_STREAMING();
		protected_blast_icache_range(start, end);
		write_c0_conf(config);
		local_irq_restore(flags);
	}
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

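/*
 * DMA cache maintenance picks the cheapest of three strategies: blast
 * whole pages when the buffer is page-aligned, blast the whole dcache
 * when the buffer is larger than it, otherwise walk the exact range.
 */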
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_dcache_range(addr, addr + size);
	}
}

static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_inv_dcache_range(addr, addr + size);
	}
}

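/*
 * A signal trampoline has just been written through the dcache: write back
 * that dcache line, then invalidate the matching icache line so the CPU
 * fetches the new instructions.
 */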
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long config;
	unsigned long flags;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	write_c0_conf(config);
	local_irq_restore(flags);
}

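/*
 * Cache sizes are encoded in CONF as a power of two: a field value of n
 * means 1 << (10 + n) bytes, so e.g. ICS = 2 decodes to 4 KiB of icache.
 */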
static __init void tx39_probe_cache(void)
{
	unsigned long config;

	config = read_c0_conf();

	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
				  TX39_CONF_ICS_SHIFT));
	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
				  TX39_CONF_DCS_SHIFT));

	current_cpu_data.icache.linesz = 16;
	switch (current_cpu_type()) {
	case CPU_TX3912:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 4;
		break;

	case CPU_TX3927:
		current_cpu_data.icache.ways = 2;
		current_cpu_data.dcache.ways = 2;
		current_cpu_data.dcache.linesz = 16;
		break;

	case CPU_TX3922:
	default:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 16;
		break;
	}
}

void tx39_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_type()) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
		__flush_cache_vmap = tx39__flush_cache_vmap;
		__flush_cache_vunmap = tx39__flush_cache_vunmap;
		flush_cache_all = tx39h_flush_icache_all;
		__flush_cache_all = tx39h_flush_icache_all;
		flush_cache_mm = (void *) tx39h_flush_icache_all;
		flush_cache_range = (void *) tx39h_flush_icache_all;
		flush_cache_page = (void *) tx39h_flush_icache_all;
		flush_icache_range = (void *) tx39h_flush_icache_all;
		local_flush_icache_range = (void *) tx39h_flush_icache_all;

		flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
		local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
		flush_data_cache_page = (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv = tx39h_dma_cache_wback_inv;

		shm_align_mask = PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		/* board-dependent init code may set WBON */

		__flush_cache_vmap = tx39__flush_cache_vmap;
		__flush_cache_vunmap = tx39__flush_cache_vunmap;

		flush_cache_all = tx39_flush_cache_all;
		__flush_cache_all = tx39___flush_cache_all;
		flush_cache_mm = tx39_flush_cache_mm;
		flush_cache_range = tx39_flush_cache_range;
		flush_cache_page = tx39_flush_cache_page;
		flush_icache_range = tx39_flush_icache_range;
		local_flush_icache_range = tx39_flush_icache_range;

		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
		flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

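		/*
		 * Color shared mappings to the dcache way size (at least
		 * one page) so that different user mappings of the same
		 * page land on the same cache index and cannot alias.
		 */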
		shm_align_mask = max_t(unsigned long,
				       (dcache_size / current_cpu_data.dcache.ways) - 1,
				       PAGE_SIZE - 1);

		break;
	}

	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

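	/*
	 * When a dcache way is larger than a page, the same physical page
	 * can live at more than one cache index, so mark the cache as
	 * aliasing; the flush hooks above check this via cpu_has_dc_aliases.
	 */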
	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	printk("Primary instruction cache %ldkB, linesize %d bytes\n",
	       icache_size >> 10, current_cpu_data.icache.linesz);
	printk("Primary data cache %ldkB, linesize %d bytes\n",
	       dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
	tx39h_flush_icache_all();
}