arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
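/*
 * A worked example (illustrative; assumes 4KB pages, PAGE_SHIFT == 12,
 * and a 32-bit kernel where CKSEG0 is 0x80000000):
 *
 *	UNIQUE_ENTRYHI(5) == 0x80000000 + (5 << 13) == 0x8000a000
 *
 * Every index yields a distinct VPN2 inside the unmapped CKSEG0
 * segment, so invalidated entries can never alias each other.
 */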

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
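
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * two macros must be paired in the same lexical scope, because the
 * SMTC flavour opens a block that declares mvpflags and relies on
 * EXIT_CRITICAL() to close it again.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	ENTER_CRITICAL(flags);
	/* ... poke at EntryHi/EntryLo/Index safely here ... */
	EXIT_CRITICAL(flags);
}
#endif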

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);

	preempt_enable();
}
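
/*
 * Conceptually (a sketch of the idea, not the real implementation):
 * bumping the ASID hands this mm a fresh value, so stale TLB entries
 * tagged with the old ASID simply stop matching.  No per-entry
 * invalidation is needed until the ASID space wraps, at which point
 * the whole TLB gets flushed.
 */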

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
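		/*
		 * Each TLB entry maps an even/odd page pair via
		 * EntryLo0/EntryLo1, so the number of candidate entries is
		 * half the page count, rounded up; e.g. a 5-page range
		 * yields 3 candidate entries.
		 */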
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
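	/*
	 * A negative Index means the probe missed: there is no entry for
	 * this page yet and tlb_write_random() will pick a victim slot;
	 * otherwise the matching entry is overwritten in place with
	 * tlb_write_indexed().
	 */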
#ifdef CONFIG_HUGETLB_PAGE
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_val(*ptep) >> 6;
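		/*
		 * EntryLo1 maps the second half of the huge page:
		 * (HPAGE_SIZE >> 7) == (HPAGE_SIZE / 2) >> 6, i.e. the
		 * PFN offset of half a huge page in EntryLo format.
		 */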
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_val(*ptep++) >> 6);
		write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
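		/*
		 * In the non-64BIT_PHYS_ADDR case above, the >> 6 assumes
		 * the classic MIPS PTE layout: the PTE holds the EntryLo
		 * value shifted left by 6, with software bits in the low
		 * 6 bits, so shifting right recovers EntryLo with the PFN
		 * field starting at bit 6.
		 */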
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
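
/*
 * Usage sketch (illustrative only; the addresses, the device and this
 * helper are hypothetical, not from the original file): wire a pair of
 * 4KB pages mapping physical 0x1f000000/0x1f001000 at virtual address
 * 0xe0000000, uncached.  Assumes the standard R4k EntryLo layout:
 * PFN at bit 6, C (cache attribute) at bits 5:3, then D, V and G.
 */
#if 0
static void __init wire_example_mapping(void)
{
	unsigned long lo0 = (0x1f000000UL >> 6) | (2 << 3) | 0x7;
	unsigned long lo1 = (0x1f001000UL >> 6) | (2 << 3) | 0x7;

	/* C=2 is uncached; 0x7 sets Dirty, Valid and Global. */
	add_wired_entry(lo0, lo1, 0xe0000000UL, PM_DEFAULT_MASK);
}
#endif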

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

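/*
 * Temporary entries are allocated from the top of the TLB downward,
 * toward the wired region; tlb_init() seeds this index with
 * current_cpu_data.tlbsize - 1.
 */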
static int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}

static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config 1
	 * register is not supported and we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

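	/* Config1.MMUSize (bits 30:25) holds the TLB entry count minus one. */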
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
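
/*
 * Example (hypothetical numbers): booting a CPU that has a 48-entry TLB
 * with "ntlb=16" makes tlb_init() below wire entries 0-31, leaving only
 * 16 entries for normal replacement; a debugging aid.
 */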

void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}