Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
79add627 | 6 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
1da177e4 LT |
7 | * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org |
8 | * Carsten Langgaard, carstenl@mips.com | |
9 | * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. | |
10 | */ | |
eaa38d63 | 11 | #include <linux/cpu_pm.h> |
1da177e4 LT |
12 | #include <linux/init.h> |
13 | #include <linux/sched.h> | |
631330f5 | 14 | #include <linux/smp.h> |
1da177e4 | 15 | #include <linux/mm.h> |
fd062c84 | 16 | #include <linux/hugetlb.h> |
f2e3656d | 17 | #include <linux/module.h> |
1da177e4 LT |
18 | |
19 | #include <asm/cpu.h> | |
69f24d17 | 20 | #include <asm/cpu-type.h> |
1da177e4 | 21 | #include <asm/bootinfo.h> |
091bc3a4 | 22 | #include <asm/hazards.h> |
1da177e4 LT |
23 | #include <asm/mmu_context.h> |
24 | #include <asm/pgtable.h> | |
c01905ee | 25 | #include <asm/tlb.h> |
3d18c983 | 26 | #include <asm/tlbmisc.h> |
1da177e4 LT |
27 | |
28 | extern void build_tlb_refill_handler(void); | |
29 | ||
2a21c730 | 30 | /* |
c579d310 HC |
31 | * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, |
32 | * unfortunately, itlb is not totally transparent to software. | |
2a21c730 | 33 | */ |
14bd8c08 RB |
34 | static inline void flush_itlb(void) |
35 | { | |
36 | switch (current_cpu_type()) { | |
37 | case CPU_LOONGSON2: | |
c579d310 | 38 | case CPU_LOONGSON3: |
14bd8c08 RB |
39 | write_c0_diag(4); |
40 | break; | |
41 | default: | |
42 | break; | |
43 | } | |
44 | } | |
2a21c730 | 45 | |
14bd8c08 RB |
46 | static inline void flush_itlb_vm(struct vm_area_struct *vma) |
47 | { | |
48 | if (vma->vm_flags & VM_EXEC) | |
49 | flush_itlb(); | |
50 | } | |
2a21c730 | 51 | |
/*
 * Invalidate every non-wired TLB entry on the local CPU.
 * Runs with interrupts off; EntryHi and the hardware page-table walker
 * state are saved and restored around the rewrite.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	/* Park the hardware page-table walker while entries are rewritten. */
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries; those must survive the flush. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		/* Fast path: hardware tlbinvf invalidates whole structures. */
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		/* FTLB sets are indexed directly after the VTLB entries. */
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		/* Slow path: overwrite each entry with a unique dummy VPN. */
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
1da177e4 | 100 | |
/*
 * All TLB entries belonging to an mm share one ASID, so the whole
 * address space is flushed cheaply by retiring that ASID; the mm gets a
 * fresh one the next time it is activated on this CPU.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	/* Nothing to do unless this mm ever held an ASID on this CPU. */
	if (cpu_context(cpu, mm))
		drop_mmu_context(mm, cpu);

	preempt_enable();
}
117 | ||
/*
 * Flush the user-space range [start, end) of vma's mm from the local TLB.
 * Probes and kills individual entries when the range is small; otherwise
 * drops the mm's ASID, which invalidates everything at once.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	/* Skip entirely if this mm never had an ASID on this CPU. */
	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		/* Each TLB entry maps an even/odd page pair, so align to pairs. */
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		/*
		 * Probe per page-pair only while that is cheaper than a full
		 * ASID drop; FTLB CPUs use a tighter tlbsize/8 threshold.
		 */
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			    current_cpu_data.tlbsize / 8 :
			    current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				/* Look the pair up under the mm's ASID. */
				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				/* Negative index: no matching entry, move on. */
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			/* Large range: cheaper to retire the whole ASID. */
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}
166 | ||
/*
 * Flush the kernel address range [start, end) from the local TLB.
 * Kernel mappings carry no meaningful ASID, so entries are probed by
 * address alone; large ranges fall back to a full flush.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	/* Size in page pairs (one TLB entry covers two pages). */
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	/* Same small-range threshold as local_flush_tlb_range(). */
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		    current_cpu_data.tlbsize / 8 :
		    current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		/* Align the range to even/odd page-pair boundaries. */
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			/* Negative index means the probe missed. */
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}
211 | ||
/*
 * Flush a single user page of vma's mm from the local TLB, by probing
 * for the (page | ASID) pair and overwriting any matching entry with a
 * unique dummy VPN.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	/* Only if this mm ever held an ASID on this CPU. */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		/* Align to the even/odd page pair the TLB entry covers. */
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		/* Probe missed: nothing cached for this page. */
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}
247 | ||
248 | /* | |
249 | * This one is only used for pages with the global bit set so we don't care | |
250 | * much about the ASID. | |
251 | */ | |
252 | void local_flush_tlb_one(unsigned long page) | |
253 | { | |
254 | unsigned long flags; | |
255 | int oldpid, idx; | |
256 | ||
b633648c | 257 | local_irq_save(flags); |
1da177e4 | 258 | oldpid = read_c0_entryhi(); |
f1014d1b | 259 | htw_stop(); |
172546bf | 260 | page &= (PAGE_MASK << 1); |
1da177e4 LT |
261 | write_c0_entryhi(page); |
262 | mtc0_tlbw_hazard(); | |
263 | tlb_probe(); | |
432bef2a | 264 | tlb_probe_hazard(); |
1da177e4 LT |
265 | idx = read_c0_index(); |
266 | write_c0_entrylo0(0); | |
267 | write_c0_entrylo1(0); | |
268 | if (idx >= 0) { | |
269 | /* Make sure all entries differ. */ | |
172546bf | 270 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
1da177e4 LT |
271 | mtc0_tlbw_hazard(); |
272 | tlb_write_indexed(); | |
273 | tlbw_use_hazard(); | |
274 | } | |
275 | write_c0_entryhi(oldpid); | |
f1014d1b | 276 | htw_start(); |
14bd8c08 | 277 | flush_itlb(); |
b633648c | 278 | local_irq_restore(flags); |
1da177e4 LT |
279 | } |
280 | ||
281 | /* | |
282 | * We will need multiple versions of update_mmu_cache(), one that just | |
283 | * updates the TLB with the new pte(s), and another which also checks | |
284 | * for the R4k "end of page" hardware bug and does the needy. | |
285 | */ | |
286 | void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |
287 | { | |
288 | unsigned long flags; | |
289 | pgd_t *pgdp; | |
c6e8b587 | 290 | pud_t *pudp; |
1da177e4 LT |
291 | pmd_t *pmdp; |
292 | pte_t *ptep; | |
293 | int idx, pid; | |
294 | ||
295 | /* | |
296 | * Handle debugger faulting in for debugee. | |
297 | */ | |
298 | if (current->active_mm != vma->vm_mm) | |
299 | return; | |
300 | ||
b633648c | 301 | local_irq_save(flags); |
172546bf | 302 | |
6a8dff6a | 303 | htw_stop(); |
48c4ac97 | 304 | pid = read_c0_entryhi() & ASID_MASK; |
1da177e4 LT |
305 | address &= (PAGE_MASK << 1); |
306 | write_c0_entryhi(address | pid); | |
307 | pgdp = pgd_offset(vma->vm_mm, address); | |
308 | mtc0_tlbw_hazard(); | |
309 | tlb_probe(); | |
432bef2a | 310 | tlb_probe_hazard(); |
c6e8b587 RB |
311 | pudp = pud_offset(pgdp, address); |
312 | pmdp = pmd_offset(pudp, address); | |
1da177e4 | 313 | idx = read_c0_index(); |
aa1762f4 | 314 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
fd062c84 DD |
315 | /* this could be a huge page */ |
316 | if (pmd_huge(*pmdp)) { | |
317 | unsigned long lo; | |
318 | write_c0_pagemask(PM_HUGE_MASK); | |
319 | ptep = (pte_t *)pmdp; | |
6dd9344c | 320 | lo = pte_to_entrylo(pte_val(*ptep)); |
fd062c84 DD |
321 | write_c0_entrylo0(lo); |
322 | write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); | |
323 | ||
324 | mtc0_tlbw_hazard(); | |
325 | if (idx < 0) | |
326 | tlb_write_random(); | |
327 | else | |
328 | tlb_write_indexed(); | |
fb944c9b | 329 | tlbw_use_hazard(); |
fd062c84 DD |
330 | write_c0_pagemask(PM_DEFAULT_MASK); |
331 | } else | |
332 | #endif | |
333 | { | |
334 | ptep = pte_offset_map(pmdp, address); | |
1da177e4 | 335 | |
34adb28d | 336 | #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) |
c5b36783 SH |
337 | #ifdef CONFIG_XPA |
338 | write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); | |
339 | writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); | |
340 | ptep++; | |
341 | write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); | |
342 | writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); | |
343 | #else | |
fd062c84 DD |
344 | write_c0_entrylo0(ptep->pte_high); |
345 | ptep++; | |
346 | write_c0_entrylo1(ptep->pte_high); | |
c5b36783 | 347 | #endif |
1da177e4 | 348 | #else |
6dd9344c DD |
349 | write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++))); |
350 | write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep))); | |
1da177e4 | 351 | #endif |
fd062c84 DD |
352 | mtc0_tlbw_hazard(); |
353 | if (idx < 0) | |
354 | tlb_write_random(); | |
355 | else | |
356 | tlb_write_indexed(); | |
357 | } | |
1da177e4 | 358 | tlbw_use_hazard(); |
6a8dff6a | 359 | htw_start(); |
14bd8c08 | 360 | flush_itlb_vm(vma); |
b633648c | 361 | local_irq_restore(flags); |
1da177e4 LT |
362 | } |
363 | ||
/*
 * Install a permanent (wired) TLB entry with the given EntryLo pair,
 * EntryHi and page mask. The wired count is bumped so the entry
 * survives local_flush_tlb_all().
 */
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	/* Claim the next wired slot and point the index at it. */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	/* Purge any stale entries that might shadow the new mapping. */
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}
400 | ||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Probe whether the MMU supports the huge-page page mask: program
 * PM_HUGE_MASK into c0_pagemask and check that it reads back intact.
 * The default mask is restored before returning.
 */
int __init has_transparent_hugepage(void)
{
	unsigned long irq_flags;
	unsigned int readback;

	local_irq_save(irq_flags);

	/* Try to set the huge-page mask, then see what actually stuck. */
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	readback = read_c0_pagemask();

	/* Put back the mask the rest of the kernel depends on. */
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(irq_flags);

	return readback == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
420 | ||
d377732c RM |
421 | /* |
422 | * Used for loading TLB entries before trap_init() has started, when we | |
423 | * don't actually want to add a wired entry which remains throughout the | |
424 | * lifetime of the system | |
425 | */ | |
426 | ||
b1f7e112 | 427 | int temp_tlb_entry; |
d377732c RM |
428 | |
429 | __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |
430 | unsigned long entryhi, unsigned long pagemask) | |
431 | { | |
432 | int ret = 0; | |
433 | unsigned long flags; | |
434 | unsigned long wired; | |
435 | unsigned long old_pagemask; | |
436 | unsigned long old_ctx; | |
437 | ||
438 | local_irq_save(flags); | |
439 | /* Save old context and create impossible VPN2 value */ | |
6a8dff6a | 440 | htw_stop(); |
d377732c RM |
441 | old_ctx = read_c0_entryhi(); |
442 | old_pagemask = read_c0_pagemask(); | |
443 | wired = read_c0_wired(); | |
444 | if (--temp_tlb_entry < wired) { | |
445 | printk(KERN_WARNING | |
446 | "No TLB space left for add_temporary_entry\n"); | |
447 | ret = -ENOSPC; | |
448 | goto out; | |
449 | } | |
450 | ||
451 | write_c0_index(temp_tlb_entry); | |
452 | write_c0_pagemask(pagemask); | |
453 | write_c0_entryhi(entryhi); | |
454 | write_c0_entrylo0(entrylo0); | |
455 | write_c0_entrylo1(entrylo1); | |
456 | mtc0_tlbw_hazard(); | |
457 | tlb_write_indexed(); | |
458 | tlbw_use_hazard(); | |
459 | ||
460 | write_c0_entryhi(old_ctx); | |
461 | write_c0_pagemask(old_pagemask); | |
6a8dff6a | 462 | htw_start(); |
d377732c RM |
463 | out: |
464 | local_irq_restore(flags); | |
465 | return ret; | |
466 | } | |
467 | ||
078a55fc | 468 | static int ntlb; |
41c594ab RB |
469 | static int __init set_ntlb(char *str) |
470 | { | |
471 | get_option(&str, &ntlb); | |
472 | return 1; | |
473 | } | |
474 | ||
475 | __setup("ntlb=", set_ntlb); | |
476 | ||
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	/* If the mask did not stick, the hardware can't do our page size. */
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	/* R10k-family parts have a framemask register that must be cleared. */
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	/* Temporary entries are handed out from the top of the TLB down. */
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	/* Honour the "ntlb=" boot parameter by wiring off the excess. */
	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
eaa38d63 JH |
537 | |
538 | static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, | |
539 | void *v) | |
540 | { | |
541 | switch (cmd) { | |
542 | case CPU_PM_ENTER_FAILED: | |
543 | case CPU_PM_EXIT: | |
544 | r4k_tlb_configure(); | |
545 | break; | |
546 | } | |
547 | ||
548 | return NOTIFY_OK; | |
549 | } | |
550 | ||
551 | static struct notifier_block r4k_tlb_pm_notifier_block = { | |
552 | .notifier_call = r4k_tlb_pm_notifier, | |
553 | }; | |
554 | ||
555 | static int __init r4k_tlb_init_pm(void) | |
556 | { | |
557 | return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); | |
558 | } | |
559 | arch_initcall(r4k_tlb_init_pm); |