/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *      in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of the TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways causing
 * severe Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though the 3rd will knock out one of the
 * prev two), the u-D-TLB and u-I-TLB will have what is required to accomplish
 * memcpy
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the
 * assoc of uTLBs by not invalidating always but only when absolutely
 * necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresp
 * J-TLB entry got evicted/replaced.
 */


/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
			vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The Workaround was to set the Index reg with some valid value,
	 * prior to the flush. This was fixed in MMU v3 hence not needed
	 * any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy value */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm is called during fork, exit, munmap etc, multiple times
	 * as well. Only for fork( ) do we need to move parent to a new MMU
	 * ctxt, all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also the new implementation of get_new_mmu_context() allocates
	 *   a fresh ASID only if one is not already allocated - hence
	 *   destroy the old context first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyway
	 */
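	/*
	 * For scale (illustrative, assuming 8 KB pages): 32 entries cover
	 * 256 KB, so any range of 256 KB or more takes the wholesale ASID
	 * rollover path below rather than per-page erases.
	 */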
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

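/*
 * The SMP flush variants below run the corresponding local_* routine on
 * every CPU that may hold a stale entry. An IPI callback receives a single
 * void *, so the arguments are bundled in struct tlb_args and unpacked in
 * the per-CPU helpers.
 */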
static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * - the ASID for the TLB entry is fetched from the MMU ASID reg
	 *   (valid for curr)
	 * - it completes the lazy write to the SASID reg (again valid for
	 *   curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here
	 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *    Here VM wants to pre-install a TLB entry for user stack while
	 *    current->mm still points to pre-execve mm (hence the condition).
	 *    However the stack vaddr is soon relocated (randomization) and
	 *    move_page_tables() tries to undo that TLB entry.
	 *    Thus not creating TLB entry is not any worse.
	 *
	 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *    breakpoint in debugged task. Not creating a TLB now is not
	 *    performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
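	/*
	 * Worked example (illustrative): a user page with r=1 w=1 x=0 has
	 * PTE rwx = 0b110; mirroring yields Kr Kw Kx Ur Uw Ux = 0b110110,
	 * i.e. identical rights in kernel and user mode.
	 */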

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

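		/*
		 * test_and_set_bit() atomically marks the page clean and
		 * returns the old bit value, so the flush below runs only
		 * once for a page that was dirty.
		 */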
		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
		     u_itlb:4, u_dtlb:4;
#else
	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
		     ways:4, ver:8;
#endif
	} *mmu3;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}
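
	/*
	 * Illustrative decode (hypothetical BCR field values): sets=7,
	 * ways=2, pg_sz=4 under the v3 layout gives 1 << 7 = 128 sets,
	 * 1 << 2 = 4 ways and 512 << 4 = 8 KB pages - a 512 entry J-TLB.
	 */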
	mmu->num_tlb = mmu->sets * mmu->ways;
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, the kernel is built at compile time for one
	 * MMU ver. This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 * hardware doesn't understand cmds such as WriteNI, or IVUTLB
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 * un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		~			~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
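
/*
 * e.g. in the 128-set x 4-way layout above, (set 1, way 2) maps to linear
 * index 1 * 4 + 2 = 6, matching the table entry for set1/way2.
 */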

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways], pd1[mmu->ways];

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n.
					 * This is critical to our optimised
					 * loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
	       is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif