arch/arc/mm/tlb.c
1 /*
2 * TLB Management (flush/create/diagnostics) for ARC700
3 *
4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * vineetg: Aug 2011
11 * -Reintroduce duplicate PD fixup - some customer chips still have the issue
12 *
13 * vineetg: May 2011
14 * -No need to flush_cache_page( ) for each call to update_mmu_cache()
15 * some of the LMBench tests improved amazingly
16 * = page-fault thrice as fast (75 usec to 28 usec)
17 * = mmap twice as fast (9.6 msec to 4.6 msec),
18 * = fork (5.3 msec to 3.7 msec)
19 *
20 * vineetg: April 2011 :
21 * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
22 * helps avoid a shift when preparing PD0 from PTE
23 *
24 * vineetg: April 2011 : Preparing for MMU V3
25 * -MMU v2/v3 BCRs decoded differently
26 * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
27 * -tlb_entry_erase( ) can be void
28 * -local_flush_tlb_range( ):
29 * = need not "ceil" @end
30 * = walks MMU only if range spans < 32 entries, as opposed to 256
31 *
32 * Vineetg: Sept 10th 2008
33 * -Changes related to MMU v2 (Rel 4.8)
34 *
35 * Vineetg: Aug 29th 2008
36 * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
37 * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
38 * it fails. Thus need to load it with ANY valid value before invoking
39 * TLBIVUTLB cmd
40 *
41 * Vineetg: Aug 21st 2008:
42 * -Reduced the duration of IRQ lockouts in TLB Flush routines
43 * -Multiple copies of TLB erase code separated into a "single" function
44 * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
45 * in interrupt-safe region.
46 *
47 * Vineetg: April 23rd Bug #93131
48 * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
49 * flush is more than the size of TLB itself.
50 *
51 * Rahul Trivedi : Codito Technologies 2004
52 */
53
54 #include <linux/module.h>
55 #include <asm/arcregs.h>
56 #include <asm/setup.h>
57 #include <asm/mmu_context.h>
58 #include <asm/tlb.h>
59
60 /* Need for ARC MMU v2
61 *
62 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and was 2 way set-assoc.
63 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
64 * map into same set, there would be contention for the 2 ways causing severe
65 * Thrashing.
66 *
67 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
68 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
69 * Given this, the thrashing problem should never happen because once the 3
70 * J-TLB entries are created (even though 3rd will knock out one of the prev
71 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
72 *
73 * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
74 * This is a simple design for keeping them in sync. So what do we do?
75 * The solution which James came up with was pretty neat. It utilised the assoc
76 * of uTLBs by not invalidating always but only when absolutely necessary.
77 *
78 * - Existing TLB commands work as before
79 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
80 * - New command (TLBIVUTLB) to invalidate uTLBs.
81 *
82 * The uTLBs need only be invalidated when pages are being removed from the
83 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
84 * as a result of a miss, the removed entry is still allowed to exist in the
85 * uTLBs as it is still valid and present in the OS page table. This allows the
86 * full associativity of the uTLBs to hide the limited associativity of the main
87 * TLB.
88 *
89 * During a miss handler, the new "TLBWriteNI" command is used to load
90 * entries without clearing the uTLBs.
91 *
92 * When the OS page table is updated, TLB entries that may be associated with a
93 * removed page are removed (flushed) from the TLB using TLBWrite. In this
94 * circumstance, the uTLBs must also be cleared. This is done by using the
95 * existing TLBWrite command. An explicit IVUTLB is also required for those
96 * corner cases when TLBWrite was not executed at all because the corresp
97 * J-TLB entry got evicted/replaced.
98 */
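/*
 * Quick reference for the above (an informal summary, not a spec): which
 * command is used where, per the MMU v2 design described here.
 *
 *   TLBWrite    - writes a J-TLB entry AND clears the uTLBs
 *                 (used when a mapping is really being removed/overwritten)
 *   TLBWriteNI  - writes a J-TLB entry, leaves the uTLBs alone
 *                 (used on a refill: the evicted victim is still valid)
 *   TLBIVUTLB   - explicitly invalidates the uTLBs only
 *                 (corner case: the matching J-TLB entry was already evicted,
 *                  so no TLBWrite happens, yet a stale uTLB copy may remain)
 */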
99
100 /* A copy of the ASID from the PID reg is kept in asid_cache */
101 int asid_cache = FIRST_ASID;
102
103 /* ASID to mm struct mapping. We have one extra entry corresponding to
104 * NO_ASID to save us a compare when clearing the mm entry for old asid
105 * see get_new_mmu_context (asm-arc/mmu_context.h)
106 */
107 struct mm_struct *asid_mm_map[NUM_ASID + 1];
108
109 /*
110 * Utility Routine to erase a J-TLB entry
111 * The procedure is to look it up in the MMU. If found, ERASE it by
112 * issuing a TlbWrite CMD with PD0 = PD1 = 0
113 */
114
115 static void __tlb_entry_erase(void)
116 {
117 write_aux_reg(ARC_REG_TLBPD1, 0);
118 write_aux_reg(ARC_REG_TLBPD0, 0);
119 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
120 }
121
122 static void tlb_entry_erase(unsigned int vaddr_n_asid)
123 {
124 unsigned int idx;
125
126 /* Locate the TLB entry for this vaddr + ASID */
127 write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
128 write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
129 idx = read_aux_reg(ARC_REG_TLBINDEX);
130
131 /* No error means entry found, zero it out */
132 if (likely(!(idx & TLB_LKUP_ERR))) {
133 __tlb_entry_erase();
134 } else { /* Some sort of Error */
135
136 /* Duplicate entry error */
137 if (idx & 0x1) {
138 /* TODO we need to handle this case too */
139 pr_emerg("unhandled Duplicate flush for %x\n",
140 vaddr_n_asid);
141 }
142 /* else entry not found so nothing to do */
143 }
144 }
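/*
 * Illustrative only (values made up): callers build @vaddr_n_asid as a
 * page-aligned user address OR'ed with the 8-bit ASID in the low byte, e.g.
 *
 *	tlb_entry_erase(0x2000a000 | 0x42);	/@ vaddr 0x2000a000, ASID 0x42 @/
 *
 * which is the PD0 format that the TLBProbe above compares against.
 */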
145
146 /****************************************************************************
147 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
148 *
149 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
150 *
151 * utlb_invalidate ( )
152 * -For v2 MMU calls Flush uTLB Cmd
153 * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
154 * This is because in v1 TLBWrite itself invalidates uTLBs
155 ***************************************************************************/
156
157 static void utlb_invalidate(void)
158 {
159 #if (CONFIG_ARC_MMU_VER >= 2)
160
161 #if (CONFIG_ARC_MMU_VER < 3)
162 /* MMU v2 introduced the uTLB Flush command.
163 * There was however an obscure hardware bug, where uTLB flush would
164 * fail when a prior probe for J-TLB (both totally unrelated) would
165 * return lkup err - because the entry didn't exist in the MMU.
166 * The workaround was to set the Index reg with some valid value, prior to
167 * flush. This was fixed in MMU v3, hence it is not needed any more
168 */
169 unsigned int idx;
170
171 /* make sure INDEX Reg is valid */
172 idx = read_aux_reg(ARC_REG_TLBINDEX);
173
174 /* If not write some dummy val */
175 if (unlikely(idx & TLB_LKUP_ERR))
176 write_aux_reg(ARC_REG_TLBINDEX, 0xa);
177 #endif
178
179 write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
180 #endif
181
182 }
183
184 /*
185 * Un-conditionally (without lookup) erase the entire MMU contents
186 */
187
188 noinline void local_flush_tlb_all(void)
189 {
190 unsigned long flags;
191 unsigned int entry;
192 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
193
194 local_irq_save(flags);
195
196 /* Load PD0 and PD1 with template for a Blank Entry */
197 write_aux_reg(ARC_REG_TLBPD1, 0);
198 write_aux_reg(ARC_REG_TLBPD0, 0);
199
200 for (entry = 0; entry < mmu->num_tlb; entry++) {
201 /* write this entry to the TLB */
202 write_aux_reg(ARC_REG_TLBINDEX, entry);
203 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
204 }
205
206 utlb_invalidate();
207
208 local_irq_restore(flags);
209 }
210
211 /*
212 * Flush the entire MM for userland. The fastest way is to move to the Next ASID
213 */
214 noinline void local_flush_tlb_mm(struct mm_struct *mm)
215 {
216 /*
217 * Small optimisation courtesy IA64
218 * flush_mm is called during fork, exit, munmap etc., multiple times as well.
219 * Only for fork( ) do we need to move parent to a new MMU ctxt,
220 * all other cases are NOPs, hence this check.
221 */
222 if (atomic_read(&mm->mm_users) == 0)
223 return;
224
225 /*
226 * Workaround for Android weirdism:
227 * A binder VMA could end up in a task such that vma->mm != tsk->mm
228 * old code would cause h/w - s/w ASID to get out of sync
229 */
230 if (current->mm != mm)
231 destroy_context(mm);
232 else
233 get_new_mmu_context(mm);
234 }
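/*
 * Why the ASID bump suffices (informal reasoning): user J-TLB entries are
 * tagged with the mm's ASID. Giving @mm a fresh ASID means none of its stale
 * entries can match a future lookup, so no per-entry shootdown is needed;
 * the old entries simply age out of the TLB.
 */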
235
236 /*
237 * Flush a Range of TLB entries for userland.
238 * @start is inclusive, while @end is exclusive
239 * Difference between this and Kernel Range Flush is
240 * -Here the fastest way (if range is too large) is to move to next ASID
241 * without doing any explicit Shootdown
242 * -In case of kernel Flush, entry has to be shot down explicitly
243 */
244 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
245 unsigned long end)
246 {
247 unsigned long flags;
248 unsigned int asid;
249
250 /* If range @start to @end is more than 32 TLB entries deep,
251 * it's better to move to a new ASID rather than searching for
252 * individual entries and then shooting them down
253 *
254 * The calc above is rough, doesn't account for unaligned parts,
255 * since this is heuristics based anyway
256 */
257 if (unlikely((end - start) >= PAGE_SIZE * 32)) {
258 local_flush_tlb_mm(vma->vm_mm);
259 return;
260 }
261
262 /*
263 * @start moved to page start: this alone suffices for checking
264 * loop end condition below, w/o need for aligning @end to end
265 * e.g. 2000 to 4001 will anyhow loop twice
266 */
267 start &= PAGE_MASK;
268
269 local_irq_save(flags);
270 asid = vma->vm_mm->context.asid;
271
272 if (asid != NO_ASID) {
273 while (start < end) {
274 tlb_entry_erase(start | (asid & 0xff));
275 start += PAGE_SIZE;
276 }
277 }
278
279 utlb_invalidate();
280
281 local_irq_restore(flags);
282 }
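/*
 * Rough feel for the heuristic above, assuming 8 KB pages purely for
 * illustration: a 64 KB range is 8 pages, so the loop probes and erases 8
 * entries; a 1 MB range is 128 pages (>= 32), so we simply take a new ASID
 * via local_flush_tlb_mm() instead of walking the MMU.
 */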
283
284 /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
285 * @start, @end interpreted as kvaddr
286 * Interestingly, shared TLB entries can also be flushed using just
287 * @start,@end alone (interpreted as user vaddr), although technically SASID
288 * is also needed. However our smart TLBProbe lookup takes care of that.
289 */
290 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
291 {
292 unsigned long flags;
293
294 /* exactly same as above, except for TLB entry not taking ASID */
295
296 if (unlikely((end - start) >= PAGE_SIZE * 32)) {
297 local_flush_tlb_all();
298 return;
299 }
300
301 start &= PAGE_MASK;
302
303 local_irq_save(flags);
304 while (start < end) {
305 tlb_entry_erase(start);
306 start += PAGE_SIZE;
307 }
308
309 utlb_invalidate();
310
311 local_irq_restore(flags);
312 }
313
314 /*
315 * Delete TLB entry in MMU for a given page (??? address)
316 * NOTE: One TLB entry contains the translation for a single PAGE
317 */
318
319 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
320 {
321 unsigned long flags;
322
323 /* Note that it is critical that interrupts are DISABLED between
324 * checking the ASID and using it to flush the TLB entry
325 */
326 local_irq_save(flags);
327
328 if (vma->vm_mm->context.asid != NO_ASID) {
329 tlb_entry_erase((page & PAGE_MASK) |
330 (vma->vm_mm->context.asid & 0xff));
331 utlb_invalidate();
332 }
333
334 local_irq_restore(flags);
335 }
336
337 /*
338 * Routine to create a TLB entry
339 */
340 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
341 {
342 unsigned long flags;
343 unsigned int idx, asid_or_sasid;
344 unsigned long pd0_flags;
345
346 /*
347 * create_tlb() assumes that current->mm == vma->mm, since
348 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
349 * -completes the lazy write to SASID reg (again valid for curr tsk)
350 *
351 * Removing the assumption involves
352 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
353 * -Fix the TLB paranoid debug code to not trigger false negatives.
354 * -More importantly it makes this handler inconsistent with fast-path
355 * TLB Refill handler which always deals with "current"
356 *
357 * Lets see the use cases when current->mm != vma->mm and we land here
358 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
359 * Here VM wants to pre-install a TLB entry for user stack while
360 * current->mm still points to pre-execve mm (hence the condition).
361 * However the stack vaddr is soon relocated (randomization) and
362 * move_page_tables() tries to undo that TLB entry.
363 * Thus not creating TLB entry is not any worse.
364 *
365 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
366 * breakpoint in debugged task. Not creating a TLB now is not
367 * performance critical.
368 *
369 * Neither of the above cases is a good enough reason for the code churn.
370 */
371 if (current->active_mm != vma->vm_mm)
372 return;
373
374 local_irq_save(flags);
375
376 tlb_paranoid_check(vma->vm_mm->context.asid, address);
377
378 address &= PAGE_MASK;
379
380 /* update this PTE credentials */
381 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
382
383 /* Create HW TLB entry Flags (in PD0) from PTE Flags */
384 #if (CONFIG_ARC_MMU_VER <= 2)
385 pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
386 #else
387 pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
388 #endif
389
390 /* ASID for this task */
391 asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
392
393 write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
394
395 /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
396 write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
397
398 /* First verify if entry for this vaddr+ASID already exists */
399 write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
400 idx = read_aux_reg(ARC_REG_TLBINDEX);
401
402 /*
403 * If Not already present get a free slot from MMU.
404 * Otherwise, Probe would have located the entry and set INDEX Reg
405 * with existing location. This will cause Write CMD to over-write
406 * existing entry with new PD0 and PD1
407 */
408 if (likely(idx & TLB_LKUP_ERR))
409 write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
410
411 /*
412 * Commit the Entry to MMU
413 * It doesn't sound safe to use the TLBWriteNI cmd here
414 * which doesn't flush uTLBs. I'd rather be safe than sorry.
415 */
416 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
417
418 local_irq_restore(flags);
419 }
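/*
 * Shape of the entry written above (summary, not a spec): PD0 holds the
 * page-aligned vaddr, the flag bits lifted from the PTE and the ASID (or
 * shared SASID) in the low byte; PD1 holds the physical page frame and the
 * Kr/Kw/Kx permission bits, also taken from the PTE. On MMU v1/v2 the PTE
 * flag bits additionally need the >> 1 shift seen above, since they don't
 * line up with PD0 there (MMU v3 made them overlap-free, per the changelog).
 */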
420
421 /* arch hook called by core VM at the end of handle_mm_fault( ),
422 * when a new PTE is entered in Page Tables or an existing one
423 * is modified. We aggressively pre-install a TLB entry
424 */
425
426 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
427 pte_t *ptep)
428 {
429
430 create_tlb(vma, vaddress, ptep);
431 }
432
433 /* Read the MMU Build Configuration Registers, decode them and save into
434 * the cpuinfo structure for later use.
435 * No Validation is done here, simply read/convert the BCRs
436 */
437 void __cpuinit read_decode_mmu_bcr(void)
438 {
439 unsigned int tmp;
440 struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
441 struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
442 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
443
444 tmp = read_aux_reg(ARC_REG_MMU_BCR);
445 mmu->ver = (tmp >> 24);
446
447 if (mmu->ver <= 2) {
448 mmu2 = (struct bcr_mmu_1_2 *)&tmp;
449 mmu->pg_sz = PAGE_SIZE;
450 mmu->sets = 1 << mmu2->sets;
451 mmu->ways = 1 << mmu2->ways;
452 mmu->u_dtlb = mmu2->u_dtlb;
453 mmu->u_itlb = mmu2->u_itlb;
454 } else {
455 mmu3 = (struct bcr_mmu_3 *)&tmp;
456 mmu->pg_sz = 512 << mmu3->pg_sz;
457 mmu->sets = 1 << mmu3->sets;
458 mmu->ways = 1 << mmu3->ways;
459 mmu->u_dtlb = mmu3->u_dtlb;
460 mmu->u_itlb = mmu3->u_itlb;
461 }
462
463 mmu->num_tlb = mmu->sets * mmu->ways;
464 }
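/*
 * Worked example (numbers illustrative, not from any particular silicon):
 * a v2 style BCR with raw fields sets=7, ways=1 decodes to 1<<7 = 128 sets
 * and 1<<1 = 2 ways, i.e. mmu->num_tlb = 256 J-TLB entries - one of the two
 * sizes (256/512) mentioned in the changelog at the top of this file.
 */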
465
466 char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
467 {
468 int n = 0;
469 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
470
471 n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
472 p_mmu->ver, TO_KB(p_mmu->pg_sz));
473
474 n += scnprintf(buf + n, len - n,
475 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
476 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
477 p_mmu->u_dtlb, p_mmu->u_itlb,
478 __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
479
480 return buf;
481 }
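/*
 * With the illustrative numbers above, the line this helper formats would
 * read something like (layout per the scnprintf calls; values made up):
 *
 *   ARC700 MMU [v2]	: 8k PAGE, J-TLB 256 (128x2), uDTLB 8, uITLB 4,
 */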
482
483 void __cpuinit arc_mmu_init(void)
484 {
485 char str[256];
486 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
487
488 printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
489
490 * For efficiency's sake, the kernel is built at compile time for a specific MMU ver.
491 * This must match the hardware it is running on.
492 * Linux built for MMU V2, if run on MMU V1, will break down because V1
493 * hardware doesn't understand cmds such as WriteNI or IVUTLB.
494 * On the other hand, Linux built for V1, if run on MMU V2, will do
495 * un-needed workarounds to prevent memcpy thrashing.
496 * Similarly, MMU V3 has new features which won't work on an older MMU.
497 */
498 if (mmu->ver != CONFIG_ARC_MMU_VER) {
499 panic("MMU ver %d doesn't match kernel built for %d...\n",
500 mmu->ver, CONFIG_ARC_MMU_VER);
501 }
502
503 if (mmu->pg_sz != PAGE_SIZE)
504 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
505
506 /*
507 * ASID mgmt data structures are compile time init
508 * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
509 */
510
511 local_flush_tlb_all();
512
513 /* Enable the MMU */
514 write_aux_reg(ARC_REG_PID, MMU_ENABLE);
515
516 /* In smp we use this reg for interrupt 1 scratch */
517 #ifndef CONFIG_SMP
518 /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
519 write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
520 #endif
521 }
522
523 /*
524 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
525 * The mapping is Column-first.
526 * --------------------- -----------
527 * |way0|way1|way2|way3| |way0|way1|
528 * --------------------- -----------
529 * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
530 * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
531 * ~ ~ ~ ~
532 * [set127] | 508| 509| 510| 511| | 254| 255|
533 * --------------------- -----------
534 * For normal operations we don't (must not) care how the above works, since
535 * MMU cmd getIndex(vaddr) abstracts that out.
536 * However for walking WAYS of a SET, we need to know this
537 */
538 #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
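/*
 * e.g. on a 4-way MMU, (set=1, way=2) maps to linear index 1*4 + 2 = 6,
 * matching the [set1] row of the table above.
 */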
539
540 /* Handling of Duplicate PD (TLB entry) in MMU.
541 * -Could be due to buggy customer tapeouts or obscure kernel bugs
542 * -The MMU complains not at the time of duplicate PD installation, but at the
543 * time of a lookup matching multiple ways.
544 * -Ideally these should never happen - but if they do - workaround by deleting
545 * the duplicate one.
546 * -Knob to be verbose about it. (TODO: hook it up to debugfs)
547 */
548 volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */
549
550 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
551 struct pt_regs *regs)
552 {
553 int set, way, n;
554 unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
555 unsigned long flags, is_valid;
556 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
557
558 local_irq_save(flags);
559
560 /* re-enable the MMU */
561 write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
562
563 /* loop thru all sets of TLB */
564 for (set = 0; set < mmu->sets; set++) {
565
566 /* read out all the ways of current set */
567 for (way = 0, is_valid = 0; way < mmu->ways; way++) {
568 write_aux_reg(ARC_REG_TLBINDEX,
569 SET_WAY_TO_IDX(mmu, set, way));
570 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
571 pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
572 pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
573 is_valid |= pd0[way] & _PAGE_PRESENT;
574 }
575
576 /* If all the WAYS in SET are empty, skip to next SET */
577 if (!is_valid)
578 continue;
579
580 /* Scan the set for duplicate ways: needs a nested loop */
581 for (way = 0; way < mmu->ways; way++) {
582 if (!pd0[way])
583 continue;
584
585 for (n = way + 1; n < mmu->ways; n++) {
586 if ((pd0[way] & PAGE_MASK) ==
587 (pd0[n] & PAGE_MASK)) {
588
589 if (dup_pd_verbose) {
590 pr_info("Duplicate PD's @"
591 "[%d:%d]/[%d:%d]\n",
592 set, way, set, n);
593 pr_info("TLBPD0[%u]: %08x\n",
594 way, pd0[way]);
595 }
596
597 /*
598 * clear entry @way and not @n. This is
599 * critical to our optimised loop
600 */
601 pd0[way] = pd1[way] = 0;
602 write_aux_reg(ARC_REG_TLBINDEX,
603 SET_WAY_TO_IDX(mmu, set, way));
604 __tlb_entry_erase();
605 }
606 }
607 }
608 }
609
610 local_irq_restore(flags);
611 }
612
613 /***********************************************************************
614 * Diagnostic Routines
615 * -Called from Low Level TLB Handlers if things don't look good
616 **********************************************************************/
617
618 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
619
620 /*
621 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
622 * don't match
623 */
624 void print_asid_mismatch(int is_fast_path)
625 {
626 int pid_sw, pid_hw;
627 pid_sw = current->active_mm->context.asid;
628 pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
629
630 pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
631 is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
632
633 __asm__ __volatile__("flag 1");
634 }
635
636 void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
637 {
638 unsigned int pid_hw;
639
640 pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
641
642 if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
643 print_asid_mismatch(0);
644 }
645 #endif