/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor.  Also, the processor id is
 * stuffed into the context register; see the illustrative sketch below
 * the setup macros.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)

#ifdef CONFIG_32BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 25);	\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#ifdef CONFIG_64BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 26);	\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
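
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): on a hypothetical per-CPU bring-up path the two setup macros
 * above are meant to be used together, roughly as
 *
 *	TLBMISS_HANDLER_SETUP();
 *
 * which stores smp_processor_id() << 25 (<< 26 on 64-bit) in c0_context
 * and points pgd_current[smp_processor_id()] at swapper_pg_dir, so the
 * TLB refill handler can recover this CPU's pgd from the upper bits of
 * the Context register.  A live mm is installed later through
 * TLBMISS_HANDLER_SETUP_PGD(next->pgd) in switch_mm()/activate_mm() below.
 */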
46 | ||
47 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | |
48 | ||
49 | #define ASID_INC 0x40 | |
50 | #define ASID_MASK 0xfc0 | |
51 | ||
52 | #elif defined(CONFIG_CPU_R8000) | |
53 | ||
54 | #define ASID_INC 0x10 | |
55 | #define ASID_MASK 0xff0 | |
56 | ||
57 | #elif defined(CONFIG_CPU_RM9000) | |
58 | ||
59 | #define ASID_INC 0x1 | |
60 | #define ASID_MASK 0xfff | |
61 | ||
41c594ab RB |
62 | /* SMTC/34K debug hack - but maybe we'll keep it */ |
63 | #elif defined(CONFIG_MIPS_MT_SMTC) | |
64 | ||
65 | #define ASID_INC 0x1 | |
66 | extern unsigned long smtc_asid_mask; | |
67 | #define ASID_MASK (smtc_asid_mask) | |
68 | #define HW_ASID_MASK 0xff | |
69 | /* End SMTC/34K debug hack */ | |
1da177e4 LT |
70 | #else /* FIXME: not correct for R6000 */ |
71 | ||
72 | #define ASID_INC 0x1 | |
73 | #define ASID_MASK 0xff | |
74 | ||
75 | #endif | |
76 | ||
77 | #define cpu_context(cpu, mm) ((mm)->context[cpu]) | |
78 | #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) | |
79 | #define asid_cache(cpu) (cpu_data[cpu].asid_cache) | |
80 | ||
81 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |
82 | { | |
83 | } | |
84 | ||
/*
 * All the upper bits that are unused by the hardware are treated as a
 * software ASID extension (an ASID version number).
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
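
/*
 * Worked example (editorial addition): with the common ASID_MASK of 0xff,
 * ASID_MASK | (ASID_MASK - 1) is still 0xff, so
 *
 *	ASID_VERSION_MASK  == ~0xfful	(everything above the hardware ASID)
 *	ASID_FIRST_VERSION == 0x100
 *
 * In the R3000 case (ASID_INC 0x40, ASID_MASK 0xfc0) the (ASID_MASK - 1)
 * term also folds in the unused low bits: 0xfc0 | 0xfbf == 0xfff, so the
 * version counter starts at ASID_FIRST_VERSION == 0x1000, one above the
 * highest value representable in the bits below the version field.
 */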
91 | ||
41c594ab RB |
92 | #ifndef CONFIG_MIPS_MT_SMTC |
93 | /* Normal, classic MIPS get_new_mmu_context */ | |
1da177e4 LT |
94 | static inline void |
95 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |
96 | { | |
97 | unsigned long asid = asid_cache(cpu); | |
98 | ||
99 | if (! ((asid += ASID_INC) & ASID_MASK) ) { | |
100 | if (cpu_has_vtag_icache) | |
101 | flush_icache_all(); | |
102 | local_flush_tlb_all(); /* start new asid cycle */ | |
103 | if (!asid) /* fix version if needed */ | |
104 | asid = ASID_FIRST_VERSION; | |
105 | } | |
106 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | |
107 | } | |
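
/*
 * Illustrative walk-through (editorial addition): with ASID_MASK 0xff and
 * ASID_INC 1, asid_cache(cpu) simply counts upward.  Stepping from 0x1ff
 * to 0x200 clears the hardware ASID bits, so the function above flushes
 * the TLB (and a virtually tagged icache, if present) and hands out 0x200
 * as the first context of the new generation; only when the whole counter
 * wraps to 0 is it reset to ASID_FIRST_VERSION.
 */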
108 | ||
41c594ab RB |
109 | #else /* CONFIG_MIPS_MT_SMTC */ |
110 | ||
21a151d8 | 111 | #define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu)) |
41c594ab RB |
112 | |
113 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
114 | ||
1da177e4 LT |
115 | /* |
116 | * Initialize the context related info for a new mm_struct | |
117 | * instance. | |
118 | */ | |
119 | static inline int | |
120 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
121 | { | |
122 | int i; | |
123 | ||
b5eb5511 | 124 | for_each_online_cpu(i) |
1da177e4 LT |
125 | cpu_context(i, mm) = 0; |
126 | ||
127 | return 0; | |
128 | } | |
129 | ||
130 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
131 | struct task_struct *tsk) | |
132 | { | |
133 | unsigned int cpu = smp_processor_id(); | |
134 | unsigned long flags; | |
41c594ab RB |
135 | #ifdef CONFIG_MIPS_MT_SMTC |
136 | unsigned long oldasid; | |
137 | unsigned long mtflags; | |
138 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | |
1da177e4 | 139 | local_irq_save(flags); |
41c594ab RB |
140 | mtflags = dvpe(); |
141 | #else /* Not SMTC */ | |
142 | local_irq_save(flags); | |
143 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
1da177e4 LT |
144 | |
145 | /* Check if our ASID is of an older version and thus invalid */ | |
146 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | |
147 | get_new_mmu_context(next, cpu); | |
41c594ab RB |
148 | #ifdef CONFIG_MIPS_MT_SMTC |
149 | /* | |
150 | * If the EntryHi ASID being replaced happens to be | |
151 | * the value flagged at ASID recycling time as having | |
152 | * an extended life, clear the bit showing it being | |
153 | * in use by this "CPU", and if that's the last bit, | |
154 | * free up the ASID value for use and flush any old | |
155 | * instances of it from the TLB. | |
156 | */ | |
157 | oldasid = (read_c0_entryhi() & ASID_MASK); | |
158 | if(smtc_live_asid[mytlb][oldasid]) { | |
159 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | |
160 | if(smtc_live_asid[mytlb][oldasid] == 0) | |
161 | smtc_flush_tlb_asid(oldasid); | |
162 | } | |
163 | /* | |
164 | * Tread softly on EntryHi, and so long as we support | |
165 | * having ASID_MASK smaller than the hardware maximum, | |
166 | * make sure no "soft" bits become "hard"... | |
167 | */ | |
168 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | |
169 | | (cpu_context(cpu, next) & ASID_MASK)); | |
170 | ehb(); /* Make sure it propagates to TCStatus */ | |
171 | evpe(mtflags); | |
172 | #else | |
1da177e4 | 173 | write_c0_entryhi(cpu_context(cpu, next)); |
41c594ab | 174 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1da177e4 LT |
175 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
176 | ||
177 | /* | |
178 | * Mark current->active_mm as not "active" anymore. | |
179 | * We don't want to mislead possible IPI tlb flush routines. | |
180 | */ | |
181 | cpu_clear(cpu, prev->cpu_vm_mask); | |
182 | cpu_set(cpu, next->cpu_vm_mask); | |
183 | ||
184 | local_irq_restore(flags); | |
185 | } | |
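
/*
 * Illustrative note (editorial addition): the version check above compares
 * only the bits above the hardware ASID.  With asid_cache(cpu) == 0x205, a
 * next->context[cpu] of 0x203 gives (0x203 ^ 0x205) & ~0xfful == 0, so the
 * existing ASID 0x03 is simply reused; a stale 0x1f0 left over from the
 * previous generation fails the test and forces get_new_mmu_context().
 */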
186 | ||
187 | /* | |
188 | * Destroy context related info for an mm_struct that is about | |
189 | * to be put to rest. | |
190 | */ | |
191 | static inline void destroy_context(struct mm_struct *mm) | |
192 | { | |
193 | } | |
194 | ||
21a151d8 | 195 | #define deactivate_mm(tsk, mm) do { } while (0) |
1da177e4 LT |
196 | |
197 | /* | |
198 | * After we have set current->mm to a new value, this activates | |
199 | * the context for the new mm so we see the new mappings. | |
200 | */ | |
201 | static inline void | |
202 | activate_mm(struct mm_struct *prev, struct mm_struct *next) | |
203 | { | |
204 | unsigned long flags; | |
205 | unsigned int cpu = smp_processor_id(); | |
206 | ||
41c594ab RB |
207 | #ifdef CONFIG_MIPS_MT_SMTC |
208 | unsigned long oldasid; | |
209 | unsigned long mtflags; | |
210 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | |
211 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
212 | ||
1da177e4 LT |
213 | local_irq_save(flags); |
214 | ||
215 | /* Unconditionally get a new ASID. */ | |
216 | get_new_mmu_context(next, cpu); | |
217 | ||
41c594ab RB |
218 | #ifdef CONFIG_MIPS_MT_SMTC |
219 | /* See comments for similar code above */ | |
220 | mtflags = dvpe(); | |
221 | oldasid = read_c0_entryhi() & ASID_MASK; | |
222 | if(smtc_live_asid[mytlb][oldasid]) { | |
223 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | |
384740dc RB |
224 | if(smtc_live_asid[mytlb][oldasid] == 0) |
225 | smtc_flush_tlb_asid(oldasid); | |
41c594ab RB |
226 | } |
227 | /* See comments for similar code above */ | |
228 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | |
229 | (cpu_context(cpu, next) & ASID_MASK)); | |
230 | ehb(); /* Make sure it propagates to TCStatus */ | |
231 | evpe(mtflags); | |
232 | #else | |
1da177e4 | 233 | write_c0_entryhi(cpu_context(cpu, next)); |
41c594ab | 234 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1da177e4 LT |
235 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
236 | ||
237 | /* mark mmu ownership change */ | |
238 | cpu_clear(cpu, prev->cpu_vm_mask); | |
239 | cpu_set(cpu, next->cpu_vm_mask); | |
240 | ||
241 | local_irq_restore(flags); | |
242 | } | |
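
/*
 * Usage note (illustrative editorial addition): activate_mm() is invoked
 * by generic kernel code, typically when exec installs a brand-new address
 * space, so unlike switch_mm() it never has a valid ASID to inherit and
 * always allocates a fresh one.
 */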
243 | ||
244 | /* | |
245 | * If mm is currently active_mm, we can't really drop it. Instead, | |
246 | * we will get a new one for it. | |
247 | */ | |
248 | static inline void | |
249 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) | |
250 | { | |
251 | unsigned long flags; | |
41c594ab RB |
252 | #ifdef CONFIG_MIPS_MT_SMTC |
253 | unsigned long oldasid; | |
254 | /* Can't use spinlock because called from TLB flush within DVPE */ | |
255 | unsigned int prevvpe; | |
256 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | |
257 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
1da177e4 LT |
258 | |
259 | local_irq_save(flags); | |
260 | ||
261 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { | |
262 | get_new_mmu_context(mm, cpu); | |
41c594ab RB |
263 | #ifdef CONFIG_MIPS_MT_SMTC |
264 | /* See comments for similar code above */ | |
265 | prevvpe = dvpe(); | |
266 | oldasid = (read_c0_entryhi() & ASID_MASK); | |
6b8aab09 RB |
267 | if (smtc_live_asid[mytlb][oldasid]) { |
268 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | |
269 | if(smtc_live_asid[mytlb][oldasid] == 0) | |
270 | smtc_flush_tlb_asid(oldasid); | |
41c594ab RB |
271 | } |
272 | /* See comments for similar code above */ | |
273 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | |
274 | | cpu_asid(cpu, mm)); | |
275 | ehb(); /* Make sure it propagates to TCStatus */ | |
276 | evpe(prevvpe); | |
277 | #else /* not CONFIG_MIPS_MT_SMTC */ | |
1da177e4 | 278 | write_c0_entryhi(cpu_asid(cpu, mm)); |
41c594ab | 279 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1da177e4 LT |
280 | } else { |
281 | /* will get a new context next time */ | |
41c594ab | 282 | #ifndef CONFIG_MIPS_MT_SMTC |
1da177e4 | 283 | cpu_context(cpu, mm) = 0; |
41c594ab RB |
284 | #else /* SMTC */ |
285 | int i; | |
286 | ||
287 | /* SMTC shares the TLB (and ASIDs) across VPEs */ | |
b5eb5511 | 288 | for_each_online_cpu(i) { |
384740dc RB |
289 | if((smtc_status & SMTC_TLB_SHARED) |
290 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | |
41c594ab RB |
291 | cpu_context(i, mm) = 0; |
292 | } | |
293 | #endif /* CONFIG_MIPS_MT_SMTC */ | |
1da177e4 | 294 | } |
1da177e4 LT |
295 | local_irq_restore(flags); |
296 | } | |
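
/*
 * Usage note (illustrative editorial addition): drop_mmu_context() is the
 * cheap way to invalidate every mapping of an mm on this CPU; for example
 * a local flush_tlb_mm()-style routine can call
 *
 *	drop_mmu_context(mm, smp_processor_id());
 *
 * instead of walking the TLB.  If mm is live here it gets a fresh ASID
 * immediately; otherwise its context is zeroed and a new ASID is allocated
 * lazily on the next switch_mm().
 */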
297 | ||
298 | #endif /* _ASM_MMU_CONTEXT_H */ |