[SPARC64]: Add a secondary TSB for hugepage mappings.
arch/sparc64/mm/tsb.c
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

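/* Hash a virtual address into a TSB index: shift the address down
 * to the page boundary for the page size in question, then mask by
 * the number of entries (always a power of two).
 */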
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

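/* A TSB tag stores bits 63:22 of the virtual address, so entries
 * are matched by comparing against (vaddr >> 22).
 */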
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

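/* Invalidate the TSB entry for each address in the mmu_gather batch.
 * Bit 0 of a batched address is used as a flag by the TLB batching
 * code, so it is masked off to recover the page-aligned address.
 * The 'tsb' argument is a physical address on cheetah_plus and
 * hypervisor chips, a virtual address otherwise; tsb_flush() performs
 * the tag compare and invalidate.
 */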
static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

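/* Flush the batched user addresses out of both the base page size
 * TSB and, when one exists, the hugepage TSB.  On cheetah_plus and
 * hypervisor chips the TSB is accessed physically, so the base must
 * be converted with __pa() first.
 */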
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

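/* Translate the kernel's base and huge page size configuration into
 * the sun4v hypervisor's TSB descriptor page size index and mask
 * encodings.
 */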
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

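/* Compute the TSB register value for a TSB block.  On cheetah_plus
 * and sun4v the TSB is referenced by physical address; older chips
 * map it virtually with a locked TLB entry at TSBMAP_BASE.  On sun4v
 * the hypervisor TSB descriptor is filled in as well.
 */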
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

static kmem_cache_t *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

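/* Create one kmem cache per supported TSB size, 8K through 1MB in
 * powers of two.  Each cache is size-aligned because the hardware
 * requires a TSB to be aligned to its size.
 */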
void __init tsb_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  SLAB_HWCACHE_ALIGN |
						  SLAB_MUST_HWCACHE_ALIGN,
						  NULL, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

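	/* Pick the smallest TSB size whose 3/4-full watermark still
	 * exceeds the current RSS; new_cache_index selects the
	 * matching kmem cache in tsb_caches[].
	 */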
	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

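/* Initialize the MMU context of a new address space, allocating the
 * initial base TSB (and a hugepage TSB if the parent had hugepage
 * mappings).  Returns -ENOMEM if the base TSB cannot be allocated.
 */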
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

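/* Free one TSB back to its size cache.  The cache index is recovered
 * from the low three bits of tsb_reg_val, which encode the TSB size.
 */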
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

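/* Tear down an exiting address space: free its TSBs and return its
 * MMU context number to the allocation bitmap.
 */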
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}