/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	((hpd).pd == 0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * For now we make this pagetable placement change only for Book3S 64; it
 * could potentially work on other subarchs as well.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have a
 * 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
 */
int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pgd_val(pgd) & 0x3) != 0x0);
}
#else
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pgd_huge(pgd_t pgd)
{
	return 0;
}
#endif

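/*
 * Return the pte that maps @addr in @mm, walking any hugepage directories
 * along the way; NULL if nothing is mapped there.
 */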
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

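/*
 * Allocate a fresh hugepte table and install it at *hpdp.  @pdshift is the
 * shift of the pagetable level holding the hugepd entry, @pshift the shift
 * of the hugepage size; a race with a concurrent populater is resolved
 * under mm->page_table_lock below.
 */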
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = (unsigned long)new |
			   (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * For now we make this pagetable placement change only for Book3S 64; it
 * could potentially work on other subarchs as well.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}
#endif

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build the list of addresses of gigantic pages.  This function is used in
 * early boot, before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
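
/*
 * As an illustration (hypothetical values only): a command line such as
 *   hugepagesz=1G hugepages=2 hugepagesz=4G hugepages=4
 * is parsed above into gpage_npages[] entries of 2 and 4 for the 1G and 4G
 * page sizes, which reserve_hugetlb_gpages() below then turns into memblock
 * reservations.
 */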

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build the list of addresses of gigantic pages.  This function is used in
 * early boot, before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

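/*
 * PMD sharing of hugepage mappings is not implemented on powerpc, so there
 * is never anything to unshare here.
 */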
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

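/*
 * Free a hugepte table.  When other CPUs may still be walking this mm's page
 * tables, batch the tables per cpu and only free the whole batch after an
 * RCU grace period; otherwise free it immediately.
 */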
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif

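/*
 * Clear the hugepd entry (or entries, on FSL) at *hpdp and free the hugepte
 * table it points to, but only if the range being torn down spans the whole
 * region covered by this pagetable level (the floor/ceiling checks mirror
 * the ones in the normal free_pgd_range()).
 */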
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

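/*
 * Translate a user address in @mm to the struct page that backs it, for
 * hugepage mappings only; anything else gets an -EINVAL error pointer.
 */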
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

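/*
 * Helper for the powerpc fast GUP path: walk every hugepte covered by this
 * hugepd over [addr, end) and try to pin the corresponding pages.  Returns 0
 * if any pte could not be handled, 1 on success.
 */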
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

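/*
 * Report the MMU page size backing a VMA: the slice page size where the
 * slice code is in use, otherwise the hstate size for hugetlb VMAs and
 * PAGE_SIZE for everything else.
 */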
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

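/*
 * Register @size as a supported hugepage size with the generic hugetlb code,
 * after checking it against the hardware page sizes and the pagetable/slice
 * constraints below.
 */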
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

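/* Early parsing of the hugepagesz= command line option. */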
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
	}

	/* Set default large page size.  Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);

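/*
 * Keep the data and instruction caches coherent for a hugepage by flushing
 * every subpage; highmem subpages have to be kmapped temporarily first.
 */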
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);

	if (pgd_huge(*pg)) {
		ret_pte = (pte_t *) pg;
		goto out;
	} else if (is_hugepd(pg))
		hpdp = (hugepd_t *)pg;
	else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);

		if (pud_huge(*pu)) {
			ret_pte = (pte_t *) pu;
			goto out;
		} else if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);

			if (pmd_huge(*pm)) {
				ret_pte = (pte_t *) pm;
				goto out;
			} else if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				return pte_offset_kernel(pm, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);

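/*
 * Lockless fast-GUP pinning of the pages covered by one huge pte: take a
 * speculative reference on the head page, then re-check that the pte did not
 * change underneath us and back out if it did.
 */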
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}