/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34
unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
#define hugepd_none(hpd)	((hpd).pd == 0)
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
 */
int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pgd_val(pgd) & 0x3) != 0x0);
}
#else
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pgd_huge(pgd_t pgd)
{
	return 0;
}
#endif
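
/*
 * Editor's note (illustrative arithmetic, assuming the 64K base-page
 * configuration the comment above describes): PMD_SHIFT = PAGE_SHIFT(16) +
 * PTE_INDEX_SIZE(8) = 24, so one PMD entry spans exactly 16MB and a 16MB
 * page can sit there as a leaf pte; with an assumed PMD_INDEX_SIZE of 10 on
 * top of that, PGDIR_SHIFT = 34, which is exactly the 16GB span needed for
 * a leaf in the PGD.
 */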
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);

	if (pgd_huge(*pg)) {
		ret_pte = (pte_t *) pg;
		goto out;
	} else if (is_hugepd(pg))
		hpdp = (hugepd_t *)pg;
	else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);

		if (pud_huge(*pu)) {
			ret_pte = (pte_t *) pu;
			goto out;
		} else if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);

			if (pmd_huge(*pm)) {
				ret_pte = (pte_t *) pm;
				goto out;
			} else if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				return pte_offset_kernel(pm, ea);
		}
	}

	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
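
/*
 * Illustrative sketch (editor's addition, not part of the kernel API): a
 * classifier for the four entry cases above, using only the bit rules the
 * comment states.  Kept under #if 0 since it is purely explanatory.
 */
#if 0
static int classify_entry(unsigned long val)
{
	if (val == 0)
		return 1;	/* case (1): invalid */
	if (val & 0x3)
		return 3;	/* case (3): leaf pte for a huge page */
	if (val & 0x3c)
		return 4;	/* case (4): hugepd pointer, size in bits 2..5 */
	return 2;		/* case (2): pointer to the next table */
}
#endif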
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = (unsigned long)new |
			   (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}
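
/*
 * Editor's note (illustrative): on FSL Book3E the huge page may be larger
 * than the range a single directory entry covers, so num_hugepd consecutive
 * entries must all point at the same hugepte table.  For example, if each
 * entry covers 4MB (pdshift = 22) and the page is 16MB (pshift = 24), then
 * num_hugepd = 1 << (24 - 22) = 4 entries are filled.
 */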
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB huge page */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}
#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}
/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
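
/*
 * Editor's note on the design: a hugepte table freed here may still be
 * visible to a concurrent lockless walker (e.g. gup) on another CPU, so
 * for a shared mm the table is queued on a per-cpu batch and only handed
 * back to the kmem cache once call_rcu_sched() guarantees every CPU has
 * passed through a quiescent state.  The single-user/single-CPU fast path
 * above frees immediately because no such concurrent walker can exist.
 */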
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(pmd)) {
			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
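
/*
 * Editor's note on the lockless protocol above: the pte is snapshotted
 * once, references are taken speculatively with
 * page_cache_add_speculative(), and the pte is then re-read.  If it
 * changed in the meantime (e.g. the page was unmapped under us), all
 * speculative references are dropped and 0 is returned so the caller
 * falls back to the slow path; only on success do the tail pages get
 * their mapcount references.
 */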
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
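
/*
 * Editor's example: 16MB = 1 << 24 has an even log2, so it is a power of 4
 * and an acceptable FSL huge page size; 8MB = 1 << 23 has an odd log2 and
 * is rejected.
 */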
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * If pdshift and shift have the same value, we don't
		 * use the pgt cache for the hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n",
				      shift);
		}
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}