powerpc: Update hugetlb huge_pte_alloc and tablewalk code for FSL BOOKE
author Becky Bruce <beckyb@kernel.crashing.org>
Mon, 10 Oct 2011 10:50:39 +0000 (10:50 +0000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 7 Dec 2011 05:26:22 +0000 (16:26 +1100)
This updates the hugetlb page table code to handle 64-bit FSL_BOOKE.
The previous 32-bit work counted on the inner levels of the page table
collapsing; with a real pud/pmd hierarchy, the level that holds the
hugepd now has to be chosen per configuration.
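
The program below is a standalone, user-space sketch (not kernel code) of how
the new HUGEPD_PGD_SHIFT/HUGEPD_PUD_SHIFT comparisons in the patch pick the
page-table level that holds the hugepd. The PGDIR/PUD/PMD shift values are
assumed example numbers for illustration only, not the ones FSL_BOOKE actually
uses; compiling with -DCONFIG_PPC_FSL_BOOK3E switches to the FSL mapping.

/*
 * Illustrative sketch only: mirrors the level-selection logic of the
 * patched huge_pte_alloc().  Shift values are assumed examples.
 */
#include <stdio.h>

#define PGDIR_SHIFT 39   /* assumed example values, not FSL_BOOKE's real ones */
#define PUD_SHIFT   30
#define PMD_SHIFT   21

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

static const char *hugepd_level(unsigned int pshift)
{
	/* Same comparisons as the patched huge_pte_alloc(). */
	if (pshift >= HUGEPD_PGD_SHIFT)
		return "pgd";
	else if (pshift >= HUGEPD_PUD_SHIFT)
		return "pud";
	else
		return "pmd";
}

int main(void)
{
	unsigned int shifts[] = { 22, 24, 30 };   /* assumed hugepage shifts */
	unsigned int i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		printf("pshift %u -> hugepd installed at the %s level\n",
		       shifts[i], hugepd_level(shifts[i]));
	return 0;
}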

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/mm/hugetlbpage.c

index 6b1cf64788517dfccf6fa7296bd70f8de7bb8524..96178e8fb046d67b2e121891ce34d521ad14f3b2 100644 (file)
@@ -155,11 +155,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
+#else
+       if (!hugepd_none(*hpdp))
+               kmem_cache_free(cachep, new);
+       else
+               hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
 
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
        pgd_t *pg;
@@ -172,12 +189,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        addr &= ~(sz-1);
 
        pg = pgd_offset(mm, addr);
-       if (pshift >= PUD_SHIFT) {
+
+       if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
-               if (pshift >= PMD_SHIFT) {
+               if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
@@ -453,14 +471,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        unsigned long start;
 
        start = addr;
-       pmd = pmd_offset(pud, addr);
        do {
+               pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+               /*
+                * Increment next by the size of the huge mapping since
+                * there may be more than one entry at this level for a
+                * single hugepage, but all of them point to
+                * the same kmem cache that holds the hugepte.
+                */
+               next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
-       } while (pmd++, addr = next, addr != end);
+       } while (addr = next, addr != end);
 
        start &= PUD_MASK;
        if (start < floor)
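
The hunk above changes the walk in two ways: pmd is re-read with pmd_offset()
on every iteration (the cursor may now jump over several entries, so pmd++ no
longer works), and on FSL_BOOK3E next is advanced by the full hugepage size so
that the several PMD entries sharing one hugepte table are freed exactly once.
The following user-space sketch illustrates that skip logic with assumed sizes
(2MB PMD entries, 4MB hugepages); it is an illustration of the idea, not
kernel code.

/*
 * Sketch of the FSL_BOOK3E skip logic: consecutive PMD entries can point
 * at the same hugepte table, so the cursor jumps by the hugepage size
 * (1 << hugepd_shift) rather than one PMD step.  Sizes are assumed.
 */
#include <stdio.h>

#define PMD_SHIFT     21                     /* assumed: 2MB per PMD entry */
#define HUGEPD_SHIFT  22                     /* assumed: 4MB hugepage */
#define PMD_SIZE      (1UL << PMD_SHIFT)

static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + PMD_SIZE) & ~(PMD_SIZE - 1);
	return next < end ? next : end;
}

int main(void)
{
	unsigned long addr = 0, end = 2UL << HUGEPD_SHIFT;  /* two 4MB pages */
	unsigned long next;

	do {
		unsigned long pmd_next = pmd_addr_end(addr, end);

		/*
		 * A normal walk would stop at the next PMD boundary
		 * (pmd_next); here every PMD entry covering this hugepage
		 * shares one hugepte table, so skip the whole mapping and
		 * free it exactly once.
		 */
		next = addr + (1UL << HUGEPD_SHIFT);
		printf("pmd step would end at %#lx; freeing hugepd for [%#lx, %#lx)\n",
		       pmd_next, addr, next);
	} while (addr = next, addr != end);

	return 0;
}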
@@ -487,8 +514,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        unsigned long start;
 
        start = addr;
-       pud = pud_offset(pgd, addr);
        do {
+               pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
@@ -496,10 +523,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+                       /*
+                        * Increment next by the size of the huge mapping since
+                        * there may be more than one entry at this level for a
+                        * single hugepage, but all of them point to
+                        * the same kmem cache that holds the hugepte.
+                        */
+                       next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
-       } while (pud++, addr = next, addr != end);
+       } while (addr = next, addr != end);
 
        start &= PGDIR_MASK;
        if (start < floor)
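
For reference, the #else branch added to __hugepte_alloc() stores the address
of the newly allocated hugepte table with the page-size shift folded into the
hugepd word, and the walk code above recovers that shift through hugepd_shift()
to size the skip. The sketch below shows that encode/decode round trip; the
PD_HUGE value, the shift mask, and the decode helper are assumptions made for
illustration, not the kernel's actual definitions.

/*
 * Sketch of the hugepd encoding used by the new #else branch of
 * __hugepte_alloc().  Constants are assumed placeholders.
 */
#include <stdio.h>
#include <stdlib.h>

#define PD_HUGE            0x80000000UL   /* assumed placeholder value */
#define HUGEPD_SHIFT_MASK  0x3fUL         /* assumed: shift kept in low bits */

typedef struct { unsigned long pd; } hugepd_t;

/* Mirrors: hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; */
static void hugepd_populate(hugepd_t *hpdp, void *new, unsigned int pshift)
{
	hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
}

/* Assumed decode: recover the shift from the low bits of pd. */
static unsigned int hugepd_shift(hugepd_t hpd)
{
	return (unsigned int)(hpd.pd & HUGEPD_SHIFT_MASK);
}

int main(void)
{
	hugepd_t hpd;
	/* 64-byte alignment keeps the low bits free for the shift. */
	void *new = aligned_alloc(64, 64);   /* stand-in for the kmem cache object */

	hugepd_populate(&hpd, new, 22);      /* e.g. a 4MB hugepage */
	printf("hugepd_shift = %u, mapping size = %lu bytes\n",
	       hugepd_shift(hpd), 1UL << hugepd_shift(hpd));

	free(new);
	return 0;
}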