Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index e9d589eefc14ce07aa5f670c036b35bfc3124328..d03fd2b4445e1f43e5cde417b760e71f227d9f84 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
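For reference, each mmu_psize_defs[] entry describes one hardware page size. A sketch of the structure as it stood in include/asm-powerpc/mmu.h around this time (field set recalled from the era's headers, so treat it as an approximation):

	struct mmu_psize_def {
		unsigned int	shift;	/* page size as a power of 2 */
		unsigned int	penc;	/* HPTE encoding of this size */
		unsigned int	tlbiel;	/* whether tlbiel works for this size */
		unsigned long	avpnm;	/* bits masked out of the AVPN in the HPTE */
		unsigned long	sllp;	/* SLB L||LP encoding, as used by slbmte */
	};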
@@ -167,18 +172,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                 * normal insert callback here.
                 */
 #ifdef CONFIG_PPC_ISERIES
-               if (_machine == PLATFORM_ISERIES_LPAR)
+               if (machine_is(iseries))
                        ret = iSeries_hpte_insert(hpteg, va,
-                                                 __pa(vaddr),
+                                                 paddr,
                                                  tmp_mode,
                                                  HPTE_V_BOLTED,
                                                  psize);
                else
 #endif
 #ifdef CONFIG_PPC_PSERIES
-               if (_machine & PLATFORM_LPAR)
+               if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
                        ret = pSeries_lpar_hpte_insert(hpteg, va,
-                                                      virt_to_abs(paddr),
+                                                      paddr,
                                                       tmp_mode,
                                                       HPTE_V_BOLTED,
                                                       psize);
@@ -186,7 +191,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #endif
 #ifdef CONFIG_PPC_MULTIPLATFORM
                        ret = native_hpte_insert(hpteg, va,
-                                                virt_to_abs(paddr),
+                                                paddr,
                                                 tmp_mode, HPTE_V_BOLTED,
                                                 psize);
 #endif
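These hunks change the htab_bolt_mapping() insert sites to take a physical address (paddr) from the caller instead of converting at each site. For linear-mapping addresses the conversion is plain offset arithmetic; roughly, per include/asm-powerpc/page.h of the era (a sketch, not the verbatim header):

	#define PAGE_OFFSET	0xC000000000000000UL	/* base of the kernel linear mapping */
	#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET)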
@@ -295,8 +300,7 @@ static void __init htab_init_page_sizes(void)
         * Not in the device-tree, let's fallback on known size
         * list for 16M capable GP & GR
         */
-       if ((_machine != PLATFORM_ISERIES_LPAR) &&
-           cpu_has_feature(CPU_FTR_16M_PAGE))
+       if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
  found:
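The fallback table contains just the two sizes every 16M-capable GP/GR (POWER4/PPC970-class) CPU handles. A sketch of its definition earlier in this file (values recalled from the era's source, shown for orientation only):

	static struct mmu_psize_def mmu_psize_defaults_gp[] = {
		[MMU_PAGE_4K] = {
			.shift	= 12,
			.sllp	= 0,
			.penc	= 0,
			.avpnm	= 0,
			.tlbiel	= 1,
		},
		[MMU_PAGE_16M] = {
			.shift	= 24,
			.sllp	= SLB_VSID_L,
			.penc	= 0,
			.avpnm	= 0x1UL,
			.tlbiel	= 0,
		},
	};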
@@ -309,20 +313,31 @@ static void __init htab_init_page_sizes(void)
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
 
+#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
-        * 64K if cache inhibited large pages are supported by the
-        * processor
+        * 64K for user mappings and vmalloc if supported by the processor.
+        * We only use 64k for ioremap if the processor
+        * (and firmware) support cache-inhibited large pages.
+        * If not, we use 4k and set mmu_ci_restrictions so that
+        * hash_page knows to switch processes that use cache-inhibited
+        * mappings to 4k pages.
         */
-#ifdef CONFIG_PPC_64K_PAGES
-       if (mmu_psize_defs[MMU_PAGE_64K].shift &&
-           cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+       if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
+               mmu_vmalloc_psize = MMU_PAGE_64K;
+               if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+                       mmu_io_psize = MMU_PAGE_64K;
+               else
+                       mmu_ci_restrictions = 1;
+       }
 #endif
 
-       printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+       printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+              "virtual = %d, io = %d\n",
               mmu_psize_defs[mmu_linear_psize].shift,
-              mmu_psize_defs[mmu_virtual_psize].shift);
+              mmu_psize_defs[mmu_virtual_psize].shift,
+              mmu_psize_defs[mmu_io_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
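As a worked example (values assumed for illustration): on a CPU with a 16M linear mapping and 64K pages enabled, but without cache-inhibited large-page support, the boot log line becomes

	Page orders: linear mapping = 24, virtual = 16, io = 12

and mmu_ci_restrictions is set to 1, so hash_page() will demote any non-cacheable mapping it faults on to 4k pages.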
@@ -392,7 +407,7 @@ static unsigned long __init htab_get_table_size(void)
 #ifdef CONFIG_MEMORY_HOTPLUG
 void create_section_mapping(unsigned long start, unsigned long end)
 {
-               BUG_ON(htab_bolt_mapping(start, end, start,
+               BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                        _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
                        mmu_linear_psize));
 }
@@ -422,7 +437,7 @@ void __init htab_initialize(void)
 
        htab_hash_mask = pteg_count - 1;
 
-       if (platform_is_lpar()) {
+       if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0; 
@@ -431,7 +446,6 @@ void __init htab_initialize(void)
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-               BUG_ON(table == 0);
 
                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);
@@ -474,21 +488,22 @@ void __init htab_initialize(void)
 
                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
+                       unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-                                                        base, mode_rw,
-                                                        mmu_linear_psize));
-                       if ((base + size) > (dart_tablebase + 16*MB))
+                                                       __pa(base), mode_rw,
+                                                       mmu_linear_psize));
+                       if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
-                                                        base + size,
-                                                        dart_tablebase+16*MB,
+                                                       base + size,
+                                                       __pa(dart_table_end),
                                                         mode_rw,
                                                         mmu_linear_psize));
                        continue;
                }
 #endif /* CONFIG_U3_DART */
-               BUG_ON(htab_bolt_mapping(base, base + size, base,
-                                        mode_rw, mmu_linear_psize));
+               BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
+                                       mode_rw, mmu_linear_psize));
        }
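To make the DART carve-out concrete, take a hypothetical LMB region (numbers invented for illustration): with base = 0x0, size = 0x80000000 (2GB) and dart_tablebase = 0x40000000 (1GB), the loop bolts [0x0, 0x40000000) and [0x41000000, 0x80000000), leaving the 16MB DART window at [0x40000000, 0x41000000) unmapped so the IOMMU table is never covered by a cacheable linear mapping.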
 
        /*
@@ -505,8 +520,8 @@ void __init htab_initialize(void)
                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;
 
-               BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
-                                        tce_alloc_start, mode_rw,
+               BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+                                        __pa(tce_alloc_start), mode_rw,
                                         mmu_linear_psize));
        }
 
@@ -517,7 +532,7 @@ void __init htab_initialize(void)
 
 void htab_initialize_secondary(void)
 {
-       if (!platform_is_lpar())
+       if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
 }
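firmware_has_feature() replaces the old platform_is_lpar() test throughout. A minimal sketch of how the check worked, assuming the era's global feature bitmask (the variable name and FW_FEATURE_* constants are recalled, not verified):

	/* sketch, after include/asm-powerpc/firmware.h */
	static inline unsigned long firmware_has_feature(unsigned long feature)
	{
		return (FW_FEATURE_ALWAYS & feature) ||
		       (FW_FEATURE_POSSIBLE & feature & ppc64_firmware_features);
	}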
 
@@ -557,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;
+       int psize;
 
        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);
@@ -576,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                        return 1;
                }
                vsid = get_vsid(mm->context.id, ea);
+               psize = mm->context.user_psize;
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
+               if (ea < VMALLOC_END)
+                       psize = mmu_vmalloc_psize;
+               else
+                       psize = mmu_io_psize;
                break;
        default:
                /* Not a valid range
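The switch keys on the top nibble of the effective address. Roughly, per the era's include/asm-powerpc/page.h (a sketch; the numeric region values follow from KERNELBASE = 0xC... and VMALLOC_START = 0xD...):

	#define REGION_SHIFT		60UL
	#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
	#define USER_REGION_ID		0x0UL	/* user addresses */
	#define KERNEL_REGION_ID	0xcUL	/* kernel linear mapping */
	#define VMALLOC_REGION_ID	0xdUL	/* vmalloc and ioremap space */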
@@ -630,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
        rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-       if (mmu_virtual_psize == MMU_PAGE_64K)
+       if (mmu_ci_restrictions) {
+               /* If this PTE is non-cacheable, switch to 4k */
+               if (psize == MMU_PAGE_64K &&
+                   (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+                       if (user_region) {
+                               psize = MMU_PAGE_4K;
+                               mm->context.user_psize = MMU_PAGE_4K;
+                               mm->context.sllp = SLB_VSID_USER |
+                                       mmu_psize_defs[MMU_PAGE_4K].sllp;
+                       } else if (ea < VMALLOC_END) {
+                               /*
+                                * some driver did a non-cacheable mapping
+                                * in vmalloc space, so switch vmalloc
+                                * to 4k pages
+                                */
+                               printk(KERN_ALERT "Reducing vmalloc segment "
+                                      "to 4kB pages because of "
+                                      "non-cacheable mapping\n");
+                               psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+                       }
+               }
+               if (user_region) {
+                       if (psize != get_paca()->context.user_psize) {
+                               get_paca()->context = mm->context;
+                               slb_flush_and_rebolt();
+                       }
+               } else if (get_paca()->vmalloc_sllp !=
+                          mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+                       get_paca()->vmalloc_sllp =
+                               mmu_psize_defs[mmu_vmalloc_psize].sllp;
+                       slb_flush_and_rebolt();
+               }
+       }
+       if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
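The demotion path fires when a process running with 64k pages touches a cache-inhibited mapping. A hypothetical driver mmap handler of the kind that triggers it (function name and error handling invented for illustration; pgprot_noncached() does set _PAGE_NO_CACHE on powerpc):

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Hand device MMIO to user space, cache-inhibited. The first
		 * hash fault on this VMA sees _PAGE_NO_CACHE in the PTE and
		 * switches the whole context down to 4k pages.
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}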
@@ -682,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
        __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-       if (mmu_virtual_psize == MMU_PAGE_64K)
+       if (mmu_ci_restrictions) {
+               /* If this PTE is non-cacheable, switch to 4k */
+               if (mm->context.user_psize == MMU_PAGE_64K &&
+                   (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+                       mm->context.user_psize = MMU_PAGE_4K;
+                       mm->context.sllp = SLB_VSID_USER |
+                               mmu_psize_defs[MMU_PAGE_4K].sllp;
+                       get_paca()->context = mm->context;
+                       slb_flush_and_rebolt();
+               }
+       }
+       if (mm->context.user_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                __hash_page_4K(ea, access, vsid, ptep, trap, local);
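For context, the user_psize and sllp fields updated above live in mm_context_t; roughly, from include/asm-powerpc/mmu.h of the era (layout recalled from memory, hugepage fields omitted):

	typedef unsigned long mm_context_id_t;

	typedef struct {
		mm_context_id_t	id;
		u16		user_psize;	/* page size index */
		u16		sllp;		/* SLB entry page-size encoding */
		unsigned long	vdso_base;
	} mm_context_t;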