unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
/* There are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */
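For reference, this is the shape of the fallback table used further down for 16M-capable GP and GR processors; a sketch modeled on the era's hash_utils_64.c, with the field values shown as illustration rather than as part of this patch:

/* Fallback page-size array used when the firmware provides none
 * (sketch; mirrors struct mmu_psize_def in this file).
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {	/* 4K base pages, no SLB L bit */
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,	/* tlbiel usable for local invalidates */
	},
	[MMU_PAGE_16M] = {	/* 16M pages for the linear mapping */
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};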
/* This dispatch can be cleaned up once ppc_md.probe() can set up the
 * hash callbacks; then we can just use the normal insert callback
 * here.
 */
#ifdef CONFIG_PPC_ISERIES
- if (_machine == PLATFORM_ISERIES_LPAR)
+ if (machine_is(iseries))
ret = iSeries_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
else
#endif
#ifdef CONFIG_PPC_PSERIES
- if (_machine & PLATFORM_LPAR)
+ if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
ret = pSeries_lpar_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
ret = native_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode, HPTE_V_BOLTED,
psize);
#endif
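The two predicates replacing the old _machine bitmask tests deserve a note. A sketch of their shape, modeled on the era's asm-powerpc/machdep.h and firmware.h; treat the details as assumptions rather than as part of this patch:

/* machine_is(name) compares the probed machine descriptor against a
 * weakly referenced per-platform machdep_calls instance, so the test
 * also compiles away cleanly when that platform is not configured in.
 */
#define machine_is(name) \
	({ \
		extern struct machdep_calls mach_##name \
			__attribute__((weak)); \
		machine_id == &mach_##name; \
	})

/* firmware_has_feature() checks a bitmask filled in during early boot;
 * FW_FEATURE_LPAR distinguishes hypervisor-owned hash tables from
 * bare-metal ones, which is exactly what the bolt path above needs.
 */
static inline unsigned long firmware_has_feature(unsigned long feature)
{
	return (FW_FEATURE_ALWAYS & feature) ||
		(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
}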
* Not in the device-tree, let's fall back on the known size
* list for 16M capable GP & GR
*/
- if ((_machine != PLATFORM_ISERIES_LPAR) &&
- cpu_has_feature(CPU_FTR_16M_PAGE))
+ if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
+#ifdef CONFIG_PPC_64K_PAGES
/*
* Pick a size for the ordinary pages. Default is 4K; we support
- * 64K if cache inhibited large pages are supported by the
- * processor
+ * 64K for user mappings and vmalloc if supported by the processor.
+ * We only use 64K for ioremap if the processor and firmware support
+ * cache-inhibited large pages.  If not, we use 4K and set
+ * mmu_ci_restrictions so that hash_page knows to switch processes
+ * that use cache-inhibited mappings to 4K pages.
*/
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_psize_defs[MMU_PAGE_64K].shift &&
- cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ if (mmu_psize_defs[MMU_PAGE_64K].shift) {
mmu_virtual_psize = MMU_PAGE_64K;
+ mmu_vmalloc_psize = MMU_PAGE_64K;
+ if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ mmu_io_psize = MMU_PAGE_64K;
+ else
+ mmu_ci_restrictions = 1;
+ }
#endif
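To make the three outcomes of the selection above explicit, here is a tiny standalone restatement of the rule; a hypothetical userspace demo, not kernel code:

#include <stdio.h>

/* Mirror of the page-size selection above (illustrative only):
 * has_64k:  firmware reported a 64K page size;
 * ci_large: the CPU supports cache-inhibited large pages.
 */
static void pick_sizes(int has_64k, int ci_large)
{
	int virt = 4, vmalloc = 4, io = 4, ci_restrictions = 0;

	if (has_64k) {
		virt = vmalloc = 64;
		if (ci_large)
			io = 64;
		else
			ci_restrictions = 1;	/* hash_page may demote later */
	}
	printf("virtual=%dK vmalloc=%dK io=%dK ci_restrictions=%d\n",
	       virt, vmalloc, io, ci_restrictions);
}

int main(void)
{
	pick_sizes(0, 0);	/* 4K-only processor */
	pick_sizes(1, 1);	/* 64K everywhere, including ioremap */
	pick_sizes(1, 0);	/* 64K, but ioremap stays 4K, demotion armed */
	return 0;
}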
- printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+ printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+ "virtual = %d, io = %d\n",
mmu_psize_defs[mmu_linear_psize].shift,
- mmu_psize_defs[mmu_virtual_psize].shift);
+ mmu_psize_defs[mmu_virtual_psize].shift,
+ mmu_psize_defs[mmu_io_psize].shift);
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
 * on what is available.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
- BUG_ON(htab_bolt_mapping(start, end, start,
+ BUG_ON(htab_bolt_mapping(start, end, __pa(start),
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
mmu_linear_psize));
}
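Every BUG_ON(htab_bolt_mapping(...)) in this patch passes (virtual start, virtual end, physical start, mode, psize), which is why the third argument consistently gains __pa(). For orientation, the prototype looks roughly like this (a sketch based on the era's headers):

/* Bolt a range into the hash table: vstart..vend are kernel virtual
 * addresses, pstart the corresponding physical address. Returns 0 on
 * success; the callers above treat any failure as fatal.
 */
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);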
htab_hash_mask = pteg_count - 1;
- if (platform_is_lpar()) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
if (dart_tablebase != 0 && dart_tablebase >= base
&& dart_tablebase < (base + size)) {
+ unsigned long dart_table_end = dart_tablebase + 16 * MB;
if (base != dart_tablebase)
BUG_ON(htab_bolt_mapping(base, dart_tablebase,
- base, mode_rw,
- mmu_linear_psize));
- if ((base + size) > (dart_tablebase + 16*MB))
+ __pa(base), mode_rw,
+ mmu_linear_psize));
+ if ((base + size) > dart_table_end)
- BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
- base + size,
- dart_tablebase+16*MB,
+ BUG_ON(htab_bolt_mapping(dart_table_end,
+ base + size,
+ __pa(dart_table_end),
mode_rw,
mmu_linear_psize));
continue;
}
#endif /* CONFIG_U3_DART */
- BUG_ON(htab_bolt_mapping(base, base + size, base,
- mode_rw, mmu_linear_psize));
+ BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
+ mode_rw, mmu_linear_psize));
}
/*
 * If we have a memory_limit and we've allocated TCEs, we need to
 * explicitly map the TCE area at the top of RAM.
 */
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
- BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
- tce_alloc_start, mode_rw,
+ BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+ __pa(tce_alloc_start), mode_rw,
mmu_linear_psize));
}
void htab_initialize_secondary(void)
{
- if (!platform_is_lpar())
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
mtspr(SPRN_SDR1, _SDR1);
}
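Under a hypervisor the hash table is owned by firmware, so secondaries never touch SDR1; on bare metal each CPU must point SDR1 at the table. As a sketch of how _SDR1 is composed in htab_initialize (reconstructed from the era's code, so treat the exact formula as an assumption):

/* SDR1 = hash table real address | HTABSIZE field (sketch).
 * HTABSIZE is log2(table size in bytes) - 18; with pteg_count groups
 * of 128 bytes each, that works out to ilog2(pteg_count) - 11.
 */
_SDR1 = table + __ilog2(pteg_count) - 11;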
pte_t *ptep;
cpumask_t tmp;
int rc, user_region = 0, local = 0;
+ int psize;
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
return 1;
}
vsid = get_vsid(mm->context.id, ea);
+ psize = mm->context.user_psize;
break;
case VMALLOC_REGION_ID:
mm = &init_mm;
vsid = get_kernel_vsid(ea);
+ if (ea < VMALLOC_END)
+ psize = mmu_vmalloc_psize;
+ else
+ psize = mmu_io_psize;
break;
default:
/* Not a valid range */
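For the VMALLOC_REGION_ID case above, note the split at VMALLOC_END: addresses below it are vmalloc() proper, addresses above it belong to the ioremap area, and each half gets its own page-size variable. A layout sketch (the exact region bounds are assumptions from the era's headers):

/*
 * Kernel virtual layout assumed by the psize selection (sketch):
 *
 *   VMALLOC_START .. VMALLOC_END    vmalloc()  ->  mmu_vmalloc_psize
 *   VMALLOC_END   .. region end     ioremap()  ->  mmu_io_psize
 *
 * Both halves share VMALLOC_REGION_ID, which is why a single cached
 * encoding (vmalloc_sllp in the paca) covers the vmalloc segments.
 */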
#ifndef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ if (user_region) {
+ psize = MMU_PAGE_4K;
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ } else if (ea < VMALLOC_END) {
+ /*
+ * some driver did a non-cacheable mapping
+ * in vmalloc space, so switch vmalloc
+ * to 4k pages
+ */
+ printk(KERN_ALERT "Reducing vmalloc segment "
+ "to 4kB pages because of "
+ "non-cacheable mapping\n");
+ psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+ }
+ }
+ if (user_region) {
+ if (psize != get_paca()->context.user_psize) {
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ } else if (get_paca()->vmalloc_sllp !=
+ mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+ get_paca()->vmalloc_sllp =
+ mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
else
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
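The demotion path above depends on per-mm and per-cpu (paca) copies of the page-size and SLB-encoding state, updated together so that slb_flush_and_rebolt() picks up the new segment flags. A sketch of the fields this patch assumes elsewhere (shape is hypothetical, modeled on the era's mmu.h and paca.h; not part of this hunk):

/* Per-mm MMU context fields consulted above (sketch) */
typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* base page size for user mappings */
	u16 sllp;		/* SLB_VSID_USER | psize's SLB encoding */
	/* ... */
} mm_context_t;

/* Per-cpu paca fields this patch relies on (sketch) */
struct paca_struct {
	/* ... */
	mm_context_t context;	/* shadow of current->mm->context */
	u16 vmalloc_sllp;	/* cached SLB encoding for vmalloc segs */
	/* ... */
};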
#ifndef CONFIG_PPC_64K_PAGES
__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (mm->context.user_psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
else
__hash_page_4K(ea, access, vsid, ptep, trap, local);