/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data.  At this point it is very difficult
 * to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* There are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift = 12, .sllp = 0, .penc = 0, .avpnm = 0, .tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift = 12, .sllp = 0, .penc = 0, .avpnm = 0, .tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift = 24, .sllp = SLB_VSID_L, .penc = 0, .avpnm = 0x1UL,
		.tlbiel = 0,
	},
};
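
/*
 * For reference (fields as declared in asm/mmu-hash64.h): shift is
 * log2 of the page size, sllp the SLB "L||LP" encoding used when
 * writing segment entries, penc the HPTE large-page encoding, avpnm
 * the AVPN bits to mask out for that size, and tlbiel whether the
 * CPU may use the local tlbiel form for it.
 */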
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr,
					 tmp_mode, HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
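
/*
 * A sketch of the slot arithmetic above, with illustrative numbers:
 * for a 16MB hash table there are 2^17 PTE groups, so htab_hash_mask
 * is 0x1ffff.  The hash is masked down to a group number, then scaled
 * by HPTES_PER_GROUP (8 slots per group) to give the index of the
 * group's first slot, which is what ppc_md.hpte_insert() expects.
 */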
#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			       int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
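
/*
 * The ibm,processor-segment-sizes property lists supported segment
 * sizes as log2 values, e.g. 0x1c (28) for 256MB segments and 0x28
 * (40) for 1TB segments, hence the check for 40 above.
 */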
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while (size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while (size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch (shift) {
			case 0xc:  idx = MMU_PAGE_4K;  break;
			case 0x10: idx = MMU_PAGE_64K; break;
			case 0x14: idx = MMU_PAGE_1M;  break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22: idx = MMU_PAGE_16G; break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
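
/*
 * Layout of ibm,segment-page-sizes as parsed above: a flat list of
 * { base-shift, slb-encoding, nr-subpages } headers, each followed by
 * nr-subpages pairs of { sub-page-shift, hpte-encoding }.  The inner
 * loop picks out the "penc" value whose sub-page shift matches the
 * base shift itself.
 */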
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	unsigned long *addr_prop;
	u32 *page_count_prop;
	unsigned int expected_pages;
	unsigned long phys_addr;
	unsigned long block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << page_count_prop[0]);
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = addr_prop[0];
	block_size = addr_prop[1];
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	lmb_reserve(phys_addr, block_size * expected_pages);
	add_gpage(phys_addr, block_size, expected_pages);
	return 0;
}
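
/*
 * Arithmetic note: ibm,expected#pages is a log2, so a property value
 * of n means 2^n gigantic pages.  For example, a node with value 1
 * and 16GB pages causes 2 * 16GB = 32GB starting at phys_addr to be
 * reserved from the lmb allocator and handed to the gigantic page
 * pool via add_gpage().
 */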
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fall back on the known size
	 * list for 16M capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    lmb_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);

	/* Set default large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
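
/*
 * Worked example (illustrative): with 1GB of RAM, rnd_mem_size = 2^30,
 * so pteg_count = max(2^30 >> 13, 2^11) = 2^17 PTE groups, one group
 * per two 4K real pages.  Each group is 8 HPTEs of 16 bytes = 128
 * bytes (hence the << 7), giving a 2^24 byte (16MB) hash table.
 */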
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 _PAGE_ACCESSED | _PAGE_DIRTY |
				 _PAGE_COHERENT | PP_RWXX,
				 mmu_linear_psize, mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
				   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4 +
			   (unsigned long)insn_addr);
}
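
/*
 * Encoding note: 0x48000001 is the PowerPC I-form "bl" instruction
 * (primary opcode 18 with the LK bit set), whose 24-bit, word-aligned
 * LI field is filled from (offset & 0x03fffffc); the patched call must
 * therefore land within +/-32MB of the call site.  func is
 * dereferenced first because a ppc64 function pointer points to a
 * function descriptor whose first doubleword is the real entry
 * address.
 */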
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	/* Patch the call sites in the low-level assembly hash path so
	 * they branch directly to the platform's HPTE callbacks. */
	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0, limit;
	int i;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space. On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = 0;

		table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 __pa(base), mode_rw,
							 mmu_linear_psize,
							 mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							 base + size,
							 __pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize,
							 mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize,
					 mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
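
/*
 * A note on the _SDR1 value computed above (a sketch of the encoding,
 * not new behaviour): the architected HTABSIZE field in the low bits
 * of SDR1 encodes the table size as 2^(18 + HTABSIZE) bytes.  With
 * htab_size_bytes = pteg_count << 7, log2(htab_size_bytes) is
 * __ilog2(pteg_count) + 7, so HTABSIZE = __ilog2(pteg_count) + 7 - 18
 * = __ilog2(pteg_count) - 11, which is simply added to the
 * size-aligned physical base address of the table.
 */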
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* instruction fetch fault: the page is really being
			 * executed, so flush it and mark it clean */
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			/* data fault: defer the flush by mapping the page
			 * no-execute for now */
			pp |= HPTE_R_N;
	}
	return pp;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
	unsigned long index, slices;

	if (addr < SLICE_LOW_TOP) {
		slices = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		slices = get_paca()->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif
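
/*
 * Slice encoding note: each slice's page size is a 4-bit field packed
 * into the low_slices_psize/high_slices_psize words cached in the
 * PACA, hence the (slices >> (index * 4)) & 0xF extraction above.
 * Low slices are 256MB ranges below SLICE_LOW_TOP (4GB); high slices
 * cover the remainder of the address space in 1TB steps.
 */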
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (sbpm == NULL)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (sbpp == NULL)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	return 0;
}
#endif
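
/*
 * Example of the bitfield extraction above: a 64k page holds 16 4k
 * subpages whose 2-bit protection codes are packed into a single u32
 * with subpage 0 in the top two bits.  For subpage n = (ea >> 12) & 0xf,
 * shifting right by 30 - 2*n leaves that subpage's code in the low two
 * bits, which are then expanded into the _PAGE_USER/_PAGE_RW
 * combination described in the function header.
 */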
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	pte_t *ptep;
	struct mm_struct *mm;
	unsigned long vsid;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range.
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(pgdir, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
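
/*
 * A note on "local" above: when the mm has only been active on the
 * current CPU (its cpu_vm_mask equals just this CPU), the low-level
 * hash functions are allowed to invalidate translations with the
 * cheaper CPU-local tlbiel instruction rather than the broadcast
 * tlbie.
 */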
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
			       subpage_protection(pgdir, ea));

	local_irq_restore(flags);
}
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}
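
/*
 * Hashing note: an HPTE may reside in either its primary PTE group or
 * the secondary group found at the bitwise complement of the hash, so
 * the saved hidx records both the _PTEIDX_SECONDARY flag (flip the
 * hash) and the slot number within the 8-entry group
 * (_PTEIDX_GROUP_IX), exactly mirroring the insertion path.
 */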
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}
/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}
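
/*
 * Bookkeeping note: linear_map_hash_slots[] keeps one byte per linear
 * mapping page, using 0x80 as a "currently mapped" flag and the low
 * seven bits for the hidx returned by hpte_insert, so that
 * kernel_unmap_linear_page() below can locate and invalidate the very
 * same HPTE later.
 */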
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */