2 * arch/sparc64/mm/init.c
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/percpu.h>
26 #include <linux/memblock.h>
27 #include <linux/mmzone.h>
28 #include <linux/gfp.h>
32 #include <asm/pgalloc.h>
33 #include <asm/pgtable.h>
34 #include <asm/oplib.h>
35 #include <asm/iommu.h>
37 #include <asm/uaccess.h>
38 #include <asm/mmu_context.h>
39 #include <asm/tlbflush.h>
41 #include <asm/starfire.h>
43 #include <asm/spitfire.h>
44 #include <asm/sections.h>
46 #include <asm/hypervisor.h>
48 #include <asm/mdesc.h>
49 #include <asm/cpudata.h>
54 unsigned long kern_linear_pte_xor
[4] __read_mostly
;
56 /* A bitmap, two bits for every 256MB of physical memory. These two
57 * bits determine what page size we use for kernel linear
58 * translations. They form an index into kern_linear_pte_xor[]. The
59 * value in the indexed slot is XOR'd with the TLB miss virtual
60 * address to form the resulting TTE. The mapping is:
67 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
68 * support 2GB pages, and hopefully future cpus will support the 16GB
69 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
70 * if these larger page sizes are not supported by the cpu.
72 * It would be nice to determine this from the machine description
73 * 'cpu' properties, but we need to have this table setup before the
74 * MDESC is initialized.
76 unsigned long kpte_linear_bitmap
[KPTE_BITMAP_BYTES
/ sizeof(unsigned long)];
78 #ifndef CONFIG_DEBUG_PAGEALLOC
79 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
80 * Space is allocated for this right after the trap table in
81 * arch/sparc64/kernel/head.S
83 extern struct tsb swapper_4m_tsb
[KERNEL_TSB4M_NENTRIES
];
86 static unsigned long cpu_pgsz_mask
;
90 static struct linux_prom64_registers pavail
[MAX_BANKS
] __devinitdata
;
91 static int pavail_ents __devinitdata
;
93 static int cmp_p64(const void *a
, const void *b
)
95 const struct linux_prom64_registers
*x
= a
, *y
= b
;
97 if (x
->phys_addr
> y
->phys_addr
)
99 if (x
->phys_addr
< y
->phys_addr
)
104 static void __init
read_obp_memory(const char *property
,
105 struct linux_prom64_registers
*regs
,
108 phandle node
= prom_finddevice("/memory");
109 int prop_size
= prom_getproplen(node
, property
);
112 ents
= prop_size
/ sizeof(struct linux_prom64_registers
);
113 if (ents
> MAX_BANKS
) {
114 prom_printf("The machine has more %s property entries than "
115 "this kernel can support (%d).\n",
116 property
, MAX_BANKS
);
120 ret
= prom_getproperty(node
, property
, (char *) regs
, prop_size
);
122 prom_printf("Couldn't get %s property from /memory.\n",
127 /* Sanitize what we got from the firmware, by page aligning
130 for (i
= 0; i
< ents
; i
++) {
131 unsigned long base
, size
;
133 base
= regs
[i
].phys_addr
;
134 size
= regs
[i
].reg_size
;
137 if (base
& ~PAGE_MASK
) {
138 unsigned long new_base
= PAGE_ALIGN(base
);
140 size
-= new_base
- base
;
141 if ((long) size
< 0L)
146 /* If it is empty, simply get rid of it.
147 * This simplifies the logic of the other
148 * functions that process these arrays.
150 memmove(®s
[i
], ®s
[i
+ 1],
151 (ents
- i
- 1) * sizeof(regs
[0]));
156 regs
[i
].phys_addr
= base
;
157 regs
[i
].reg_size
= size
;
162 sort(regs
, ents
, sizeof(struct linux_prom64_registers
),
166 unsigned long sparc64_valid_addr_bitmap
[VALID_ADDR_BITMAP_BYTES
/
167 sizeof(unsigned long)];
168 EXPORT_SYMBOL(sparc64_valid_addr_bitmap
);
170 /* Kernel physical address base and size in bytes. */
171 unsigned long kern_base __read_mostly
;
172 unsigned long kern_size __read_mostly
;
174 /* Initial ramdisk setup */
175 extern unsigned long sparc_ramdisk_image64
;
176 extern unsigned int sparc_ramdisk_image
;
177 extern unsigned int sparc_ramdisk_size
;
179 struct page
*mem_map_zero __read_mostly
;
180 EXPORT_SYMBOL(mem_map_zero
);
182 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly
;
184 unsigned long sparc64_kern_pri_context __read_mostly
;
185 unsigned long sparc64_kern_pri_nuc_bits __read_mostly
;
186 unsigned long sparc64_kern_sec_context __read_mostly
;
188 int num_kernel_image_mappings
;
190 #ifdef CONFIG_DEBUG_DCFLUSH
191 atomic_t dcpage_flushes
= ATOMIC_INIT(0);
193 atomic_t dcpage_flushes_xcall
= ATOMIC_INIT(0);
197 inline void flush_dcache_page_impl(struct page
*page
)
199 BUG_ON(tlb_type
== hypervisor
);
200 #ifdef CONFIG_DEBUG_DCFLUSH
201 atomic_inc(&dcpage_flushes
);
204 #ifdef DCACHE_ALIASING_POSSIBLE
205 __flush_dcache_page(page_address(page
),
206 ((tlb_type
== spitfire
) &&
207 page_mapping(page
) != NULL
));
209 if (page_mapping(page
) != NULL
&&
210 tlb_type
== spitfire
)
211 __flush_icache_page(__pa(page_address(page
)));
215 #define PG_dcache_dirty PG_arch_1
216 #define PG_dcache_cpu_shift 32UL
217 #define PG_dcache_cpu_mask \
218 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220 #define dcache_dirty_cpu(page) \
221 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
223 static inline void set_dcache_dirty(struct page
*page
, int this_cpu
)
225 unsigned long mask
= this_cpu
;
226 unsigned long non_cpu_bits
;
228 non_cpu_bits
= ~(PG_dcache_cpu_mask
<< PG_dcache_cpu_shift
);
229 mask
= (mask
<< PG_dcache_cpu_shift
) | (1UL << PG_dcache_dirty
);
231 __asm__
__volatile__("1:\n\t"
233 "and %%g7, %1, %%g1\n\t"
234 "or %%g1, %0, %%g1\n\t"
235 "casx [%2], %%g7, %%g1\n\t"
237 "bne,pn %%xcc, 1b\n\t"
240 : "r" (mask
), "r" (non_cpu_bits
), "r" (&page
->flags
)
244 static inline void clear_dcache_dirty_cpu(struct page
*page
, unsigned long cpu
)
246 unsigned long mask
= (1UL << PG_dcache_dirty
);
248 __asm__
__volatile__("! test_and_clear_dcache_dirty\n"
251 "srlx %%g7, %4, %%g1\n\t"
252 "and %%g1, %3, %%g1\n\t"
254 "bne,pn %%icc, 2f\n\t"
255 " andn %%g7, %1, %%g1\n\t"
256 "casx [%2], %%g7, %%g1\n\t"
258 "bne,pn %%xcc, 1b\n\t"
262 : "r" (cpu
), "r" (mask
), "r" (&page
->flags
),
263 "i" (PG_dcache_cpu_mask
),
264 "i" (PG_dcache_cpu_shift
)
268 static inline void tsb_insert(struct tsb
*ent
, unsigned long tag
, unsigned long pte
)
270 unsigned long tsb_addr
= (unsigned long) ent
;
272 if (tlb_type
== cheetah_plus
|| tlb_type
== hypervisor
)
273 tsb_addr
= __pa(tsb_addr
);
275 __tsb_insert(tsb_addr
, tag
, pte
);
278 unsigned long _PAGE_ALL_SZ_BITS __read_mostly
;
280 static void flush_dcache(unsigned long pfn
)
284 page
= pfn_to_page(pfn
);
286 unsigned long pg_flags
;
288 pg_flags
= page
->flags
;
289 if (pg_flags
& (1UL << PG_dcache_dirty
)) {
290 int cpu
= ((pg_flags
>> PG_dcache_cpu_shift
) &
292 int this_cpu
= get_cpu();
294 /* This is just to optimize away some function calls
298 flush_dcache_page_impl(page
);
300 smp_flush_dcache_page_impl(page
, cpu
);
302 clear_dcache_dirty_cpu(page
, cpu
);
309 /* mm->context.lock must be held */
310 static void __update_mmu_tsb_insert(struct mm_struct
*mm
, unsigned long tsb_index
,
311 unsigned long tsb_hash_shift
, unsigned long address
,
314 struct tsb
*tsb
= mm
->context
.tsb_block
[tsb_index
].tsb
;
317 tsb
+= ((address
>> tsb_hash_shift
) &
318 (mm
->context
.tsb_block
[tsb_index
].tsb_nentries
- 1UL));
319 tag
= (address
>> 22UL);
320 tsb_insert(tsb
, tag
, tte
);
323 void update_mmu_cache(struct vm_area_struct
*vma
, unsigned long address
, pte_t
*ptep
)
325 unsigned long tsb_index
, tsb_hash_shift
, flags
;
326 struct mm_struct
*mm
;
329 if (tlb_type
!= hypervisor
) {
330 unsigned long pfn
= pte_pfn(pte
);
338 tsb_index
= MM_TSB_BASE
;
339 tsb_hash_shift
= PAGE_SHIFT
;
341 spin_lock_irqsave(&mm
->context
.lock
, flags
);
343 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
344 if (mm
->context
.tsb_block
[MM_TSB_HUGE
].tsb
!= NULL
) {
345 if ((tlb_type
== hypervisor
&&
346 (pte_val(pte
) & _PAGE_SZALL_4V
) == _PAGE_SZHUGE_4V
) ||
347 (tlb_type
!= hypervisor
&&
348 (pte_val(pte
) & _PAGE_SZALL_4U
) == _PAGE_SZHUGE_4U
)) {
349 tsb_index
= MM_TSB_HUGE
;
350 tsb_hash_shift
= HPAGE_SHIFT
;
355 __update_mmu_tsb_insert(mm
, tsb_index
, tsb_hash_shift
,
356 address
, pte_val(pte
));
358 spin_unlock_irqrestore(&mm
->context
.lock
, flags
);
361 void flush_dcache_page(struct page
*page
)
363 struct address_space
*mapping
;
366 if (tlb_type
== hypervisor
)
369 /* Do not bother with the expensive D-cache flush if it
370 * is merely the zero page. The 'bigcore' testcase in GDB
371 * causes this case to run millions of times.
373 if (page
== ZERO_PAGE(0))
376 this_cpu
= get_cpu();
378 mapping
= page_mapping(page
);
379 if (mapping
&& !mapping_mapped(mapping
)) {
380 int dirty
= test_bit(PG_dcache_dirty
, &page
->flags
);
382 int dirty_cpu
= dcache_dirty_cpu(page
);
384 if (dirty_cpu
== this_cpu
)
386 smp_flush_dcache_page_impl(page
, dirty_cpu
);
388 set_dcache_dirty(page
, this_cpu
);
390 /* We could delay the flush for the !page_mapping
391 * case too. But that case is for exec env/arg
392 * pages and those are %99 certainly going to get
393 * faulted into the tlb (and thus flushed) anyways.
395 flush_dcache_page_impl(page
);
401 EXPORT_SYMBOL(flush_dcache_page
);
403 void __kprobes
flush_icache_range(unsigned long start
, unsigned long end
)
405 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
406 if (tlb_type
== spitfire
) {
409 /* This code only runs on Spitfire cpus so this is
410 * why we can assume _PAGE_PADDR_4U.
412 for (kaddr
= start
; kaddr
< end
; kaddr
+= PAGE_SIZE
) {
413 unsigned long paddr
, mask
= _PAGE_PADDR_4U
;
415 if (kaddr
>= PAGE_OFFSET
)
416 paddr
= kaddr
& mask
;
418 pgd_t
*pgdp
= pgd_offset_k(kaddr
);
419 pud_t
*pudp
= pud_offset(pgdp
, kaddr
);
420 pmd_t
*pmdp
= pmd_offset(pudp
, kaddr
);
421 pte_t
*ptep
= pte_offset_kernel(pmdp
, kaddr
);
423 paddr
= pte_val(*ptep
) & mask
;
425 __flush_icache_page(paddr
);
429 EXPORT_SYMBOL(flush_icache_range
);
431 void mmu_info(struct seq_file
*m
)
433 static const char *pgsz_strings
[] = {
434 "8K", "64K", "512K", "4MB", "32MB",
435 "256MB", "2GB", "16GB",
439 if (tlb_type
== cheetah
)
440 seq_printf(m
, "MMU Type\t: Cheetah\n");
441 else if (tlb_type
== cheetah_plus
)
442 seq_printf(m
, "MMU Type\t: Cheetah+\n");
443 else if (tlb_type
== spitfire
)
444 seq_printf(m
, "MMU Type\t: Spitfire\n");
445 else if (tlb_type
== hypervisor
)
446 seq_printf(m
, "MMU Type\t: Hypervisor (sun4v)\n");
448 seq_printf(m
, "MMU Type\t: ???\n");
450 seq_printf(m
, "MMU PGSZs\t: ");
452 for (i
= 0; i
< ARRAY_SIZE(pgsz_strings
); i
++) {
453 if (cpu_pgsz_mask
& (1UL << i
)) {
454 seq_printf(m
, "%s%s",
455 printed
? "," : "", pgsz_strings
[i
]);
461 #ifdef CONFIG_DEBUG_DCFLUSH
462 seq_printf(m
, "DCPageFlushes\t: %d\n",
463 atomic_read(&dcpage_flushes
));
465 seq_printf(m
, "DCPageFlushesXC\t: %d\n",
466 atomic_read(&dcpage_flushes_xcall
));
467 #endif /* CONFIG_SMP */
468 #endif /* CONFIG_DEBUG_DCFLUSH */
471 struct linux_prom_translation prom_trans
[512] __read_mostly
;
472 unsigned int prom_trans_ents __read_mostly
;
474 unsigned long kern_locked_tte_data
;
476 /* The obp translations are saved based on 8k pagesize, since obp can
477 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
478 * HI_OBP_ADDRESS range are handled in ktlb.S.
480 static inline int in_obp_range(unsigned long vaddr
)
482 return (vaddr
>= LOW_OBP_ADDRESS
&&
483 vaddr
< HI_OBP_ADDRESS
);
486 static int cmp_ptrans(const void *a
, const void *b
)
488 const struct linux_prom_translation
*x
= a
, *y
= b
;
490 if (x
->virt
> y
->virt
)
492 if (x
->virt
< y
->virt
)
497 /* Read OBP translations property into 'prom_trans[]'. */
498 static void __init
read_obp_translations(void)
500 int n
, node
, ents
, first
, last
, i
;
502 node
= prom_finddevice("/virtual-memory");
503 n
= prom_getproplen(node
, "translations");
504 if (unlikely(n
== 0 || n
== -1)) {
505 prom_printf("prom_mappings: Couldn't get size.\n");
508 if (unlikely(n
> sizeof(prom_trans
))) {
509 prom_printf("prom_mappings: Size %d is too big.\n", n
);
513 if ((n
= prom_getproperty(node
, "translations",
514 (char *)&prom_trans
[0],
515 sizeof(prom_trans
))) == -1) {
516 prom_printf("prom_mappings: Couldn't get property.\n");
520 n
= n
/ sizeof(struct linux_prom_translation
);
524 sort(prom_trans
, ents
, sizeof(struct linux_prom_translation
),
527 /* Now kick out all the non-OBP entries. */
528 for (i
= 0; i
< ents
; i
++) {
529 if (in_obp_range(prom_trans
[i
].virt
))
533 for (; i
< ents
; i
++) {
534 if (!in_obp_range(prom_trans
[i
].virt
))
539 for (i
= 0; i
< (last
- first
); i
++) {
540 struct linux_prom_translation
*src
= &prom_trans
[i
+ first
];
541 struct linux_prom_translation
*dest
= &prom_trans
[i
];
545 for (; i
< ents
; i
++) {
546 struct linux_prom_translation
*dest
= &prom_trans
[i
];
547 dest
->virt
= dest
->size
= dest
->data
= 0x0UL
;
550 prom_trans_ents
= last
- first
;
552 if (tlb_type
== spitfire
) {
553 /* Clear diag TTE bits. */
554 for (i
= 0; i
< prom_trans_ents
; i
++)
555 prom_trans
[i
].data
&= ~0x0003fe0000000000UL
;
558 /* Force execute bit on. */
559 for (i
= 0; i
< prom_trans_ents
; i
++)
560 prom_trans
[i
].data
|= (tlb_type
== hypervisor
?
561 _PAGE_EXEC_4V
: _PAGE_EXEC_4U
);
564 static void __init
hypervisor_tlb_lock(unsigned long vaddr
,
568 unsigned long ret
= sun4v_mmu_map_perm_addr(vaddr
, 0, pte
, mmu
);
571 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
572 "errors with %lx\n", vaddr
, 0, pte
, mmu
, ret
);
577 static unsigned long kern_large_tte(unsigned long paddr
);
579 static void __init
remap_kernel(void)
581 unsigned long phys_page
, tte_vaddr
, tte_data
;
582 int i
, tlb_ent
= sparc64_highest_locked_tlbent();
584 tte_vaddr
= (unsigned long) KERNBASE
;
585 phys_page
= (prom_boot_mapping_phys_low
>> 22UL) << 22UL;
586 tte_data
= kern_large_tte(phys_page
);
588 kern_locked_tte_data
= tte_data
;
590 /* Now lock us into the TLBs via Hypervisor or OBP. */
591 if (tlb_type
== hypervisor
) {
592 for (i
= 0; i
< num_kernel_image_mappings
; i
++) {
593 hypervisor_tlb_lock(tte_vaddr
, tte_data
, HV_MMU_DMMU
);
594 hypervisor_tlb_lock(tte_vaddr
, tte_data
, HV_MMU_IMMU
);
595 tte_vaddr
+= 0x400000;
596 tte_data
+= 0x400000;
599 for (i
= 0; i
< num_kernel_image_mappings
; i
++) {
600 prom_dtlb_load(tlb_ent
- i
, tte_data
, tte_vaddr
);
601 prom_itlb_load(tlb_ent
- i
, tte_data
, tte_vaddr
);
602 tte_vaddr
+= 0x400000;
603 tte_data
+= 0x400000;
605 sparc64_highest_unlocked_tlb_ent
= tlb_ent
- i
;
607 if (tlb_type
== cheetah_plus
) {
608 sparc64_kern_pri_context
= (CTX_CHEETAH_PLUS_CTX0
|
609 CTX_CHEETAH_PLUS_NUC
);
610 sparc64_kern_pri_nuc_bits
= CTX_CHEETAH_PLUS_NUC
;
611 sparc64_kern_sec_context
= CTX_CHEETAH_PLUS_CTX0
;
616 static void __init
inherit_prom_mappings(void)
618 /* Now fixup OBP's idea about where we really are mapped. */
619 printk("Remapping the kernel... ");
624 void prom_world(int enter
)
629 __asm__
__volatile__("flushw");
632 void __flush_dcache_range(unsigned long start
, unsigned long end
)
636 if (tlb_type
== spitfire
) {
639 for (va
= start
; va
< end
; va
+= 32) {
640 spitfire_put_dcache_tag(va
& 0x3fe0, 0x0);
644 } else if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
) {
647 for (va
= start
; va
< end
; va
+= 32)
648 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
652 "i" (ASI_DCACHE_INVALIDATE
));
655 EXPORT_SYMBOL(__flush_dcache_range
);
657 /* get_new_mmu_context() uses "cache + 1". */
658 DEFINE_SPINLOCK(ctx_alloc_lock
);
659 unsigned long tlb_context_cache
= CTX_FIRST_VERSION
- 1;
660 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
661 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
662 DECLARE_BITMAP(mmu_context_bmap
, MAX_CTX_NR
);
664 /* Caller does TLB context flushing on local CPU if necessary.
665 * The caller also ensures that CTX_VALID(mm->context) is false.
667 * We must be careful about boundary cases so that we never
668 * let the user have CTX 0 (nucleus) or we ever use a CTX
669 * version of zero (and thus NO_CONTEXT would not be caught
670 * by version mis-match tests in mmu_context.h).
672 * Always invoked with interrupts disabled.
674 void get_new_mmu_context(struct mm_struct
*mm
)
676 unsigned long ctx
, new_ctx
;
677 unsigned long orig_pgsz_bits
;
681 spin_lock_irqsave(&ctx_alloc_lock
, flags
);
682 orig_pgsz_bits
= (mm
->context
.sparc64_ctx_val
& CTX_PGSZ_MASK
);
683 ctx
= (tlb_context_cache
+ 1) & CTX_NR_MASK
;
684 new_ctx
= find_next_zero_bit(mmu_context_bmap
, 1 << CTX_NR_BITS
, ctx
);
686 if (new_ctx
>= (1 << CTX_NR_BITS
)) {
687 new_ctx
= find_next_zero_bit(mmu_context_bmap
, ctx
, 1);
688 if (new_ctx
>= ctx
) {
690 new_ctx
= (tlb_context_cache
& CTX_VERSION_MASK
) +
693 new_ctx
= CTX_FIRST_VERSION
;
695 /* Don't call memset, for 16 entries that's just
698 mmu_context_bmap
[0] = 3;
699 mmu_context_bmap
[1] = 0;
700 mmu_context_bmap
[2] = 0;
701 mmu_context_bmap
[3] = 0;
702 for (i
= 4; i
< CTX_BMAP_SLOTS
; i
+= 4) {
703 mmu_context_bmap
[i
+ 0] = 0;
704 mmu_context_bmap
[i
+ 1] = 0;
705 mmu_context_bmap
[i
+ 2] = 0;
706 mmu_context_bmap
[i
+ 3] = 0;
712 mmu_context_bmap
[new_ctx
>>6] |= (1UL << (new_ctx
& 63));
713 new_ctx
|= (tlb_context_cache
& CTX_VERSION_MASK
);
715 tlb_context_cache
= new_ctx
;
716 mm
->context
.sparc64_ctx_val
= new_ctx
| orig_pgsz_bits
;
717 spin_unlock_irqrestore(&ctx_alloc_lock
, flags
);
719 if (unlikely(new_version
))
720 smp_new_mmu_context_version();
723 static int numa_enabled
= 1;
724 static int numa_debug
;
726 static int __init
early_numa(char *p
)
731 if (strstr(p
, "off"))
734 if (strstr(p
, "debug"))
739 early_param("numa", early_numa
);
741 #define numadbg(f, a...) \
742 do { if (numa_debug) \
743 printk(KERN_INFO f, ## a); \
746 static void __init
find_ramdisk(unsigned long phys_base
)
748 #ifdef CONFIG_BLK_DEV_INITRD
749 if (sparc_ramdisk_image
|| sparc_ramdisk_image64
) {
750 unsigned long ramdisk_image
;
752 /* Older versions of the bootloader only supported a
753 * 32-bit physical address for the ramdisk image
754 * location, stored at sparc_ramdisk_image. Newer
755 * SILO versions set sparc_ramdisk_image to zero and
756 * provide a full 64-bit physical address at
757 * sparc_ramdisk_image64.
759 ramdisk_image
= sparc_ramdisk_image
;
761 ramdisk_image
= sparc_ramdisk_image64
;
763 /* Another bootloader quirk. The bootloader normalizes
764 * the physical address to KERNBASE, so we have to
765 * factor that back out and add in the lowest valid
766 * physical page address to get the true physical address.
768 ramdisk_image
-= KERNBASE
;
769 ramdisk_image
+= phys_base
;
771 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
772 ramdisk_image
, sparc_ramdisk_size
);
774 initrd_start
= ramdisk_image
;
775 initrd_end
= ramdisk_image
+ sparc_ramdisk_size
;
777 memblock_reserve(initrd_start
, sparc_ramdisk_size
);
779 initrd_start
+= PAGE_OFFSET
;
780 initrd_end
+= PAGE_OFFSET
;
785 struct node_mem_mask
{
789 static struct node_mem_mask node_masks
[MAX_NUMNODES
];
790 static int num_node_masks
;
792 int numa_cpu_lookup_table
[NR_CPUS
];
793 cpumask_t numa_cpumask_lookup_table
[MAX_NUMNODES
];
795 #ifdef CONFIG_NEED_MULTIPLE_NODES
797 struct mdesc_mblock
{
800 u64 offset
; /* RA-to-PA */
802 static struct mdesc_mblock
*mblocks
;
803 static int num_mblocks
;
805 static unsigned long ra_to_pa(unsigned long addr
)
809 for (i
= 0; i
< num_mblocks
; i
++) {
810 struct mdesc_mblock
*m
= &mblocks
[i
];
812 if (addr
>= m
->base
&&
813 addr
< (m
->base
+ m
->size
)) {
821 static int find_node(unsigned long addr
)
825 addr
= ra_to_pa(addr
);
826 for (i
= 0; i
< num_node_masks
; i
++) {
827 struct node_mem_mask
*p
= &node_masks
[i
];
829 if ((addr
& p
->mask
) == p
->val
)
835 static u64
memblock_nid_range(u64 start
, u64 end
, int *nid
)
837 *nid
= find_node(start
);
839 while (start
< end
) {
840 int n
= find_node(start
);
854 /* This must be invoked after performing all of the necessary
855 * memblock_set_node() calls for 'nid'. We need to be able to get
856 * correct data from get_pfn_range_for_nid().
858 static void __init
allocate_node_data(int nid
)
860 struct pglist_data
*p
;
861 unsigned long start_pfn
, end_pfn
;
862 #ifdef CONFIG_NEED_MULTIPLE_NODES
865 paddr
= memblock_alloc_try_nid(sizeof(struct pglist_data
), SMP_CACHE_BYTES
, nid
);
867 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid
);
870 NODE_DATA(nid
) = __va(paddr
);
871 memset(NODE_DATA(nid
), 0, sizeof(struct pglist_data
));
873 NODE_DATA(nid
)->node_id
= nid
;
878 get_pfn_range_for_nid(nid
, &start_pfn
, &end_pfn
);
879 p
->node_start_pfn
= start_pfn
;
880 p
->node_spanned_pages
= end_pfn
- start_pfn
;
883 static void init_node_masks_nonnuma(void)
887 numadbg("Initializing tables for non-numa.\n");
889 node_masks
[0].mask
= node_masks
[0].val
= 0;
892 for (i
= 0; i
< NR_CPUS
; i
++)
893 numa_cpu_lookup_table
[i
] = 0;
895 cpumask_setall(&numa_cpumask_lookup_table
[0]);
898 #ifdef CONFIG_NEED_MULTIPLE_NODES
899 struct pglist_data
*node_data
[MAX_NUMNODES
];
901 EXPORT_SYMBOL(numa_cpu_lookup_table
);
902 EXPORT_SYMBOL(numa_cpumask_lookup_table
);
903 EXPORT_SYMBOL(node_data
);
905 struct mdesc_mlgroup
{
911 static struct mdesc_mlgroup
*mlgroups
;
912 static int num_mlgroups
;
914 static int scan_pio_for_cfg_handle(struct mdesc_handle
*md
, u64 pio
,
919 mdesc_for_each_arc(arc
, md
, pio
, MDESC_ARC_TYPE_FWD
) {
920 u64 target
= mdesc_arc_target(md
, arc
);
923 val
= mdesc_get_property(md
, target
,
925 if (val
&& *val
== cfg_handle
)
931 static int scan_arcs_for_cfg_handle(struct mdesc_handle
*md
, u64 grp
,
934 u64 arc
, candidate
, best_latency
= ~(u64
)0;
936 candidate
= MDESC_NODE_NULL
;
937 mdesc_for_each_arc(arc
, md
, grp
, MDESC_ARC_TYPE_FWD
) {
938 u64 target
= mdesc_arc_target(md
, arc
);
939 const char *name
= mdesc_node_name(md
, target
);
942 if (strcmp(name
, "pio-latency-group"))
945 val
= mdesc_get_property(md
, target
, "latency", NULL
);
949 if (*val
< best_latency
) {
955 if (candidate
== MDESC_NODE_NULL
)
958 return scan_pio_for_cfg_handle(md
, candidate
, cfg_handle
);
961 int of_node_to_nid(struct device_node
*dp
)
963 const struct linux_prom64_registers
*regs
;
964 struct mdesc_handle
*md
;
969 /* This is the right thing to do on currently supported
970 * SUN4U NUMA platforms as well, as the PCI controller does
971 * not sit behind any particular memory controller.
976 regs
= of_get_property(dp
, "reg", NULL
);
980 cfg_handle
= (regs
->phys_addr
>> 32UL) & 0x0fffffff;
986 mdesc_for_each_node_by_name(md
, grp
, "group") {
987 if (!scan_arcs_for_cfg_handle(md
, grp
, cfg_handle
)) {
999 static void __init
add_node_ranges(void)
1001 struct memblock_region
*reg
;
1003 for_each_memblock(memory
, reg
) {
1004 unsigned long size
= reg
->size
;
1005 unsigned long start
, end
;
1009 while (start
< end
) {
1010 unsigned long this_end
;
1013 this_end
= memblock_nid_range(start
, end
, &nid
);
1015 numadbg("Setting memblock NUMA node nid[%d] "
1016 "start[%lx] end[%lx]\n",
1017 nid
, start
, this_end
);
1019 memblock_set_node(start
, this_end
- start
, nid
);
1025 static int __init
grab_mlgroups(struct mdesc_handle
*md
)
1027 unsigned long paddr
;
1031 mdesc_for_each_node_by_name(md
, node
, "memory-latency-group")
1036 paddr
= memblock_alloc(count
* sizeof(struct mdesc_mlgroup
),
1041 mlgroups
= __va(paddr
);
1042 num_mlgroups
= count
;
1045 mdesc_for_each_node_by_name(md
, node
, "memory-latency-group") {
1046 struct mdesc_mlgroup
*m
= &mlgroups
[count
++];
1051 val
= mdesc_get_property(md
, node
, "latency", NULL
);
1053 val
= mdesc_get_property(md
, node
, "address-match", NULL
);
1055 val
= mdesc_get_property(md
, node
, "address-mask", NULL
);
1058 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1059 "match[%llx] mask[%llx]\n",
1060 count
- 1, m
->node
, m
->latency
, m
->match
, m
->mask
);
1066 static int __init
grab_mblocks(struct mdesc_handle
*md
)
1068 unsigned long paddr
;
1072 mdesc_for_each_node_by_name(md
, node
, "mblock")
1077 paddr
= memblock_alloc(count
* sizeof(struct mdesc_mblock
),
1082 mblocks
= __va(paddr
);
1083 num_mblocks
= count
;
1086 mdesc_for_each_node_by_name(md
, node
, "mblock") {
1087 struct mdesc_mblock
*m
= &mblocks
[count
++];
1090 val
= mdesc_get_property(md
, node
, "base", NULL
);
1092 val
= mdesc_get_property(md
, node
, "size", NULL
);
1094 val
= mdesc_get_property(md
, node
,
1095 "address-congruence-offset", NULL
);
1098 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1099 count
- 1, m
->base
, m
->size
, m
->offset
);
1105 static void __init
numa_parse_mdesc_group_cpus(struct mdesc_handle
*md
,
1106 u64 grp
, cpumask_t
*mask
)
1110 cpumask_clear(mask
);
1112 mdesc_for_each_arc(arc
, md
, grp
, MDESC_ARC_TYPE_BACK
) {
1113 u64 target
= mdesc_arc_target(md
, arc
);
1114 const char *name
= mdesc_node_name(md
, target
);
1117 if (strcmp(name
, "cpu"))
1119 id
= mdesc_get_property(md
, target
, "id", NULL
);
1120 if (*id
< nr_cpu_ids
)
1121 cpumask_set_cpu(*id
, mask
);
1125 static struct mdesc_mlgroup
* __init
find_mlgroup(u64 node
)
1129 for (i
= 0; i
< num_mlgroups
; i
++) {
1130 struct mdesc_mlgroup
*m
= &mlgroups
[i
];
1131 if (m
->node
== node
)
1137 static int __init
numa_attach_mlgroup(struct mdesc_handle
*md
, u64 grp
,
1140 struct mdesc_mlgroup
*candidate
= NULL
;
1141 u64 arc
, best_latency
= ~(u64
)0;
1142 struct node_mem_mask
*n
;
1144 mdesc_for_each_arc(arc
, md
, grp
, MDESC_ARC_TYPE_FWD
) {
1145 u64 target
= mdesc_arc_target(md
, arc
);
1146 struct mdesc_mlgroup
*m
= find_mlgroup(target
);
1149 if (m
->latency
< best_latency
) {
1151 best_latency
= m
->latency
;
1157 if (num_node_masks
!= index
) {
1158 printk(KERN_ERR
"Inconsistent NUMA state, "
1159 "index[%d] != num_node_masks[%d]\n",
1160 index
, num_node_masks
);
1164 n
= &node_masks
[num_node_masks
++];
1166 n
->mask
= candidate
->mask
;
1167 n
->val
= candidate
->match
;
1169 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1170 index
, n
->mask
, n
->val
, candidate
->latency
);
1175 static int __init
numa_parse_mdesc_group(struct mdesc_handle
*md
, u64 grp
,
1181 numa_parse_mdesc_group_cpus(md
, grp
, &mask
);
1183 for_each_cpu(cpu
, &mask
)
1184 numa_cpu_lookup_table
[cpu
] = index
;
1185 cpumask_copy(&numa_cpumask_lookup_table
[index
], &mask
);
1188 printk(KERN_INFO
"NUMA GROUP[%d]: cpus [ ", index
);
1189 for_each_cpu(cpu
, &mask
)
1194 return numa_attach_mlgroup(md
, grp
, index
);
1197 static int __init
numa_parse_mdesc(void)
1199 struct mdesc_handle
*md
= mdesc_grab();
1203 node
= mdesc_node_by_name(md
, MDESC_NODE_NULL
, "latency-groups");
1204 if (node
== MDESC_NODE_NULL
) {
1209 err
= grab_mblocks(md
);
1213 err
= grab_mlgroups(md
);
1218 mdesc_for_each_node_by_name(md
, node
, "group") {
1219 err
= numa_parse_mdesc_group(md
, node
, count
);
1227 for (i
= 0; i
< num_node_masks
; i
++) {
1228 allocate_node_data(i
);
1238 static int __init
numa_parse_jbus(void)
1240 unsigned long cpu
, index
;
1242 /* NUMA node id is encoded in bits 36 and higher, and there is
1243 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1246 for_each_present_cpu(cpu
) {
1247 numa_cpu_lookup_table
[cpu
] = index
;
1248 cpumask_copy(&numa_cpumask_lookup_table
[index
], cpumask_of(cpu
));
1249 node_masks
[index
].mask
= ~((1UL << 36UL) - 1UL);
1250 node_masks
[index
].val
= cpu
<< 36UL;
1254 num_node_masks
= index
;
1258 for (index
= 0; index
< num_node_masks
; index
++) {
1259 allocate_node_data(index
);
1260 node_set_online(index
);
1266 static int __init
numa_parse_sun4u(void)
1268 if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
) {
1271 __asm__ ("rdpr %%ver, %0" : "=r" (ver
));
1272 if ((ver
>> 32UL) == __JALAPENO_ID
||
1273 (ver
>> 32UL) == __SERRANO_ID
)
1274 return numa_parse_jbus();
1279 static int __init
bootmem_init_numa(void)
1283 numadbg("bootmem_init_numa()\n");
1286 if (tlb_type
== hypervisor
)
1287 err
= numa_parse_mdesc();
1289 err
= numa_parse_sun4u();
1296 static int bootmem_init_numa(void)
1303 static void __init
bootmem_init_nonnuma(void)
1305 unsigned long top_of_ram
= memblock_end_of_DRAM();
1306 unsigned long total_ram
= memblock_phys_mem_size();
1308 numadbg("bootmem_init_nonnuma()\n");
1310 printk(KERN_INFO
"Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1311 top_of_ram
, total_ram
);
1312 printk(KERN_INFO
"Memory hole size: %ldMB\n",
1313 (top_of_ram
- total_ram
) >> 20);
1315 init_node_masks_nonnuma();
1316 memblock_set_node(0, (phys_addr_t
)ULLONG_MAX
, 0);
1317 allocate_node_data(0);
1321 static unsigned long __init
bootmem_init(unsigned long phys_base
)
1323 unsigned long end_pfn
;
1325 end_pfn
= memblock_end_of_DRAM() >> PAGE_SHIFT
;
1326 max_pfn
= max_low_pfn
= end_pfn
;
1327 min_low_pfn
= (phys_base
>> PAGE_SHIFT
);
1329 if (bootmem_init_numa() < 0)
1330 bootmem_init_nonnuma();
1332 /* Dump memblock with node info. */
1333 memblock_dump_all();
1335 /* XXX cpu notifier XXX */
1337 sparse_memory_present_with_active_regions(MAX_NUMNODES
);
1343 static struct linux_prom64_registers pall
[MAX_BANKS
] __initdata
;
1344 static int pall_ents __initdata
;
1346 #ifdef CONFIG_DEBUG_PAGEALLOC
1347 static unsigned long __ref
kernel_map_range(unsigned long pstart
,
1348 unsigned long pend
, pgprot_t prot
)
1350 unsigned long vstart
= PAGE_OFFSET
+ pstart
;
1351 unsigned long vend
= PAGE_OFFSET
+ pend
;
1352 unsigned long alloc_bytes
= 0UL;
1354 if ((vstart
& ~PAGE_MASK
) || (vend
& ~PAGE_MASK
)) {
1355 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1360 while (vstart
< vend
) {
1361 unsigned long this_end
, paddr
= __pa(vstart
);
1362 pgd_t
*pgd
= pgd_offset_k(vstart
);
1367 pud
= pud_offset(pgd
, vstart
);
1368 if (pud_none(*pud
)) {
1371 new = __alloc_bootmem(PAGE_SIZE
, PAGE_SIZE
, PAGE_SIZE
);
1372 alloc_bytes
+= PAGE_SIZE
;
1373 pud_populate(&init_mm
, pud
, new);
1376 pmd
= pmd_offset(pud
, vstart
);
1377 if (!pmd_present(*pmd
)) {
1380 new = __alloc_bootmem(PAGE_SIZE
, PAGE_SIZE
, PAGE_SIZE
);
1381 alloc_bytes
+= PAGE_SIZE
;
1382 pmd_populate_kernel(&init_mm
, pmd
, new);
1385 pte
= pte_offset_kernel(pmd
, vstart
);
1386 this_end
= (vstart
+ PMD_SIZE
) & PMD_MASK
;
1387 if (this_end
> vend
)
1390 while (vstart
< this_end
) {
1391 pte_val(*pte
) = (paddr
| pgprot_val(prot
));
1393 vstart
+= PAGE_SIZE
;
1402 extern unsigned int kvmap_linear_patch
[1];
1403 #endif /* CONFIG_DEBUG_PAGEALLOC */
1405 static void __init
kpte_set_val(unsigned long index
, unsigned long val
)
1407 unsigned long *ptr
= kpte_linear_bitmap
;
1409 val
<<= ((index
% (BITS_PER_LONG
/ 2)) * 2);
1410 ptr
+= (index
/ (BITS_PER_LONG
/ 2));
1415 static const unsigned long kpte_shift_min
= 28; /* 256MB */
1416 static const unsigned long kpte_shift_max
= 34; /* 16GB */
1417 static const unsigned long kpte_shift_incr
= 3;
1419 static unsigned long kpte_mark_using_shift(unsigned long start
, unsigned long end
,
1420 unsigned long shift
)
1422 unsigned long size
= (1UL << shift
);
1423 unsigned long mask
= (size
- 1UL);
1424 unsigned long remains
= end
- start
;
1427 if (remains
< size
|| (start
& mask
))
1432 * shift 28 --> kern_linear_pte_xor index 1
1433 * shift 31 --> kern_linear_pte_xor index 2
1434 * shift 34 --> kern_linear_pte_xor index 3
1436 val
= ((shift
- kpte_shift_min
) / kpte_shift_incr
) + 1;
1439 if (shift
!= kpte_shift_max
)
1443 unsigned long index
= start
>> kpte_shift_min
;
1445 kpte_set_val(index
, val
);
1447 start
+= 1UL << kpte_shift_min
;
1448 remains
-= 1UL << kpte_shift_min
;
1454 static void __init
mark_kpte_bitmap(unsigned long start
, unsigned long end
)
1456 unsigned long smallest_size
, smallest_mask
;
1459 smallest_size
= (1UL << kpte_shift_min
);
1460 smallest_mask
= (smallest_size
- 1UL);
1462 while (start
< end
) {
1463 unsigned long orig_start
= start
;
1465 for (s
= kpte_shift_max
; s
>= kpte_shift_min
; s
-= kpte_shift_incr
) {
1466 start
= kpte_mark_using_shift(start
, end
, s
);
1468 if (start
!= orig_start
)
1472 if (start
== orig_start
)
1473 start
= (start
+ smallest_size
) & ~smallest_mask
;
1477 static void __init
init_kpte_bitmap(void)
1481 for (i
= 0; i
< pall_ents
; i
++) {
1482 unsigned long phys_start
, phys_end
;
1484 phys_start
= pall
[i
].phys_addr
;
1485 phys_end
= phys_start
+ pall
[i
].reg_size
;
1487 mark_kpte_bitmap(phys_start
, phys_end
);
1491 static void __init
kernel_physical_mapping_init(void)
1493 #ifdef CONFIG_DEBUG_PAGEALLOC
1494 unsigned long i
, mem_alloced
= 0UL;
1496 for (i
= 0; i
< pall_ents
; i
++) {
1497 unsigned long phys_start
, phys_end
;
1499 phys_start
= pall
[i
].phys_addr
;
1500 phys_end
= phys_start
+ pall
[i
].reg_size
;
1502 mem_alloced
+= kernel_map_range(phys_start
, phys_end
,
1506 printk("Allocated %ld bytes for kernel page tables.\n",
1509 kvmap_linear_patch
[0] = 0x01000000; /* nop */
1510 flushi(&kvmap_linear_patch
[0]);
1516 #ifdef CONFIG_DEBUG_PAGEALLOC
1517 void kernel_map_pages(struct page
*page
, int numpages
, int enable
)
1519 unsigned long phys_start
= page_to_pfn(page
) << PAGE_SHIFT
;
1520 unsigned long phys_end
= phys_start
+ (numpages
* PAGE_SIZE
);
1522 kernel_map_range(phys_start
, phys_end
,
1523 (enable
? PAGE_KERNEL
: __pgprot(0)));
1525 flush_tsb_kernel_range(PAGE_OFFSET
+ phys_start
,
1526 PAGE_OFFSET
+ phys_end
);
1528 /* we should perform an IPI and flush all tlbs,
1529 * but that can deadlock->flush only current cpu.
1531 __flush_tlb_kernel_range(PAGE_OFFSET
+ phys_start
,
1532 PAGE_OFFSET
+ phys_end
);
1536 unsigned long __init
find_ecache_flush_span(unsigned long size
)
1540 for (i
= 0; i
< pavail_ents
; i
++) {
1541 if (pavail
[i
].reg_size
>= size
)
1542 return pavail
[i
].phys_addr
;
1548 static void __init
tsb_phys_patch(void)
1550 struct tsb_ldquad_phys_patch_entry
*pquad
;
1551 struct tsb_phys_patch_entry
*p
;
1553 pquad
= &__tsb_ldquad_phys_patch
;
1554 while (pquad
< &__tsb_ldquad_phys_patch_end
) {
1555 unsigned long addr
= pquad
->addr
;
1557 if (tlb_type
== hypervisor
)
1558 *(unsigned int *) addr
= pquad
->sun4v_insn
;
1560 *(unsigned int *) addr
= pquad
->sun4u_insn
;
1562 __asm__
__volatile__("flush %0"
1569 p
= &__tsb_phys_patch
;
1570 while (p
< &__tsb_phys_patch_end
) {
1571 unsigned long addr
= p
->addr
;
1573 *(unsigned int *) addr
= p
->insn
;
1575 __asm__
__volatile__("flush %0"
1583 /* Don't mark as init, we give this to the Hypervisor. */
1584 #ifndef CONFIG_DEBUG_PAGEALLOC
1585 #define NUM_KTSB_DESCR 2
1587 #define NUM_KTSB_DESCR 1
1589 static struct hv_tsb_descr ktsb_descr
[NUM_KTSB_DESCR
];
1590 extern struct tsb swapper_tsb
[KERNEL_TSB_NENTRIES
];
1592 static void patch_one_ktsb_phys(unsigned int *start
, unsigned int *end
, unsigned long pa
)
1594 pa
>>= KTSB_PHYS_SHIFT
;
1596 while (start
< end
) {
1597 unsigned int *ia
= (unsigned int *)(unsigned long)*start
;
1599 ia
[0] = (ia
[0] & ~0x3fffff) | (pa
>> 10);
1600 __asm__
__volatile__("flush %0" : : "r" (ia
));
1602 ia
[1] = (ia
[1] & ~0x3ff) | (pa
& 0x3ff);
1603 __asm__
__volatile__("flush %0" : : "r" (ia
+ 1));
1609 static void ktsb_phys_patch(void)
1611 extern unsigned int __swapper_tsb_phys_patch
;
1612 extern unsigned int __swapper_tsb_phys_patch_end
;
1613 unsigned long ktsb_pa
;
1615 ktsb_pa
= kern_base
+ ((unsigned long)&swapper_tsb
[0] - KERNBASE
);
1616 patch_one_ktsb_phys(&__swapper_tsb_phys_patch
,
1617 &__swapper_tsb_phys_patch_end
, ktsb_pa
);
1618 #ifndef CONFIG_DEBUG_PAGEALLOC
1620 extern unsigned int __swapper_4m_tsb_phys_patch
;
1621 extern unsigned int __swapper_4m_tsb_phys_patch_end
;
1622 ktsb_pa
= (kern_base
+
1623 ((unsigned long)&swapper_4m_tsb
[0] - KERNBASE
));
1624 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch
,
1625 &__swapper_4m_tsb_phys_patch_end
, ktsb_pa
);
1630 static void __init
sun4v_ktsb_init(void)
1632 unsigned long ktsb_pa
;
1634 /* First KTSB for PAGE_SIZE mappings. */
1635 ktsb_pa
= kern_base
+ ((unsigned long)&swapper_tsb
[0] - KERNBASE
);
1637 switch (PAGE_SIZE
) {
1640 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_8K
;
1641 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_8K
;
1645 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_64K
;
1646 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_64K
;
1650 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_512K
;
1651 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_512K
;
1654 case 4 * 1024 * 1024:
1655 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1656 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_4MB
;
1660 ktsb_descr
[0].assoc
= 1;
1661 ktsb_descr
[0].num_ttes
= KERNEL_TSB_NENTRIES
;
1662 ktsb_descr
[0].ctx_idx
= 0;
1663 ktsb_descr
[0].tsb_base
= ktsb_pa
;
1664 ktsb_descr
[0].resv
= 0;
1666 #ifndef CONFIG_DEBUG_PAGEALLOC
1667 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
1668 ktsb_pa
= (kern_base
+
1669 ((unsigned long)&swapper_4m_tsb
[0] - KERNBASE
));
1671 ktsb_descr
[1].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1672 ktsb_descr
[1].pgsz_mask
= ((HV_PGSZ_MASK_4MB
|
1673 HV_PGSZ_MASK_256MB
|
1675 HV_PGSZ_MASK_16GB
) &
1677 ktsb_descr
[1].assoc
= 1;
1678 ktsb_descr
[1].num_ttes
= KERNEL_TSB4M_NENTRIES
;
1679 ktsb_descr
[1].ctx_idx
= 0;
1680 ktsb_descr
[1].tsb_base
= ktsb_pa
;
1681 ktsb_descr
[1].resv
= 0;
1685 void __cpuinit
sun4v_ktsb_register(void)
1687 unsigned long pa
, ret
;
1689 pa
= kern_base
+ ((unsigned long)&ktsb_descr
[0] - KERNBASE
);
1691 ret
= sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR
, pa
);
1693 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1694 "errors with %lx\n", pa
, ret
);
1699 static void __init
sun4u_linear_pte_xor_finalize(void)
1701 #ifndef CONFIG_DEBUG_PAGEALLOC
1702 /* This is where we would add Panther support for
1703 * 32MB and 256MB pages.
1708 static void __init
sun4v_linear_pte_xor_finalize(void)
1710 #ifndef CONFIG_DEBUG_PAGEALLOC
1711 if (cpu_pgsz_mask
& HV_PGSZ_MASK_256MB
) {
1712 kern_linear_pte_xor
[1] = (_PAGE_VALID
| _PAGE_SZ256MB_4V
) ^
1713 0xfffff80000000000UL
;
1714 kern_linear_pte_xor
[1] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
1715 _PAGE_P_4V
| _PAGE_W_4V
);
1717 kern_linear_pte_xor
[1] = kern_linear_pte_xor
[0];
1720 if (cpu_pgsz_mask
& HV_PGSZ_MASK_2GB
) {
1721 kern_linear_pte_xor
[2] = (_PAGE_VALID
| _PAGE_SZ2GB_4V
) ^
1722 0xfffff80000000000UL
;
1723 kern_linear_pte_xor
[2] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
1724 _PAGE_P_4V
| _PAGE_W_4V
);
1726 kern_linear_pte_xor
[2] = kern_linear_pte_xor
[1];
1729 if (cpu_pgsz_mask
& HV_PGSZ_MASK_16GB
) {
1730 kern_linear_pte_xor
[3] = (_PAGE_VALID
| _PAGE_SZ16GB_4V
) ^
1731 0xfffff80000000000UL
;
1732 kern_linear_pte_xor
[3] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
1733 _PAGE_P_4V
| _PAGE_W_4V
);
1735 kern_linear_pte_xor
[3] = kern_linear_pte_xor
[2];
1740 /* paging_init() sets up the page tables */
1742 static unsigned long last_valid_pfn
;
1743 pgd_t swapper_pg_dir
[2048];
1745 static void sun4u_pgprot_init(void);
1746 static void sun4v_pgprot_init(void);
1748 void __init
paging_init(void)
1750 unsigned long end_pfn
, shift
, phys_base
;
1751 unsigned long real_end
, i
;
1754 /* These build time checkes make sure that the dcache_dirty_cpu()
1755 * page->flags usage will work.
1757 * When a page gets marked as dcache-dirty, we store the
1758 * cpu number starting at bit 32 in the page->flags. Also,
1759 * functions like clear_dcache_dirty_cpu use the cpu mask
1760 * in 13-bit signed-immediate instruction fields.
1764 * Page flags must not reach into upper 32 bits that are used
1765 * for the cpu number
1767 BUILD_BUG_ON(NR_PAGEFLAGS
> 32);
1770 * The bit fields placed in the high range must not reach below
1771 * the 32 bit boundary. Otherwise we cannot place the cpu field
1772 * at the 32 bit boundary.
1774 BUILD_BUG_ON(SECTIONS_WIDTH
+ NODES_WIDTH
+ ZONES_WIDTH
+
1775 ilog2(roundup_pow_of_two(NR_CPUS
)) > 32);
1777 BUILD_BUG_ON(NR_CPUS
> 4096);
1779 kern_base
= (prom_boot_mapping_phys_low
>> 22UL) << 22UL;
1780 kern_size
= (unsigned long)&_end
- (unsigned long)KERNBASE
;
1782 /* Invalidate both kernel TSBs. */
1783 memset(swapper_tsb
, 0x40, sizeof(swapper_tsb
));
1784 #ifndef CONFIG_DEBUG_PAGEALLOC
1785 memset(swapper_4m_tsb
, 0x40, sizeof(swapper_4m_tsb
));
1788 if (tlb_type
== hypervisor
)
1789 sun4v_pgprot_init();
1791 sun4u_pgprot_init();
1793 if (tlb_type
== cheetah_plus
||
1794 tlb_type
== hypervisor
) {
1799 if (tlb_type
== hypervisor
)
1800 sun4v_patch_tlb_handlers();
1802 /* Find available physical memory...
1804 * Read it twice in order to work around a bug in openfirmware.
1805 * The call to grab this table itself can cause openfirmware to
1806 * allocate memory, which in turn can take away some space from
1807 * the list of available memory. Reading it twice makes sure
1808 * we really do get the final value.
1810 read_obp_translations();
1811 read_obp_memory("reg", &pall
[0], &pall_ents
);
1812 read_obp_memory("available", &pavail
[0], &pavail_ents
);
1813 read_obp_memory("available", &pavail
[0], &pavail_ents
);
1815 phys_base
= 0xffffffffffffffffUL
;
1816 for (i
= 0; i
< pavail_ents
; i
++) {
1817 phys_base
= min(phys_base
, pavail
[i
].phys_addr
);
1818 memblock_add(pavail
[i
].phys_addr
, pavail
[i
].reg_size
);
1821 memblock_reserve(kern_base
, kern_size
);
1823 find_ramdisk(phys_base
);
1825 memblock_enforce_memory_limit(cmdline_memory_size
);
1827 memblock_allow_resize();
1828 memblock_dump_all();
1830 set_bit(0, mmu_context_bmap
);
1832 shift
= kern_base
+ PAGE_OFFSET
- ((unsigned long)KERNBASE
);
1834 real_end
= (unsigned long)_end
;
1835 num_kernel_image_mappings
= DIV_ROUND_UP(real_end
- KERNBASE
, 1 << 22);
1836 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1837 num_kernel_image_mappings
);
1839 /* Set kernel pgd to upper alias so physical page computations
1842 init_mm
.pgd
+= ((shift
) / (sizeof(pgd_t
)));
1844 memset(swapper_low_pmd_dir
, 0, sizeof(swapper_low_pmd_dir
));
1846 /* Now can init the kernel/bad page tables. */
1847 pud_set(pud_offset(&swapper_pg_dir
[0], 0),
1848 swapper_low_pmd_dir
+ (shift
/ sizeof(pgd_t
)));
1850 inherit_prom_mappings();
1854 /* Ok, we can use our TLB miss and window trap handlers safely. */
1859 prom_build_devicetree();
1860 of_populate_present_mask();
1862 of_fill_in_cpu_data();
1865 if (tlb_type
== hypervisor
) {
1867 mdesc_populate_present_mask(cpu_all_mask
);
1869 mdesc_fill_in_cpu_data(cpu_all_mask
);
1871 mdesc_get_page_sizes(cpu_all_mask
, &cpu_pgsz_mask
);
1873 sun4v_linear_pte_xor_finalize();
1876 sun4v_ktsb_register();
1878 unsigned long impl
, ver
;
1880 cpu_pgsz_mask
= (HV_PGSZ_MASK_8K
| HV_PGSZ_MASK_64K
|
1881 HV_PGSZ_MASK_512K
| HV_PGSZ_MASK_4MB
);
1883 __asm__
__volatile__("rdpr %%ver, %0" : "=r" (ver
));
1884 impl
= ((ver
>> 32) & 0xffff);
1885 if (impl
== PANTHER_IMPL
)
1886 cpu_pgsz_mask
|= (HV_PGSZ_MASK_32MB
|
1887 HV_PGSZ_MASK_256MB
);
1889 sun4u_linear_pte_xor_finalize();
1892 /* Flush the TLBs and the 4M TSB so that the updated linear
1893 * pte XOR settings are realized for all mappings.
1896 #ifndef CONFIG_DEBUG_PAGEALLOC
1897 memset(swapper_4m_tsb
, 0x40, sizeof(swapper_4m_tsb
));
1901 /* Setup bootmem... */
1902 last_valid_pfn
= end_pfn
= bootmem_init(phys_base
);
1904 /* Once the OF device tree and MDESC have been setup, we know
1905 * the list of possible cpus. Therefore we can allocate the
1908 for_each_possible_cpu(i
) {
1909 node
= cpu_to_node(i
);
1911 softirq_stack
[i
] = __alloc_bootmem_node(NODE_DATA(node
),
1914 hardirq_stack
[i
] = __alloc_bootmem_node(NODE_DATA(node
),
1919 kernel_physical_mapping_init();
1922 unsigned long max_zone_pfns
[MAX_NR_ZONES
];
1924 memset(max_zone_pfns
, 0, sizeof(max_zone_pfns
));
1926 max_zone_pfns
[ZONE_NORMAL
] = end_pfn
;
1928 free_area_init_nodes(max_zone_pfns
);
1931 printk("Booting Linux...\n");
1934 int __devinit
page_in_phys_avail(unsigned long paddr
)
1940 for (i
= 0; i
< pavail_ents
; i
++) {
1941 unsigned long start
, end
;
1943 start
= pavail
[i
].phys_addr
;
1944 end
= start
+ pavail
[i
].reg_size
;
1946 if (paddr
>= start
&& paddr
< end
)
1949 if (paddr
>= kern_base
&& paddr
< (kern_base
+ kern_size
))
1951 #ifdef CONFIG_BLK_DEV_INITRD
1952 if (paddr
>= __pa(initrd_start
) &&
1953 paddr
< __pa(PAGE_ALIGN(initrd_end
)))
1960 static struct linux_prom64_registers pavail_rescan
[MAX_BANKS
] __initdata
;
1961 static int pavail_rescan_ents __initdata
;
1963 /* Certain OBP calls, such as fetching "available" properties, can
1964 * claim physical memory. So, along with initializing the valid
1965 * address bitmap, what we do here is refetch the physical available
1966 * memory list again, and make sure it provides at least as much
1967 * memory as 'pavail' does.
1969 static void __init
setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap
)
1973 read_obp_memory("available", &pavail_rescan
[0], &pavail_rescan_ents
);
1975 for (i
= 0; i
< pavail_ents
; i
++) {
1976 unsigned long old_start
, old_end
;
1978 old_start
= pavail
[i
].phys_addr
;
1979 old_end
= old_start
+ pavail
[i
].reg_size
;
1980 while (old_start
< old_end
) {
1983 for (n
= 0; n
< pavail_rescan_ents
; n
++) {
1984 unsigned long new_start
, new_end
;
1986 new_start
= pavail_rescan
[n
].phys_addr
;
1987 new_end
= new_start
+
1988 pavail_rescan
[n
].reg_size
;
1990 if (new_start
<= old_start
&&
1991 new_end
>= (old_start
+ PAGE_SIZE
)) {
1992 set_bit(old_start
>> 22, bitmap
);
1997 prom_printf("mem_init: Lost memory in pavail\n");
1998 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
1999 pavail
[i
].phys_addr
,
2000 pavail
[i
].reg_size
);
2001 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2002 pavail_rescan
[i
].phys_addr
,
2003 pavail_rescan
[i
].reg_size
);
2004 prom_printf("mem_init: Cannot continue, aborting.\n");
2008 old_start
+= PAGE_SIZE
;
2013 static void __init
patch_tlb_miss_handler_bitmap(void)
2015 extern unsigned int valid_addr_bitmap_insn
[];
2016 extern unsigned int valid_addr_bitmap_patch
[];
2018 valid_addr_bitmap_insn
[1] = valid_addr_bitmap_patch
[1];
2020 valid_addr_bitmap_insn
[0] = valid_addr_bitmap_patch
[0];
2021 flushi(&valid_addr_bitmap_insn
[0]);
2024 void __init
mem_init(void)
2026 unsigned long codepages
, datapages
, initpages
;
2027 unsigned long addr
, last
;
2029 addr
= PAGE_OFFSET
+ kern_base
;
2030 last
= PAGE_ALIGN(kern_size
) + addr
;
2031 while (addr
< last
) {
2032 set_bit(__pa(addr
) >> 22, sparc64_valid_addr_bitmap
);
2036 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap
);
2037 patch_tlb_miss_handler_bitmap();
2039 high_memory
= __va(last_valid_pfn
<< PAGE_SHIFT
);
2041 #ifdef CONFIG_NEED_MULTIPLE_NODES
2044 for_each_online_node(i
) {
2045 if (NODE_DATA(i
)->node_spanned_pages
!= 0) {
2047 free_all_bootmem_node(NODE_DATA(i
));
2050 totalram_pages
+= free_low_memory_core_early(MAX_NUMNODES
);
2053 totalram_pages
= free_all_bootmem();
2056 /* We subtract one to account for the mem_map_zero page
2059 totalram_pages
-= 1;
2060 num_physpages
= totalram_pages
;
2063 * Set up the zero page, mark it reserved, so that page count
2064 * is not manipulated when freeing the page from user ptes.
2066 mem_map_zero
= alloc_pages(GFP_KERNEL
|__GFP_ZERO
, 0);
2067 if (mem_map_zero
== NULL
) {
2068 prom_printf("paging_init: Cannot alloc zero page.\n");
2071 SetPageReserved(mem_map_zero
);
2073 codepages
= (((unsigned long) _etext
) - ((unsigned long) _start
));
2074 codepages
= PAGE_ALIGN(codepages
) >> PAGE_SHIFT
;
2075 datapages
= (((unsigned long) _edata
) - ((unsigned long) _etext
));
2076 datapages
= PAGE_ALIGN(datapages
) >> PAGE_SHIFT
;
2077 initpages
= (((unsigned long) __init_end
) - ((unsigned long) __init_begin
));
2078 initpages
= PAGE_ALIGN(initpages
) >> PAGE_SHIFT
;
2080 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
2081 nr_free_pages() << (PAGE_SHIFT
-10),
2082 codepages
<< (PAGE_SHIFT
-10),
2083 datapages
<< (PAGE_SHIFT
-10),
2084 initpages
<< (PAGE_SHIFT
-10),
2085 PAGE_OFFSET
, (last_valid_pfn
<< PAGE_SHIFT
));
2087 if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
)
2088 cheetah_ecache_flush_init();
2091 void free_initmem(void)
2093 unsigned long addr
, initend
;
2096 /* If the physical memory maps were trimmed by kernel command
2097 * line options, don't even try freeing this initmem stuff up.
2098 * The kernel image could have been in the trimmed out region
2099 * and if so the freeing below will free invalid page structs.
2101 if (cmdline_memory_size
)
2105 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2107 addr
= PAGE_ALIGN((unsigned long)(__init_begin
));
2108 initend
= (unsigned long)(__init_end
) & PAGE_MASK
;
2109 for (; addr
< initend
; addr
+= PAGE_SIZE
) {
2114 ((unsigned long) __va(kern_base
)) -
2115 ((unsigned long) KERNBASE
));
2116 memset((void *)addr
, POISON_FREE_INITMEM
, PAGE_SIZE
);
2119 p
= virt_to_page(page
);
2121 ClearPageReserved(p
);
2130 #ifdef CONFIG_BLK_DEV_INITRD
2131 void free_initrd_mem(unsigned long start
, unsigned long end
)
2134 printk ("Freeing initrd memory: %ldk freed\n", (end
- start
) >> 10);
2135 for (; start
< end
; start
+= PAGE_SIZE
) {
2136 struct page
*p
= virt_to_page(start
);
2138 ClearPageReserved(p
);
2147 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2148 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2149 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2150 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2151 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2152 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2154 pgprot_t PAGE_KERNEL __read_mostly
;
2155 EXPORT_SYMBOL(PAGE_KERNEL
);
2157 pgprot_t PAGE_KERNEL_LOCKED __read_mostly
;
2158 pgprot_t PAGE_COPY __read_mostly
;
2160 pgprot_t PAGE_SHARED __read_mostly
;
2161 EXPORT_SYMBOL(PAGE_SHARED
);
2163 unsigned long pg_iobits __read_mostly
;
2165 unsigned long _PAGE_IE __read_mostly
;
2166 EXPORT_SYMBOL(_PAGE_IE
);
2168 unsigned long _PAGE_E __read_mostly
;
2169 EXPORT_SYMBOL(_PAGE_E
);
2171 unsigned long _PAGE_CACHE __read_mostly
;
2172 EXPORT_SYMBOL(_PAGE_CACHE
);
2174 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2175 unsigned long vmemmap_table
[VMEMMAP_SIZE
];
2177 static long __meminitdata addr_start
, addr_end
;
2178 static int __meminitdata node_start
;
2180 int __meminit
vmemmap_populate(struct page
*start
, unsigned long nr
, int node
)
2182 unsigned long vstart
= (unsigned long) start
;
2183 unsigned long vend
= (unsigned long) (start
+ nr
);
2184 unsigned long phys_start
= (vstart
- VMEMMAP_BASE
);
2185 unsigned long phys_end
= (vend
- VMEMMAP_BASE
);
2186 unsigned long addr
= phys_start
& VMEMMAP_CHUNK_MASK
;
2187 unsigned long end
= VMEMMAP_ALIGN(phys_end
);
2188 unsigned long pte_base
;
2190 pte_base
= (_PAGE_VALID
| _PAGE_SZ4MB_4U
|
2191 _PAGE_CP_4U
| _PAGE_CV_4U
|
2192 _PAGE_P_4U
| _PAGE_W_4U
);
2193 if (tlb_type
== hypervisor
)
2194 pte_base
= (_PAGE_VALID
| _PAGE_SZ4MB_4V
|
2195 _PAGE_CP_4V
| _PAGE_CV_4V
|
2196 _PAGE_P_4V
| _PAGE_W_4V
);
2198 for (; addr
< end
; addr
+= VMEMMAP_CHUNK
) {
2199 unsigned long *vmem_pp
=
2200 vmemmap_table
+ (addr
>> VMEMMAP_CHUNK_SHIFT
);
2203 if (!(*vmem_pp
& _PAGE_VALID
)) {
2204 block
= vmemmap_alloc_block(1UL << 22, node
);
2208 *vmem_pp
= pte_base
| __pa(block
);
2210 /* check to see if we have contiguous blocks */
2211 if (addr_end
!= addr
|| node_start
!= node
) {
2213 printk(KERN_DEBUG
" [%lx-%lx] on node %d\n",
2214 addr_start
, addr_end
-1, node_start
);
2218 addr_end
= addr
+ VMEMMAP_CHUNK
;
2224 void __meminit
vmemmap_populate_print_last(void)
2227 printk(KERN_DEBUG
" [%lx-%lx] on node %d\n",
2228 addr_start
, addr_end
-1, node_start
);
2234 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2236 static void prot_init_common(unsigned long page_none
,
2237 unsigned long page_shared
,
2238 unsigned long page_copy
,
2239 unsigned long page_readonly
,
2240 unsigned long page_exec_bit
)
2242 PAGE_COPY
= __pgprot(page_copy
);
2243 PAGE_SHARED
= __pgprot(page_shared
);
2245 protection_map
[0x0] = __pgprot(page_none
);
2246 protection_map
[0x1] = __pgprot(page_readonly
& ~page_exec_bit
);
2247 protection_map
[0x2] = __pgprot(page_copy
& ~page_exec_bit
);
2248 protection_map
[0x3] = __pgprot(page_copy
& ~page_exec_bit
);
2249 protection_map
[0x4] = __pgprot(page_readonly
);
2250 protection_map
[0x5] = __pgprot(page_readonly
);
2251 protection_map
[0x6] = __pgprot(page_copy
);
2252 protection_map
[0x7] = __pgprot(page_copy
);
2253 protection_map
[0x8] = __pgprot(page_none
);
2254 protection_map
[0x9] = __pgprot(page_readonly
& ~page_exec_bit
);
2255 protection_map
[0xa] = __pgprot(page_shared
& ~page_exec_bit
);
2256 protection_map
[0xb] = __pgprot(page_shared
& ~page_exec_bit
);
2257 protection_map
[0xc] = __pgprot(page_readonly
);
2258 protection_map
[0xd] = __pgprot(page_readonly
);
2259 protection_map
[0xe] = __pgprot(page_shared
);
2260 protection_map
[0xf] = __pgprot(page_shared
);
2263 static void __init
sun4u_pgprot_init(void)
2265 unsigned long page_none
, page_shared
, page_copy
, page_readonly
;
2266 unsigned long page_exec_bit
;
2269 PAGE_KERNEL
= __pgprot (_PAGE_PRESENT_4U
| _PAGE_VALID
|
2270 _PAGE_CACHE_4U
| _PAGE_P_4U
|
2271 __ACCESS_BITS_4U
| __DIRTY_BITS_4U
|
2273 PAGE_KERNEL_LOCKED
= __pgprot (_PAGE_PRESENT_4U
| _PAGE_VALID
|
2274 _PAGE_CACHE_4U
| _PAGE_P_4U
|
2275 __ACCESS_BITS_4U
| __DIRTY_BITS_4U
|
2276 _PAGE_EXEC_4U
| _PAGE_L_4U
);
2278 _PAGE_IE
= _PAGE_IE_4U
;
2279 _PAGE_E
= _PAGE_E_4U
;
2280 _PAGE_CACHE
= _PAGE_CACHE_4U
;
2282 pg_iobits
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| __DIRTY_BITS_4U
|
2283 __ACCESS_BITS_4U
| _PAGE_E_4U
);
2285 #ifdef CONFIG_DEBUG_PAGEALLOC
2286 kern_linear_pte_xor
[0] = _PAGE_VALID
^ 0xfffff80000000000UL
;
2288 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZ4MB_4U
) ^
2289 0xfffff80000000000UL
;
2291 kern_linear_pte_xor
[0] |= (_PAGE_CP_4U
| _PAGE_CV_4U
|
2292 _PAGE_P_4U
| _PAGE_W_4U
);
2294 for (i
= 1; i
< 4; i
++)
2295 kern_linear_pte_xor
[i
] = kern_linear_pte_xor
[0];
2297 _PAGE_ALL_SZ_BITS
= (_PAGE_SZ4MB_4U
| _PAGE_SZ512K_4U
|
2298 _PAGE_SZ64K_4U
| _PAGE_SZ8K_4U
|
2299 _PAGE_SZ32MB_4U
| _PAGE_SZ256MB_4U
);
2302 page_none
= _PAGE_PRESENT_4U
| _PAGE_ACCESSED_4U
| _PAGE_CACHE_4U
;
2303 page_shared
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
2304 __ACCESS_BITS_4U
| _PAGE_WRITE_4U
| _PAGE_EXEC_4U
);
2305 page_copy
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
2306 __ACCESS_BITS_4U
| _PAGE_EXEC_4U
);
2307 page_readonly
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
2308 __ACCESS_BITS_4U
| _PAGE_EXEC_4U
);
2310 page_exec_bit
= _PAGE_EXEC_4U
;
2312 prot_init_common(page_none
, page_shared
, page_copy
, page_readonly
,
2316 static void __init
sun4v_pgprot_init(void)
2318 unsigned long page_none
, page_shared
, page_copy
, page_readonly
;
2319 unsigned long page_exec_bit
;
2322 PAGE_KERNEL
= __pgprot (_PAGE_PRESENT_4V
| _PAGE_VALID
|
2323 _PAGE_CACHE_4V
| _PAGE_P_4V
|
2324 __ACCESS_BITS_4V
| __DIRTY_BITS_4V
|
2326 PAGE_KERNEL_LOCKED
= PAGE_KERNEL
;
2328 _PAGE_IE
= _PAGE_IE_4V
;
2329 _PAGE_E
= _PAGE_E_4V
;
2330 _PAGE_CACHE
= _PAGE_CACHE_4V
;
2332 #ifdef CONFIG_DEBUG_PAGEALLOC
2333 kern_linear_pte_xor
[0] = _PAGE_VALID
^ 0xfffff80000000000UL
;
2335 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZ4MB_4V
) ^
2336 0xfffff80000000000UL
;
2338 kern_linear_pte_xor
[0] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
2339 _PAGE_P_4V
| _PAGE_W_4V
);
2341 for (i
= 1; i
< 4; i
++)
2342 kern_linear_pte_xor
[i
] = kern_linear_pte_xor
[0];
2344 pg_iobits
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| __DIRTY_BITS_4V
|
2345 __ACCESS_BITS_4V
| _PAGE_E_4V
);
2347 _PAGE_ALL_SZ_BITS
= (_PAGE_SZ16GB_4V
| _PAGE_SZ2GB_4V
|
2348 _PAGE_SZ256MB_4V
| _PAGE_SZ32MB_4V
|
2349 _PAGE_SZ4MB_4V
| _PAGE_SZ512K_4V
|
2350 _PAGE_SZ64K_4V
| _PAGE_SZ8K_4V
);
2352 page_none
= _PAGE_PRESENT_4V
| _PAGE_ACCESSED_4V
| _PAGE_CACHE_4V
;
2353 page_shared
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
2354 __ACCESS_BITS_4V
| _PAGE_WRITE_4V
| _PAGE_EXEC_4V
);
2355 page_copy
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
2356 __ACCESS_BITS_4V
| _PAGE_EXEC_4V
);
2357 page_readonly
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
2358 __ACCESS_BITS_4V
| _PAGE_EXEC_4V
);
2360 page_exec_bit
= _PAGE_EXEC_4V
;
2362 prot_init_common(page_none
, page_shared
, page_copy
, page_readonly
,
2366 unsigned long pte_sz_bits(unsigned long sz
)
2368 if (tlb_type
== hypervisor
) {
2372 return _PAGE_SZ8K_4V
;
2374 return _PAGE_SZ64K_4V
;
2376 return _PAGE_SZ512K_4V
;
2377 case 4 * 1024 * 1024:
2378 return _PAGE_SZ4MB_4V
;
2384 return _PAGE_SZ8K_4U
;
2386 return _PAGE_SZ64K_4U
;
2388 return _PAGE_SZ512K_4U
;
2389 case 4 * 1024 * 1024:
2390 return _PAGE_SZ4MB_4U
;
2395 pte_t
mk_pte_io(unsigned long page
, pgprot_t prot
, int space
, unsigned long page_size
)
2399 pte_val(pte
) = page
| pgprot_val(pgprot_noncached(prot
));
2400 pte_val(pte
) |= (((unsigned long)space
) << 32);
2401 pte_val(pte
) |= pte_sz_bits(page_size
);

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
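
/* This is the helper that gives the kernel image its 4MB mapping:
 * remap_kernel() earlier in this file builds the kernel's TTE from
 * the boot-time physical base with it.  Note that only the sun4u
 * variant sets the lock bit (_PAGE_L_4U); sun4v has no lock bit and
 * keeps the kernel mapping resident through the hypervisor instead.
 */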

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}

	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}

static pte_t *get_from_cache(struct mm_struct *mm)
{
	struct page *page;
	pte_t *ret;

	spin_lock(&mm->page_table_lock);
	page = mm->context.pgtable_page;
	ret = NULL;
	if (page) {
		void *p = page_address(page);

		mm->context.pgtable_page = NULL;

		ret = (pte_t *) (p + (PAGE_SIZE / 2));
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

static struct page *__alloc_for_cache(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);

	if (page) {
		spin_lock(&mm->page_table_lock);
		if (!mm->context.pgtable_page) {
			atomic_set(&page->_count, 2);
			mm->context.pgtable_page = page;
		}
		spin_unlock(&mm->page_table_lock);
	}

	return page;
}
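
/* A PTE table is only PAGE_SIZE / 2 bytes, so each allocated page
 * holds two tables: the first caller gets the first half, the page
 * is parked in mm->context.pgtable_page with its refcount raised to
 * 2, and get_from_cache() hands the second half to the next caller.
 * The pte_free_kernel()/__pte_free() paths below drop one reference
 * per half, so the page goes back to the allocator only once both
 * halves are free.
 */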

pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	page = __alloc_for_cache(mm);
	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	page = __alloc_for_cache(mm);
	if (page) {
		pgtable_page_ctor(page);
		pte = (pte_t *) page_address(page);
	}

	return pte;
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	struct page *page = virt_to_page(pte);
	if (put_page_testzero(page))
		free_hot_cold_page(page, 0);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
{
	if (pgprot_val(pgprot) & _PAGE_VALID)
		pmd_val(pmd) |= PMD_HUGE_PRESENT;
	if (tlb_type == hypervisor) {
		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
			pmd_val(pmd) |= PMD_HUGE_WRITE;
		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
			pmd_val(pmd) |= PMD_HUGE_EXEC;

		if (!for_modify) {
			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
				pmd_val(pmd) |= PMD_HUGE_DIRTY;
		}
	} else {
		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
			pmd_val(pmd) |= PMD_HUGE_WRITE;
		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
			pmd_val(pmd) |= PMD_HUGE_EXEC;

		if (!for_modify) {
			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
				pmd_val(pmd) |= PMD_HUGE_DIRTY;
		}
	}

	return pmd;
}
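
/* pmd_set_protbits() serves two callers: pfn_pmd() passes
 * for_modify == false so a freshly built pmd also inherits the
 * accessed/dirty state encoded in the pgprot, while pmd_modify()
 * passes true because it only cleared the present/write/exec bits
 * and must not clobber the young/dirty state the pmd has already
 * accumulated.
 */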

pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
	pmd_val(pmd) |= PMD_ISHUGE;
	pmd = pmd_set_protbits(pmd, pgprot, false);
	return pmd;
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
			  PMD_HUGE_WRITE |
			  PMD_HUGE_EXEC);
	pmd = pmd_set_protbits(pmd, newprot, true);
	return pmd;
}

pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long pte = 0;

	if (pmd_val(entry) & PMD_HUGE_PRESENT)
		pte |= _PAGE_VALID;

	if (tlb_type == hypervisor) {
		if (pmd_val(entry) & PMD_HUGE_PRESENT)
			pte |= _PAGE_PRESENT_4V;
		if (pmd_val(entry) & PMD_HUGE_EXEC)
			pte |= _PAGE_EXEC_4V;
		if (pmd_val(entry) & PMD_HUGE_WRITE)
			pte |= _PAGE_W_4V;
		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
			pte |= _PAGE_ACCESSED_4V;
		if (pmd_val(entry) & PMD_HUGE_DIRTY)
			pte |= _PAGE_MODIFIED_4V;
		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
	} else {
		if (pmd_val(entry) & PMD_HUGE_PRESENT)
			pte |= _PAGE_PRESENT_4U;
		if (pmd_val(entry) & PMD_HUGE_EXEC)
			pte |= _PAGE_EXEC_4U;
		if (pmd_val(entry) & PMD_HUGE_WRITE)
			pte |= _PAGE_W_4U;
		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
			pte |= _PAGE_ACCESSED_4U;
		if (pmd_val(entry) & PMD_HUGE_DIRTY)
			pte |= _PAGE_MODIFIED_4U;
		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
	}

	return __pgprot(pte);
}

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;
	pgprot_t prot;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
	pte <<= PMD_PADDR_SHIFT;
	pte |= _PAGE_VALID;

	prot = pmd_pgprot(entry);

	if (tlb_type == hypervisor)
		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
	else
		pgprot_val(prot) |= _PAGE_SZHUGE_4U;

	pte |= pgprot_val(prot);

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
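
/* This is the huge-pmd analogue of update_mmu_cache(): the physical
 * address is recovered by masking off PMD_HUGE_PROTBITS and shifting
 * left by PMD_PADDR_SHIFT, the protection bits are re-expanded via
 * pmd_pgprot() plus the huge page size field, and the synthesized
 * TTE is preloaded into the MM_TSB_HUGE TSB so the next miss on this
 * address finds a ready entry instead of walking the page tables.
 */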

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
#endif
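
/* Sketch of the resulting context register update (assuming the
 * default 8K base page size):
 *
 *	ctx = (ctx & ~CTX_PGSZ_MASK) |
 *	      (CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT) |
 *	      (CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT);
 *
 * The PGSZ0 field keeps matching base pages while PGSZ1 selects the
 * huge page size, which is what lets the second half of the D-TLB
 * match huge pages on cheetah_plus.
 */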