/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;
/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
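/* Editorial note (not in the original source): the lookup itself is done
 * by the kernel TLB miss handlers in assembler (ktlb.S), but it is
 * roughly equivalent to:
 *
 *	bit  = paddr >> 28;			 one bit per 256MB chunk
 *	word = kpte_linear_bitmap[bit / BITS_PER_LONG];
 *	use_256MB_page = (word >> (bit % BITS_PER_LONG)) & 1;
 *
 * which then selects kern_linear_pte_xor[0] or [1] as the TTE template.
 */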
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
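/* Editorial note (not in the original source): with this layout a
 * dcache-dirty page records its state entirely in page->flags -- the
 * PG_arch_1 bit marks it dirty and the owning cpu number sits at bit 32,
 * conceptually:
 *
 *	flags |= (1UL << PG_dcache_dirty) | (cpu << PG_dcache_cpu_shift);
 *
 * set_dcache_dirty() and clear_dcache_dirty_cpu() below update this word
 * with casx so concurrent updaters cannot lose the dirty bit.
 */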
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
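/* Editorial note (not in the original source): on cheetah_plus and sun4v
 * the TSB is accessed by physical address (see tsb_phys_patch() later in
 * this file), which is why the entry pointer is converted with __pa()
 * before __tsb_insert() stores the tag/PTE pair.
 */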
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n",
	       global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n",
	       global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
	       global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];

		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}
static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
610 static void __init
inherit_prom_mappings(void)
612 read_obp_translations();
614 /* Now fixup OBP's idea about where we really are mapped. */
615 printk("Remapping the kernel... ");
620 void prom_world(int enter
)
623 set_fs((mm_segment_t
) { get_thread_current_ds() });
625 __asm__
__volatile__("flushw");
628 void __flush_dcache_range(unsigned long start
, unsigned long end
)
632 if (tlb_type
== spitfire
) {
635 for (va
= start
; va
< end
; va
+= 32) {
636 spitfire_put_dcache_tag(va
& 0x3fe0, 0x0);
640 } else if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
) {
643 for (va
= start
; va
< end
; va
+= 32)
644 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
648 "i" (ASI_DCACHE_INVALIDATE
));
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
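/* Editorial note (not in the original source): a context value packs a
 * generation "version" in the high bits and a context number in the low
 * CTX_NR_BITS.  get_new_mmu_context() below scans mmu_context_bmap for a
 * free number and, when the number space wraps, bumps the version so that
 * every mm holding a stale context is forced to re-allocate (via the
 * version mis-match tests in mmu_context.h and
 * smp_new_mmu_context_version()).
 */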
659 /* Caller does TLB context flushing on local CPU if necessary.
660 * The caller also ensures that CTX_VALID(mm->context) is false.
662 * We must be careful about boundary cases so that we never
663 * let the user have CTX 0 (nucleus) or we ever use a CTX
664 * version of zero (and thus NO_CONTEXT would not be caught
665 * by version mis-match tests in mmu_context.h).
667 * Always invoked with interrupts disabled.
669 void get_new_mmu_context(struct mm_struct
*mm
)
671 unsigned long ctx
, new_ctx
;
672 unsigned long orig_pgsz_bits
;
676 spin_lock_irqsave(&ctx_alloc_lock
, flags
);
677 orig_pgsz_bits
= (mm
->context
.sparc64_ctx_val
& CTX_PGSZ_MASK
);
678 ctx
= (tlb_context_cache
+ 1) & CTX_NR_MASK
;
679 new_ctx
= find_next_zero_bit(mmu_context_bmap
, 1 << CTX_NR_BITS
, ctx
);
681 if (new_ctx
>= (1 << CTX_NR_BITS
)) {
682 new_ctx
= find_next_zero_bit(mmu_context_bmap
, ctx
, 1);
683 if (new_ctx
>= ctx
) {
685 new_ctx
= (tlb_context_cache
& CTX_VERSION_MASK
) +
688 new_ctx
= CTX_FIRST_VERSION
;
690 /* Don't call memset, for 16 entries that's just
693 mmu_context_bmap
[0] = 3;
694 mmu_context_bmap
[1] = 0;
695 mmu_context_bmap
[2] = 0;
696 mmu_context_bmap
[3] = 0;
697 for (i
= 4; i
< CTX_BMAP_SLOTS
; i
+= 4) {
698 mmu_context_bmap
[i
+ 0] = 0;
699 mmu_context_bmap
[i
+ 1] = 0;
700 mmu_context_bmap
[i
+ 2] = 0;
701 mmu_context_bmap
[i
+ 3] = 0;
707 mmu_context_bmap
[new_ctx
>>6] |= (1UL << (new_ctx
& 63));
708 new_ctx
|= (tlb_context_cache
& CTX_VERSION_MASK
);
710 tlb_context_cache
= new_ctx
;
711 mm
->context
.sparc64_ctx_val
= new_ctx
| orig_pgsz_bits
;
712 spin_unlock_irqrestore(&ctx_alloc_lock
, flags
);
714 if (unlikely(new_version
))
715 smp_new_mmu_context_version();
718 /* Find a free area for the bootmem map, avoiding the kernel image
719 * and the initial ramdisk.
721 static unsigned long __init
choose_bootmap_pfn(unsigned long start_pfn
,
722 unsigned long end_pfn
)
724 unsigned long avoid_start
, avoid_end
, bootmap_size
;
727 bootmap_size
= bootmem_bootmap_pages(end_pfn
- start_pfn
);
728 bootmap_size
<<= PAGE_SHIFT
;
730 avoid_start
= avoid_end
= 0;
731 #ifdef CONFIG_BLK_DEV_INITRD
732 avoid_start
= initrd_start
;
733 avoid_end
= PAGE_ALIGN(initrd_end
);
736 for (i
= 0; i
< pavail_ents
; i
++) {
737 unsigned long start
, end
;
739 start
= pavail
[i
].phys_addr
;
740 end
= start
+ pavail
[i
].reg_size
;
742 while (start
< end
) {
743 if (start
>= kern_base
&&
744 start
< PAGE_ALIGN(kern_base
+ kern_size
)) {
745 start
= PAGE_ALIGN(kern_base
+ kern_size
);
748 if (start
>= avoid_start
&& start
< avoid_end
) {
753 if ((end
- start
) < bootmap_size
)
756 if (start
< kern_base
&&
757 (start
+ bootmap_size
) > kern_base
) {
758 start
= PAGE_ALIGN(kern_base
+ kern_size
);
762 if (start
< avoid_start
&&
763 (start
+ bootmap_size
) > avoid_start
) {
768 /* OK, it doesn't overlap anything, use it. */
769 return start
>> PAGE_SHIFT
;
773 prom_printf("Cannot find free area for bootmap, aborting.\n");
777 static void __init
trim_pavail(unsigned long *cur_size_p
,
778 unsigned long *end_of_phys_p
)
780 unsigned long to_trim
= *cur_size_p
- cmdline_memory_size
;
781 unsigned long avoid_start
, avoid_end
;
784 to_trim
= PAGE_ALIGN(to_trim
);
786 avoid_start
= avoid_end
= 0;
787 #ifdef CONFIG_BLK_DEV_INITRD
788 avoid_start
= initrd_start
;
789 avoid_end
= PAGE_ALIGN(initrd_end
);
792 /* Trim some pavail[] entries in order to satisfy the
793 * requested "mem=xxx" kernel command line specification.
795 * We must not trim off the kernel image area nor the
796 * initial ramdisk range (if any). Also, we must not trim
797 * any pavail[] entry down to zero in order to preserve
798 * the invariant that all pavail[] entries have a non-zero
799 * size which is assumed by all of the code in here.
801 for (i
= 0; i
< pavail_ents
; i
++) {
802 unsigned long start
, end
, kern_end
;
803 unsigned long trim_low
, trim_high
, n
;
805 kern_end
= PAGE_ALIGN(kern_base
+ kern_size
);
807 trim_low
= start
= pavail
[i
].phys_addr
;
808 trim_high
= end
= start
+ pavail
[i
].reg_size
;
810 if (kern_base
>= start
&&
812 trim_low
= kern_base
;
816 if (kern_end
>= start
&&
818 trim_high
= kern_end
;
821 avoid_start
>= start
&&
823 if (trim_low
> avoid_start
)
824 trim_low
= avoid_start
;
825 if (avoid_end
>= end
)
829 avoid_end
>= start
&&
831 if (trim_high
< avoid_end
)
832 trim_high
= avoid_end
;
835 if (trim_high
<= trim_low
)
838 if (trim_low
== start
&& trim_high
== end
) {
839 /* Whole chunk is available for trimming.
840 * Trim all except one page, in order to keep
843 n
= (end
- start
) - PAGE_SIZE
;
848 pavail
[i
].phys_addr
+= n
;
849 pavail
[i
].reg_size
-= n
;
853 n
= (trim_low
- start
);
858 pavail
[i
].phys_addr
+= n
;
859 pavail
[i
].reg_size
-= n
;
867 pavail
[i
].reg_size
-= n
;
879 for (i
= 0; i
< pavail_ents
; i
++) {
880 *end_of_phys_p
= pavail
[i
].phys_addr
+
882 *cur_size_p
+= pavail
[i
].reg_size
;
886 /* About pages_avail, this is the value we will use to calculate
887 * the zholes_size[] argument given to free_area_init_node(). The
888 * page allocator uses this to calculate nr_kernel_pages,
889 * nr_all_pages and zone->present_pages. On NUMA it is used
890 * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
892 * So this number should really be set to what the page allocator
893 * actually ends up with. This means:
894 * 1) It should include bootmem map pages, we'll release those.
895 * 2) It should not include the kernel image, except for the
896 * __init sections which we will also release.
897 * 3) It should include the initrd image, since we'll release
900 static unsigned long __init
bootmem_init(unsigned long *pages_avail
,
901 unsigned long phys_base
)
903 unsigned long bootmap_size
, end_pfn
;
904 unsigned long end_of_phys_memory
= 0UL;
905 unsigned long bootmap_pfn
, bytes_avail
, size
;
909 for (i
= 0; i
< pavail_ents
; i
++) {
910 end_of_phys_memory
= pavail
[i
].phys_addr
+
912 bytes_avail
+= pavail
[i
].reg_size
;
915 /* Determine the location of the initial ramdisk before trying
916 * to honor the "mem=xxx" command line argument. We must know
917 * where the kernel image and the ramdisk image are so that we
918 * do not trim those two areas from the physical memory map.
921 #ifdef CONFIG_BLK_DEV_INITRD
922 /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
923 if (sparc_ramdisk_image
|| sparc_ramdisk_image64
) {
924 unsigned long ramdisk_image
= sparc_ramdisk_image
?
925 sparc_ramdisk_image
: sparc_ramdisk_image64
;
926 ramdisk_image
-= KERNBASE
;
927 initrd_start
= ramdisk_image
+ phys_base
;
928 initrd_end
= initrd_start
+ sparc_ramdisk_size
;
929 if (initrd_end
> end_of_phys_memory
) {
930 printk(KERN_CRIT
"initrd extends beyond end of memory "
931 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
932 initrd_end
, end_of_phys_memory
);
939 if (cmdline_memory_size
&&
940 bytes_avail
> cmdline_memory_size
)
941 trim_pavail(&bytes_avail
,
942 &end_of_phys_memory
);
944 *pages_avail
= bytes_avail
>> PAGE_SHIFT
;
946 end_pfn
= end_of_phys_memory
>> PAGE_SHIFT
;
948 /* Initialize the boot-time allocator. */
949 max_pfn
= max_low_pfn
= end_pfn
;
950 min_low_pfn
= (phys_base
>> PAGE_SHIFT
);
952 bootmap_pfn
= choose_bootmap_pfn(min_low_pfn
, end_pfn
);
954 bootmap_size
= init_bootmem_node(NODE_DATA(0), bootmap_pfn
,
955 min_low_pfn
, end_pfn
);
957 /* Now register the available physical memory with the
960 for (i
= 0; i
< pavail_ents
; i
++)
961 free_bootmem(pavail
[i
].phys_addr
, pavail
[i
].reg_size
);
963 #ifdef CONFIG_BLK_DEV_INITRD
965 size
= initrd_end
- initrd_start
;
967 /* Reserve the initrd image area. */
968 reserve_bootmem(initrd_start
, size
, BOOTMEM_DEFAULT
);
970 initrd_start
+= PAGE_OFFSET
;
971 initrd_end
+= PAGE_OFFSET
;
974 /* Reserve the kernel text/data/bss. */
975 reserve_bootmem(kern_base
, kern_size
, BOOTMEM_DEFAULT
);
976 *pages_avail
-= PAGE_ALIGN(kern_size
) >> PAGE_SHIFT
;
978 /* Add back in the initmem pages. */
979 size
= ((unsigned long)(__init_end
) & PAGE_MASK
) -
980 PAGE_ALIGN((unsigned long)__init_begin
);
981 *pages_avail
+= size
>> PAGE_SHIFT
;
983 /* Reserve the bootmem map. We do not account for it
984 * in pages_avail because we will release that memory
985 * in free_all_bootmem.
988 reserve_bootmem((bootmap_pfn
<< PAGE_SHIFT
), size
, BOOTMEM_DEFAULT
);
990 for (i
= 0; i
< pavail_ents
; i
++) {
991 unsigned long start_pfn
, end_pfn
;
993 start_pfn
= pavail
[i
].phys_addr
>> PAGE_SHIFT
;
994 end_pfn
= (start_pfn
+ (pavail
[i
].reg_size
>> PAGE_SHIFT
));
995 memory_present(0, start_pfn
, end_pfn
);
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
1006 #ifdef CONFIG_DEBUG_PAGEALLOC
1007 static unsigned long __ref
kernel_map_range(unsigned long pstart
,
1008 unsigned long pend
, pgprot_t prot
)
1010 unsigned long vstart
= PAGE_OFFSET
+ pstart
;
1011 unsigned long vend
= PAGE_OFFSET
+ pend
;
1012 unsigned long alloc_bytes
= 0UL;
1014 if ((vstart
& ~PAGE_MASK
) || (vend
& ~PAGE_MASK
)) {
1015 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1020 while (vstart
< vend
) {
1021 unsigned long this_end
, paddr
= __pa(vstart
);
1022 pgd_t
*pgd
= pgd_offset_k(vstart
);
1027 pud
= pud_offset(pgd
, vstart
);
1028 if (pud_none(*pud
)) {
1031 new = __alloc_bootmem(PAGE_SIZE
, PAGE_SIZE
, PAGE_SIZE
);
1032 alloc_bytes
+= PAGE_SIZE
;
1033 pud_populate(&init_mm
, pud
, new);
1036 pmd
= pmd_offset(pud
, vstart
);
1037 if (!pmd_present(*pmd
)) {
1040 new = __alloc_bootmem(PAGE_SIZE
, PAGE_SIZE
, PAGE_SIZE
);
1041 alloc_bytes
+= PAGE_SIZE
;
1042 pmd_populate_kernel(&init_mm
, pmd
, new);
1045 pte
= pte_offset_kernel(pmd
, vstart
);
1046 this_end
= (vstart
+ PMD_SIZE
) & PMD_MASK
;
1047 if (this_end
> vend
)
1050 while (vstart
< this_end
) {
1051 pte_val(*pte
) = (paddr
| pgprot_val(prot
));
1053 vstart
+= PAGE_SIZE
;
extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
1065 static void __init
mark_kpte_bitmap(unsigned long start
, unsigned long end
)
1067 const unsigned long shift_256MB
= 28;
1068 const unsigned long mask_256MB
= ((1UL << shift_256MB
) - 1UL);
1069 const unsigned long size_256MB
= (1UL << shift_256MB
);
1071 while (start
< end
) {
1074 remains
= end
- start
;
1075 if (remains
< size_256MB
)
1078 if (start
& mask_256MB
) {
1079 start
= (start
+ size_256MB
) & ~mask_256MB
;
1083 while (remains
>= size_256MB
) {
1084 unsigned long index
= start
>> shift_256MB
;
1086 __set_bit(index
, kpte_linear_bitmap
);
1088 start
+= size_256MB
;
1089 remains
-= size_256MB
;
1094 static void __init
init_kpte_bitmap(void)
1098 for (i
= 0; i
< pall_ents
; i
++) {
1099 unsigned long phys_start
, phys_end
;
1101 phys_start
= pall
[i
].phys_addr
;
1102 phys_end
= phys_start
+ pall
[i
].reg_size
;
1104 mark_kpte_bitmap(phys_start
, phys_end
);
1108 static void __init
kernel_physical_mapping_init(void)
1110 #ifdef CONFIG_DEBUG_PAGEALLOC
1111 unsigned long i
, mem_alloced
= 0UL;
1113 for (i
= 0; i
< pall_ents
; i
++) {
1114 unsigned long phys_start
, phys_end
;
1116 phys_start
= pall
[i
].phys_addr
;
1117 phys_end
= phys_start
+ pall
[i
].reg_size
;
1119 mem_alloced
+= kernel_map_range(phys_start
, phys_end
,
1123 printk("Allocated %ld bytes for kernel page tables.\n",
1126 kvmap_linear_patch
[0] = 0x01000000; /* nop */
1127 flushi(&kvmap_linear_patch
[0]);
1133 #ifdef CONFIG_DEBUG_PAGEALLOC
1134 void kernel_map_pages(struct page
*page
, int numpages
, int enable
)
1136 unsigned long phys_start
= page_to_pfn(page
) << PAGE_SHIFT
;
1137 unsigned long phys_end
= phys_start
+ (numpages
* PAGE_SIZE
);
1139 kernel_map_range(phys_start
, phys_end
,
1140 (enable
? PAGE_KERNEL
: __pgprot(0)));
1142 flush_tsb_kernel_range(PAGE_OFFSET
+ phys_start
,
1143 PAGE_OFFSET
+ phys_end
);
1145 /* we should perform an IPI and flush all tlbs,
1146 * but that can deadlock->flush only current cpu.
1148 __flush_tlb_kernel_range(PAGE_OFFSET
+ phys_start
,
1149 PAGE_OFFSET
+ phys_end
);
1153 unsigned long __init
find_ecache_flush_span(unsigned long size
)
1157 for (i
= 0; i
< pavail_ents
; i
++) {
1158 if (pavail
[i
].reg_size
>= size
)
1159 return pavail
[i
].phys_addr
;
1165 static void __init
tsb_phys_patch(void)
1167 struct tsb_ldquad_phys_patch_entry
*pquad
;
1168 struct tsb_phys_patch_entry
*p
;
1170 pquad
= &__tsb_ldquad_phys_patch
;
1171 while (pquad
< &__tsb_ldquad_phys_patch_end
) {
1172 unsigned long addr
= pquad
->addr
;
1174 if (tlb_type
== hypervisor
)
1175 *(unsigned int *) addr
= pquad
->sun4v_insn
;
1177 *(unsigned int *) addr
= pquad
->sun4u_insn
;
1179 __asm__
__volatile__("flush %0"
1186 p
= &__tsb_phys_patch
;
1187 while (p
< &__tsb_phys_patch_end
) {
1188 unsigned long addr
= p
->addr
;
1190 *(unsigned int *) addr
= p
->insn
;
1192 __asm__
__volatile__("flush %0"
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
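/* Editorial note (not in the original source): on sun4v, ktsb_descr[0]
 * describes the base-page-size kernel TSB (swapper_tsb) and, when
 * CONFIG_DEBUG_PAGEALLOC is off, ktsb_descr[1] describes the 4MB/256MB
 * linear-mapping TSB (swapper_4m_tsb).  Both descriptors are handed to
 * the hypervisor in sun4v_ktsb_register() via sun4v_mmu_tsb_ctx0().
 */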
1209 static void __init
sun4v_ktsb_init(void)
1211 unsigned long ktsb_pa
;
1213 /* First KTSB for PAGE_SIZE mappings. */
1214 ktsb_pa
= kern_base
+ ((unsigned long)&swapper_tsb
[0] - KERNBASE
);
1216 switch (PAGE_SIZE
) {
1219 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_8K
;
1220 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_8K
;
1224 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_64K
;
1225 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_64K
;
1229 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_512K
;
1230 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_512K
;
1233 case 4 * 1024 * 1024:
1234 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1235 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_4MB
;
1239 ktsb_descr
[0].assoc
= 1;
1240 ktsb_descr
[0].num_ttes
= KERNEL_TSB_NENTRIES
;
1241 ktsb_descr
[0].ctx_idx
= 0;
1242 ktsb_descr
[0].tsb_base
= ktsb_pa
;
1243 ktsb_descr
[0].resv
= 0;
1245 #ifndef CONFIG_DEBUG_PAGEALLOC
1246 /* Second KTSB for 4MB/256MB mappings. */
1247 ktsb_pa
= (kern_base
+
1248 ((unsigned long)&swapper_4m_tsb
[0] - KERNBASE
));
1250 ktsb_descr
[1].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1251 ktsb_descr
[1].pgsz_mask
= (HV_PGSZ_MASK_4MB
|
1252 HV_PGSZ_MASK_256MB
);
1253 ktsb_descr
[1].assoc
= 1;
1254 ktsb_descr
[1].num_ttes
= KERNEL_TSB4M_NENTRIES
;
1255 ktsb_descr
[1].ctx_idx
= 0;
1256 ktsb_descr
[1].tsb_base
= ktsb_pa
;
1257 ktsb_descr
[1].resv
= 0;
1261 void __cpuinit
sun4v_ktsb_register(void)
1263 unsigned long pa
, ret
;
1265 pa
= kern_base
+ ((unsigned long)&ktsb_descr
[0] - KERNBASE
);
1267 ret
= sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR
, pa
);
1269 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1270 "errors with %lx\n", pa
, ret
);
1275 /* paging_init() sets up the page tables */
1277 extern void cheetah_ecache_flush_init(void);
1278 extern void sun4v_patch_tlb_handlers(void);
1280 extern void cpu_probe(void);
1281 extern void central_probe(void);
static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
1289 /* Dummy function */
1290 void __init
setup_per_cpu_areas(void)
1294 void __init
paging_init(void)
1296 unsigned long end_pfn
, pages_avail
, shift
, phys_base
;
1297 unsigned long real_end
, i
;
1299 /* These build time checkes make sure that the dcache_dirty_cpu()
1300 * page->flags usage will work.
1302 * When a page gets marked as dcache-dirty, we store the
1303 * cpu number starting at bit 32 in the page->flags. Also,
1304 * functions like clear_dcache_dirty_cpu use the cpu mask
1305 * in 13-bit signed-immediate instruction fields.
1307 BUILD_BUG_ON(FLAGS_RESERVED
!= 32);
1308 BUILD_BUG_ON(SECTIONS_WIDTH
+ NODES_WIDTH
+ ZONES_WIDTH
+
1309 ilog2(roundup_pow_of_two(NR_CPUS
)) > FLAGS_RESERVED
);
1310 BUILD_BUG_ON(NR_CPUS
> 4096);
1312 kern_base
= (prom_boot_mapping_phys_low
>> 22UL) << 22UL;
1313 kern_size
= (unsigned long)&_end
- (unsigned long)KERNBASE
;
1317 /* Invalidate both kernel TSBs. */
1318 memset(swapper_tsb
, 0x40, sizeof(swapper_tsb
));
1319 #ifndef CONFIG_DEBUG_PAGEALLOC
1320 memset(swapper_4m_tsb
, 0x40, sizeof(swapper_4m_tsb
));
1323 if (tlb_type
== hypervisor
)
1324 sun4v_pgprot_init();
1326 sun4u_pgprot_init();
1328 if (tlb_type
== cheetah_plus
||
1329 tlb_type
== hypervisor
)
1332 if (tlb_type
== hypervisor
) {
1333 sun4v_patch_tlb_handlers();
1337 /* Find available physical memory... */
1338 read_obp_memory("available", &pavail
[0], &pavail_ents
);
1340 phys_base
= 0xffffffffffffffffUL
;
1341 for (i
= 0; i
< pavail_ents
; i
++)
1342 phys_base
= min(phys_base
, pavail
[i
].phys_addr
);
1344 set_bit(0, mmu_context_bmap
);
1346 shift
= kern_base
+ PAGE_OFFSET
- ((unsigned long)KERNBASE
);
1348 real_end
= (unsigned long)_end
;
1349 num_kernel_image_mappings
= DIV_ROUND_UP(real_end
- KERNBASE
, 1 << 22);
1350 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1351 num_kernel_image_mappings
);
1353 /* Set kernel pgd to upper alias so physical page computations
1356 init_mm
.pgd
+= ((shift
) / (sizeof(pgd_t
)));
1358 memset(swapper_low_pmd_dir
, 0, sizeof(swapper_low_pmd_dir
));
1360 /* Now can init the kernel/bad page tables. */
1361 pud_set(pud_offset(&swapper_pg_dir
[0], 0),
1362 swapper_low_pmd_dir
+ (shift
/ sizeof(pgd_t
)));
1364 inherit_prom_mappings();
1366 read_obp_memory("reg", &pall
[0], &pall_ents
);
1370 /* Ok, we can use our TLB miss and window trap handlers safely. */
1375 if (tlb_type
== hypervisor
)
1376 sun4v_ktsb_register();
1378 /* Setup bootmem... */
1380 last_valid_pfn
= end_pfn
= bootmem_init(&pages_avail
, phys_base
);
1382 max_mapnr
= last_valid_pfn
;
1384 kernel_physical_mapping_init();
1386 real_setup_per_cpu_areas();
1388 prom_build_devicetree();
1390 if (tlb_type
== hypervisor
)
1394 unsigned long zones_size
[MAX_NR_ZONES
];
1395 unsigned long zholes_size
[MAX_NR_ZONES
];
1398 for (znum
= 0; znum
< MAX_NR_ZONES
; znum
++)
1399 zones_size
[znum
] = zholes_size
[znum
] = 0;
1401 zones_size
[ZONE_NORMAL
] = end_pfn
;
1402 zholes_size
[ZONE_NORMAL
] = end_pfn
- pages_avail
;
1404 free_area_init_node(0, &contig_page_data
, zones_size
,
1405 __pa(PAGE_OFFSET
) >> PAGE_SHIFT
,
1409 printk("Booting Linux...\n");
1415 static void __init
taint_real_pages(void)
1419 read_obp_memory("available", &pavail_rescan
[0], &pavail_rescan_ents
);
1421 /* Find changes discovered in the physmem available rescan and
1422 * reserve the lost portions in the bootmem maps.
1424 for (i
= 0; i
< pavail_ents
; i
++) {
1425 unsigned long old_start
, old_end
;
1427 old_start
= pavail
[i
].phys_addr
;
1428 old_end
= old_start
+
1430 while (old_start
< old_end
) {
1433 for (n
= 0; n
< pavail_rescan_ents
; n
++) {
1434 unsigned long new_start
, new_end
;
1436 new_start
= pavail_rescan
[n
].phys_addr
;
1437 new_end
= new_start
+
1438 pavail_rescan
[n
].reg_size
;
1440 if (new_start
<= old_start
&&
1441 new_end
>= (old_start
+ PAGE_SIZE
)) {
1442 set_bit(old_start
>> 22,
1443 sparc64_valid_addr_bitmap
);
1447 reserve_bootmem(old_start
, PAGE_SIZE
, BOOTMEM_DEFAULT
);
1450 old_start
+= PAGE_SIZE
;
1455 int __init
page_in_phys_avail(unsigned long paddr
)
1461 for (i
= 0; i
< pavail_rescan_ents
; i
++) {
1462 unsigned long start
, end
;
1464 start
= pavail_rescan
[i
].phys_addr
;
1465 end
= start
+ pavail_rescan
[i
].reg_size
;
1467 if (paddr
>= start
&& paddr
< end
)
1470 if (paddr
>= kern_base
&& paddr
< (kern_base
+ kern_size
))
1472 #ifdef CONFIG_BLK_DEV_INITRD
1473 if (paddr
>= __pa(initrd_start
) &&
1474 paddr
< __pa(PAGE_ALIGN(initrd_end
)))
1481 void __init
mem_init(void)
1483 unsigned long codepages
, datapages
, initpages
;
1484 unsigned long addr
, last
;
1487 i
= last_valid_pfn
>> ((22 - PAGE_SHIFT
) + 6);
1489 sparc64_valid_addr_bitmap
= (unsigned long *) alloc_bootmem(i
<< 3);
1490 if (sparc64_valid_addr_bitmap
== NULL
) {
1491 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1494 memset(sparc64_valid_addr_bitmap
, 0, i
<< 3);
1496 addr
= PAGE_OFFSET
+ kern_base
;
1497 last
= PAGE_ALIGN(kern_size
) + addr
;
1498 while (addr
< last
) {
1499 set_bit(__pa(addr
) >> 22, sparc64_valid_addr_bitmap
);
1505 high_memory
= __va(last_valid_pfn
<< PAGE_SHIFT
);
1507 /* We subtract one to account for the mem_map_zero page
1510 totalram_pages
= num_physpages
= free_all_bootmem() - 1;
1513 * Set up the zero page, mark it reserved, so that page count
1514 * is not manipulated when freeing the page from user ptes.
1516 mem_map_zero
= alloc_pages(GFP_KERNEL
|__GFP_ZERO
, 0);
1517 if (mem_map_zero
== NULL
) {
1518 prom_printf("paging_init: Cannot alloc zero page.\n");
1521 SetPageReserved(mem_map_zero
);
1523 codepages
= (((unsigned long) _etext
) - ((unsigned long) _start
));
1524 codepages
= PAGE_ALIGN(codepages
) >> PAGE_SHIFT
;
1525 datapages
= (((unsigned long) _edata
) - ((unsigned long) _etext
));
1526 datapages
= PAGE_ALIGN(datapages
) >> PAGE_SHIFT
;
1527 initpages
= (((unsigned long) __init_end
) - ((unsigned long) __init_begin
));
1528 initpages
= PAGE_ALIGN(initpages
) >> PAGE_SHIFT
;
1530 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
1531 nr_free_pages() << (PAGE_SHIFT
-10),
1532 codepages
<< (PAGE_SHIFT
-10),
1533 datapages
<< (PAGE_SHIFT
-10),
1534 initpages
<< (PAGE_SHIFT
-10),
1535 PAGE_OFFSET
, (last_valid_pfn
<< PAGE_SHIFT
));
1537 if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
)
1538 cheetah_ecache_flush_init();
1541 void free_initmem(void)
1543 unsigned long addr
, initend
;
1546 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
1548 addr
= PAGE_ALIGN((unsigned long)(__init_begin
));
1549 initend
= (unsigned long)(__init_end
) & PAGE_MASK
;
1550 for (; addr
< initend
; addr
+= PAGE_SIZE
) {
1555 ((unsigned long) __va(kern_base
)) -
1556 ((unsigned long) KERNBASE
));
1557 memset((void *)addr
, POISON_FREE_INITMEM
, PAGE_SIZE
);
1558 p
= virt_to_page(page
);
1560 ClearPageReserved(p
);
1568 #ifdef CONFIG_BLK_DEV_INITRD
1569 void free_initrd_mem(unsigned long start
, unsigned long end
)
1572 printk ("Freeing initrd memory: %ldk freed\n", (end
- start
) >> 10);
1573 for (; start
< end
; start
+= PAGE_SIZE
) {
1574 struct page
*p
= virt_to_page(start
);
1576 ClearPageReserved(p
);
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
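/* Editorial note (not in the original source): the _4U and _4V suffixes
 * name the two PTE layouts handled here -- sun4u (Spitfire/Cheetah
 * hardware MMU) and sun4v (hypervisor-based MMU).  The runtime pgprot
 * values below are filled in from one set or the other by
 * sun4u_pgprot_init()/sun4v_pgprot_init() depending on tlb_type.
 */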
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP

#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];
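/* Editorial note (not in the original source): vmemmap_table[] holds one
 * TTE-style entry per 4MB (VMEMMAP_CHUNK) slice of the virtual memmap.
 * vmemmap_populate() below fills a slot with pte_base | __pa(block) the
 * first time a struct page range in that slice is needed, and the kernel
 * TLB miss path uses these entries to map vmemmap addresses with 4MB
 * pages.
 */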
1624 int __meminit
vmemmap_populate(struct page
*start
, unsigned long nr
, int node
)
1626 unsigned long vstart
= (unsigned long) start
;
1627 unsigned long vend
= (unsigned long) (start
+ nr
);
1628 unsigned long phys_start
= (vstart
- VMEMMAP_BASE
);
1629 unsigned long phys_end
= (vend
- VMEMMAP_BASE
);
1630 unsigned long addr
= phys_start
& VMEMMAP_CHUNK_MASK
;
1631 unsigned long end
= VMEMMAP_ALIGN(phys_end
);
1632 unsigned long pte_base
;
1634 pte_base
= (_PAGE_VALID
| _PAGE_SZ4MB_4U
|
1635 _PAGE_CP_4U
| _PAGE_CV_4U
|
1636 _PAGE_P_4U
| _PAGE_W_4U
);
1637 if (tlb_type
== hypervisor
)
1638 pte_base
= (_PAGE_VALID
| _PAGE_SZ4MB_4V
|
1639 _PAGE_CP_4V
| _PAGE_CV_4V
|
1640 _PAGE_P_4V
| _PAGE_W_4V
);
1642 for (; addr
< end
; addr
+= VMEMMAP_CHUNK
) {
1643 unsigned long *vmem_pp
=
1644 vmemmap_table
+ (addr
>> VMEMMAP_CHUNK_SHIFT
);
1647 if (!(*vmem_pp
& _PAGE_VALID
)) {
1648 block
= vmemmap_alloc_block(1UL << 22, node
);
1652 *vmem_pp
= pte_base
| __pa(block
);
1654 printk(KERN_INFO
"[%p-%p] page_structs=%lu "
1655 "node=%d entry=%lu/%lu\n", start
, block
, nr
,
1657 addr
>> VMEMMAP_CHUNK_SHIFT
,
1658 VMEMMAP_SIZE
>> VMEMMAP_CHUNK_SHIFT
);
1663 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
1665 static void prot_init_common(unsigned long page_none
,
1666 unsigned long page_shared
,
1667 unsigned long page_copy
,
1668 unsigned long page_readonly
,
1669 unsigned long page_exec_bit
)
1671 PAGE_COPY
= __pgprot(page_copy
);
1672 PAGE_SHARED
= __pgprot(page_shared
);
1674 protection_map
[0x0] = __pgprot(page_none
);
1675 protection_map
[0x1] = __pgprot(page_readonly
& ~page_exec_bit
);
1676 protection_map
[0x2] = __pgprot(page_copy
& ~page_exec_bit
);
1677 protection_map
[0x3] = __pgprot(page_copy
& ~page_exec_bit
);
1678 protection_map
[0x4] = __pgprot(page_readonly
);
1679 protection_map
[0x5] = __pgprot(page_readonly
);
1680 protection_map
[0x6] = __pgprot(page_copy
);
1681 protection_map
[0x7] = __pgprot(page_copy
);
1682 protection_map
[0x8] = __pgprot(page_none
);
1683 protection_map
[0x9] = __pgprot(page_readonly
& ~page_exec_bit
);
1684 protection_map
[0xa] = __pgprot(page_shared
& ~page_exec_bit
);
1685 protection_map
[0xb] = __pgprot(page_shared
& ~page_exec_bit
);
1686 protection_map
[0xc] = __pgprot(page_readonly
);
1687 protection_map
[0xd] = __pgprot(page_readonly
);
1688 protection_map
[0xe] = __pgprot(page_shared
);
1689 protection_map
[0xf] = __pgprot(page_shared
);
1692 static void __init
sun4u_pgprot_init(void)
1694 unsigned long page_none
, page_shared
, page_copy
, page_readonly
;
1695 unsigned long page_exec_bit
;
1697 PAGE_KERNEL
= __pgprot (_PAGE_PRESENT_4U
| _PAGE_VALID
|
1698 _PAGE_CACHE_4U
| _PAGE_P_4U
|
1699 __ACCESS_BITS_4U
| __DIRTY_BITS_4U
|
1701 PAGE_KERNEL_LOCKED
= __pgprot (_PAGE_PRESENT_4U
| _PAGE_VALID
|
1702 _PAGE_CACHE_4U
| _PAGE_P_4U
|
1703 __ACCESS_BITS_4U
| __DIRTY_BITS_4U
|
1704 _PAGE_EXEC_4U
| _PAGE_L_4U
);
1705 PAGE_EXEC
= __pgprot(_PAGE_EXEC_4U
);
1707 _PAGE_IE
= _PAGE_IE_4U
;
1708 _PAGE_E
= _PAGE_E_4U
;
1709 _PAGE_CACHE
= _PAGE_CACHE_4U
;
1711 pg_iobits
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| __DIRTY_BITS_4U
|
1712 __ACCESS_BITS_4U
| _PAGE_E_4U
);
1714 #ifdef CONFIG_DEBUG_PAGEALLOC
1715 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZBITS_4U
) ^
1718 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZ4MB_4U
) ^
1721 kern_linear_pte_xor
[0] |= (_PAGE_CP_4U
| _PAGE_CV_4U
|
1722 _PAGE_P_4U
| _PAGE_W_4U
);
1724 /* XXX Should use 256MB on Panther. XXX */
1725 kern_linear_pte_xor
[1] = kern_linear_pte_xor
[0];
1727 _PAGE_SZBITS
= _PAGE_SZBITS_4U
;
1728 _PAGE_ALL_SZ_BITS
= (_PAGE_SZ4MB_4U
| _PAGE_SZ512K_4U
|
1729 _PAGE_SZ64K_4U
| _PAGE_SZ8K_4U
|
1730 _PAGE_SZ32MB_4U
| _PAGE_SZ256MB_4U
);
1733 page_none
= _PAGE_PRESENT_4U
| _PAGE_ACCESSED_4U
| _PAGE_CACHE_4U
;
1734 page_shared
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
1735 __ACCESS_BITS_4U
| _PAGE_WRITE_4U
| _PAGE_EXEC_4U
);
1736 page_copy
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
1737 __ACCESS_BITS_4U
| _PAGE_EXEC_4U
);
1738 page_readonly
= (_PAGE_VALID
| _PAGE_PRESENT_4U
| _PAGE_CACHE_4U
|
1739 __ACCESS_BITS_4U
| _PAGE_EXEC_4U
);
1741 page_exec_bit
= _PAGE_EXEC_4U
;
1743 prot_init_common(page_none
, page_shared
, page_copy
, page_readonly
,
1747 static void __init
sun4v_pgprot_init(void)
1749 unsigned long page_none
, page_shared
, page_copy
, page_readonly
;
1750 unsigned long page_exec_bit
;
1752 PAGE_KERNEL
= __pgprot (_PAGE_PRESENT_4V
| _PAGE_VALID
|
1753 _PAGE_CACHE_4V
| _PAGE_P_4V
|
1754 __ACCESS_BITS_4V
| __DIRTY_BITS_4V
|
1756 PAGE_KERNEL_LOCKED
= PAGE_KERNEL
;
1757 PAGE_EXEC
= __pgprot(_PAGE_EXEC_4V
);
1759 _PAGE_IE
= _PAGE_IE_4V
;
1760 _PAGE_E
= _PAGE_E_4V
;
1761 _PAGE_CACHE
= _PAGE_CACHE_4V
;
1763 #ifdef CONFIG_DEBUG_PAGEALLOC
1764 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZBITS_4V
) ^
1767 kern_linear_pte_xor
[0] = (_PAGE_VALID
| _PAGE_SZ4MB_4V
) ^
1770 kern_linear_pte_xor
[0] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
1771 _PAGE_P_4V
| _PAGE_W_4V
);
1773 #ifdef CONFIG_DEBUG_PAGEALLOC
1774 kern_linear_pte_xor
[1] = (_PAGE_VALID
| _PAGE_SZBITS_4V
) ^
1777 kern_linear_pte_xor
[1] = (_PAGE_VALID
| _PAGE_SZ256MB_4V
) ^
1780 kern_linear_pte_xor
[1] |= (_PAGE_CP_4V
| _PAGE_CV_4V
|
1781 _PAGE_P_4V
| _PAGE_W_4V
);
1783 pg_iobits
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| __DIRTY_BITS_4V
|
1784 __ACCESS_BITS_4V
| _PAGE_E_4V
);
1786 _PAGE_SZBITS
= _PAGE_SZBITS_4V
;
1787 _PAGE_ALL_SZ_BITS
= (_PAGE_SZ16GB_4V
| _PAGE_SZ2GB_4V
|
1788 _PAGE_SZ256MB_4V
| _PAGE_SZ32MB_4V
|
1789 _PAGE_SZ4MB_4V
| _PAGE_SZ512K_4V
|
1790 _PAGE_SZ64K_4V
| _PAGE_SZ8K_4V
);
1792 page_none
= _PAGE_PRESENT_4V
| _PAGE_ACCESSED_4V
| _PAGE_CACHE_4V
;
1793 page_shared
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
1794 __ACCESS_BITS_4V
| _PAGE_WRITE_4V
| _PAGE_EXEC_4V
);
1795 page_copy
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
1796 __ACCESS_BITS_4V
| _PAGE_EXEC_4V
);
1797 page_readonly
= (_PAGE_VALID
| _PAGE_PRESENT_4V
| _PAGE_CACHE_4V
|
1798 __ACCESS_BITS_4V
| _PAGE_EXEC_4V
);
1800 page_exec_bit
= _PAGE_EXEC_4V
;
1802 prot_init_common(page_none
, page_shared
, page_copy
, page_readonly
,
1806 unsigned long pte_sz_bits(unsigned long sz
)
1808 if (tlb_type
== hypervisor
) {
1812 return _PAGE_SZ8K_4V
;
1814 return _PAGE_SZ64K_4V
;
1816 return _PAGE_SZ512K_4V
;
1817 case 4 * 1024 * 1024:
1818 return _PAGE_SZ4MB_4V
;
1824 return _PAGE_SZ8K_4U
;
1826 return _PAGE_SZ64K_4U
;
1828 return _PAGE_SZ512K_4U
;
1829 case 4 * 1024 * 1024:
1830 return _PAGE_SZ4MB_4U
;
1835 pte_t
mk_pte_io(unsigned long page
, pgprot_t prot
, int space
, unsigned long page_size
)
1839 pte_val(pte
) = page
| pgprot_val(pgprot_noncached(prot
));
1840 pte_val(pte
) |= (((unsigned long)space
) << 32);
1841 pte_val(pte
) |= pte_sz_bits(page_size
);
1846 static unsigned long kern_large_tte(unsigned long paddr
)
1850 val
= (_PAGE_VALID
| _PAGE_SZ4MB_4U
|
1851 _PAGE_CP_4U
| _PAGE_CV_4U
| _PAGE_P_4U
|
1852 _PAGE_EXEC_4U
| _PAGE_L_4U
| _PAGE_W_4U
);
1853 if (tlb_type
== hypervisor
)
1854 val
= (_PAGE_VALID
| _PAGE_SZ4MB_4V
|
1855 _PAGE_CP_4V
| _PAGE_CV_4V
| _PAGE_P_4V
|
1856 _PAGE_EXEC_4V
| _PAGE_W_4V
);
1861 /* If not locked, zap it. */
1862 void __flush_tlb_all(void)
1864 unsigned long pstate
;
1867 __asm__
__volatile__("flushw\n\t"
1868 "rdpr %%pstate, %0\n\t"
1869 "wrpr %0, %1, %%pstate"
1872 if (tlb_type
== hypervisor
) {
1873 sun4v_mmu_demap_all();
1874 } else if (tlb_type
== spitfire
) {
1875 for (i
= 0; i
< 64; i
++) {
1876 /* Spitfire Errata #32 workaround */
1877 /* NOTE: Always runs on spitfire, so no
1878 * cheetah+ page size encodings.
1880 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
1884 "r" (PRIMARY_CONTEXT
), "i" (ASI_DMMU
));
1886 if (!(spitfire_get_dtlb_data(i
) & _PAGE_L_4U
)) {
1887 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
1890 : "r" (TLB_TAG_ACCESS
), "i" (ASI_DMMU
));
1891 spitfire_put_dtlb_data(i
, 0x0UL
);
1894 /* Spitfire Errata #32 workaround */
1895 /* NOTE: Always runs on spitfire, so no
1896 * cheetah+ page size encodings.
1898 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
1902 "r" (PRIMARY_CONTEXT
), "i" (ASI_DMMU
));
1904 if (!(spitfire_get_itlb_data(i
) & _PAGE_L_4U
)) {
1905 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
1908 : "r" (TLB_TAG_ACCESS
), "i" (ASI_IMMU
));
1909 spitfire_put_itlb_data(i
, 0x0UL
);
1912 } else if (tlb_type
== cheetah
|| tlb_type
== cheetah_plus
) {
1913 cheetah_flush_dtlb_all();
1914 cheetah_flush_itlb_all();
1916 __asm__
__volatile__("wrpr %0, 0, %%pstate"
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#endif /* CONFIG_MEMORY_HOTPLUG */