X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Finit_32.c;h=47325f23c51f2b325ba30fd8cd89d972ed2a0318;hb=771168494719;hp=e1f5ded851f6ed08c40ff892af4423d7af4b7fa2;hpb=2fe83b3ad12d43799af5f3156886eca443a88bac;p=deliverable%2Flinux.git

diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index e1f5ded851f6..47325f23c51f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -41,8 +42,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 
 #include "mmu_decl.h"
@@ -60,8 +59,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-unsigned long ppc_memstart;
-unsigned long ppc_memoffset = PAGE_OFFSET;
+phys_addr_t memstart_addr;
+phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
@@ -96,10 +95,10 @@ int __map_without_ltlbs;
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * limit of what is accessible with initial MMU setup -
+ * address of the limit of what is accessible with initial MMU setup -
  * 256MB usually, but only 16MB on 601.
  */
-unsigned long __initial_memory_limit = 0x10000000;
+phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
 
 /*
  * Check for command-line options that affect what MMU_init will do.
@@ -132,7 +131,10 @@ void __init MMU_init(void)
 
 	/* 601 can only access 16MB at the moment */
 	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit = 0x01000000;
+		__initial_memory_limit_addr = 0x01000000;
+	/* 8xx can only access 8MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
+		__initial_memory_limit_addr = 0x00800000;
 
 	/* parse args from command line */
 	MMU_setup();
@@ -143,8 +145,8 @@ void __init MMU_init(void)
 		printk(KERN_WARNING "Only using first contiguous memory region");
 	}
 
-	total_memory = lmb_end_of_DRAM();
-	total_lowmem = total_memory;
+	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
 	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
@@ -155,9 +157,10 @@ void __init MMU_init(void)
 
 	if (total_lowmem > __max_low_memory) {
 		total_lowmem = __max_low_memory;
+		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(total_lowmem);
+		lmb_enforce_memory_limit(lowmem_end_addr);
 		lmb_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
@@ -182,8 +185,6 @@ void __init MMU_init(void)
 	/* Map in I/O resources */
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:setio", 0x302);
-	if (ppc_md.setup_io_mappings)
-		ppc_md.setup_io_mappings();
 
 	/* Initialize the context management stuff */
 	mmu_context_init();
@@ -206,7 +207,7 @@ void __init *early_get_page(void)
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
 		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					__initial_memory_limit));
+					__initial_memory_limit_addr));
 	}
 	return p;
 }
@@ -256,3 +257,40 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+#ifdef CONFIG_PROC_KCORE
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+		unsigned long size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("%s: kmalloc failed\n", __func__);
+
+		/* must stay under 32 bits */
+		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
+			size = 0xfffffffful - (unsigned long)(__va(base));
+			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
+				size);
+		}
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
+		VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+#endif