Commit | Line | Data |
---|---|---|
1394f032 | 1 | /* |
96f1050d | 2 | * Copyright 2004-2009 Analog Devices Inc. |
1394f032 | 3 | * |
96f1050d | 4 | * Licensed under the GPL-2 or later. |
1394f032 BW |
5 | */ |
6 | ||
5a0e3ad6 | 7 | #include <linux/gfp.h> |
1394f032 BW |
8 | #include <linux/swap.h> |
9 | #include <linux/bootmem.h> | |
1f83b8f1 | 10 | #include <linux/uaccess.h> |
1394f032 | 11 | #include <asm/bfin-global.h> |
8f65873e GY |
12 | #include <asm/pda.h> |
13 | #include <asm/cplbinit.h> | |
837ec2d5 | 14 | #include <asm/early_printk.h> |
1394f032 BW |
15 | #include "blackfin_sram.h" |
16 | ||
17 | /* | |
18 | * BAD_PAGE is the page that is used for page faults when linux | |
19 | * is out-of-memory. Older versions of linux just did a | |
20 | * do_exit(), but using this instead means there is less risk | |
21 | * for a process dying in kernel mode, possibly leaving a inode | |
22 | * unused etc.. | |
23 | * | |
24 | * BAD_PAGETABLE is the accompanying page-table: it is initialized | |
25 | * to point to BAD_PAGE entries. | |
26 | * | |
27 | * ZERO_PAGE is a special page that is used for zero-initialized | |
28 | * data and COW. | |
29 | */ | |
30 | static unsigned long empty_bad_page_table; | |
31 | ||
32 | static unsigned long empty_bad_page; | |
33 | ||
f82e0a0c | 34 | static unsigned long empty_zero_page; |
1394f032 | 35 | |
f82e0a0c GY |
36 | #ifndef CONFIG_EXCEPTION_L1_SCRATCH |
37 | #if defined CONFIG_SYSCALL_TAB_L1 | |
38 | __attribute__((l1_data)) | |
39 | #endif | |
40 | static unsigned long exception_stack[NR_CPUS][1024]; | |
41 | #endif | |
8f65873e GY |
42 | |
43 | struct blackfin_pda cpu_pda[NR_CPUS]; | |
44 | EXPORT_SYMBOL(cpu_pda); | |
45 | ||
1394f032 BW |
46 | /* |
47 | * paging_init() continues the virtual memory environment setup which | |
48 | * was begun by the code in arch/head.S. | |
49 | * The parameters are pointers to where to stick the starting and ending | |
50 | * addresses of available kernel virtual memory. | |
51 | */ | |
321f6e0f | 52 | void __init paging_init(void) |
1394f032 BW |
53 | { |
54 | /* | |
55 | * make sure start_mem is page aligned, otherwise bootmem and | |
56 | * page_alloc get different views og the world | |
57 | */ | |
58 | unsigned long end_mem = memory_end & PAGE_MASK; | |
59 | ||
60 | pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem); | |
61 | ||
62 | /* | |
63 | * initialize the bad page table and bad page to point | |
64 | * to a couple of allocated pages | |
65 | */ | |
66 | empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | |
67 | empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | |
68 | empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | |
69 | memset((void *)empty_zero_page, 0, PAGE_SIZE); | |
70 | ||
71 | /* | |
72 | * Set up SFC/DFC registers (user data space) | |
73 | */ | |
74 | set_fs(KERNEL_DS); | |
75 | ||
76 | pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n", | |
77 | PAGE_ALIGN(memory_start), end_mem); | |
78 | ||
79 | { | |
80 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; | |
81 | ||
e3defffe AL |
82 | zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; |
83 | zones_size[ZONE_NORMAL] = 0; | |
1394f032 BW |
84 | #ifdef CONFIG_HIGHMEM |
85 | zones_size[ZONE_HIGHMEM] = 0; | |
86 | #endif | |
87 | free_area_init(zones_size); | |
88 | } | |
89 | } | |
90 | ||
/*
 * Initialize this CPU's per-processor data area (PDA).
 * Runs very early in boot (called from assembly, hence asmlinkage);
 * the memory the PDA will reference is still undefined at this point,
 * so only the pointers themselves are established here.
 */
asmlinkage void __init init_pda(void)
{
	unsigned int cpu = raw_smp_processor_id();

	early_shadow_stamp();

	/* Initialize the PDA fields holding references to other parts
	   of the memory. The content of such memory is still
	   undefined at the time of the call, we are only setting up
	   valid pointers to it. */
	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));

	/* NOTE(review): links the first two PDAs into a ring
	   unconditionally; this assumes NR_CPUS >= 2 — confirm this is
	   safe (or merely benign) for uniprocessor configurations. */
	cpu_pda[0].next = &cpu_pda[1];
	cpu_pda[1].next = &cpu_pda[0];

#ifdef CONFIG_EXCEPTION_L1_SCRATCH
	/* Exception stack placed at the end of the L1 scratchpad region. */
	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
					L1_SCRATCH_LENGTH);
#else
	/* exception_stack[cpu + 1] is the first word *after* this CPU's
	   slot — presumably the exception stack grows downward from the
	   high address; TODO confirm against the exception entry code.
	   For the last CPU this is a one-past-the-end pointer, which is
	   a valid address to form but must not be dereferenced as-is. */
	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#endif

#ifdef CONFIG_SMP
	/* Initial interrupt mask for this CPU (SMP only). */
	cpu_pda[cpu].imask = 0x1f;
#endif
}
117 | ||
321f6e0f | 118 | void __init mem_init(void) |
1394f032 BW |
119 | { |
120 | unsigned int codek = 0, datak = 0, initk = 0; | |
ee7883b7 | 121 | unsigned int reservedpages = 0, freepages = 0; |
1394f032 | 122 | unsigned long tmp; |
1394f032 BW |
123 | unsigned long start_mem = memory_start; |
124 | unsigned long end_mem = memory_end; | |
125 | ||
126 | end_mem &= PAGE_MASK; | |
127 | high_memory = (void *)end_mem; | |
128 | ||
129 | start_mem = PAGE_ALIGN(start_mem); | |
130 | max_mapnr = num_physpages = MAP_NR(high_memory); | |
856783b3 | 131 | printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); |
1394f032 BW |
132 | |
133 | /* This will put all memory onto the freelists. */ | |
134 | totalram_pages = free_all_bootmem(); | |
135 | ||
ee7883b7 YL |
136 | reservedpages = 0; |
137 | for (tmp = 0; tmp < max_mapnr; tmp++) | |
138 | if (PageReserved(pfn_to_page(tmp))) | |
139 | reservedpages++; | |
140 | freepages = max_mapnr - reservedpages; | |
141 | ||
142 | /* do not count in kernel image between _rambase and _ramstart */ | |
143 | reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT; | |
41ba653f | 144 | #if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) |
856783b3 | 145 | reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT; |
ee7883b7 YL |
146 | #endif |
147 | ||
1394f032 | 148 | codek = (_etext - _stext) >> 10; |
1394f032 | 149 | initk = (__init_end - __init_begin) >> 10; |
ee7883b7 | 150 | datak = ((_ramstart - _rambase) >> 10) - codek - initk; |
1394f032 | 151 | |
1394f032 | 152 | printk(KERN_INFO |
ee7883b7 | 153 | "Memory available: %luk/%luk RAM, " |
856783b3 | 154 | "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n", |
ee7883b7 | 155 | (unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10, |
856783b3 | 156 | initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); |
5d481f49 SZ |
157 | } |
158 | ||
c051489d | 159 | static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) |
1394f032 | 160 | { |
1d189474 MF |
161 | unsigned long addr; |
162 | /* next to check that the page we free is not a partial page */ | |
163 | for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) { | |
164 | ClearPageReserved(virt_to_page(addr)); | |
165 | init_page_count(virt_to_page(addr)); | |
166 | free_page(addr); | |
1394f032 | 167 | totalram_pages++; |
1394f032 | 168 | } |
1d189474 MF |
169 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); |
170 | } | |
171 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Free the pages occupied by the initial ramdisk once it is no
 * longer needed.  When CONFIG_MPU is set the region is left in
 * place — presumably because the MPU protection setup depends on
 * it; TODO confirm against the MPU code.
 */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif
180 | ||
/*
 * Free the kernel's init sections (__init_begin .. __init_end)
 * after boot.  Only done when the kernel runs from RAM
 * (CONFIG_RAMKERNEL) and the MPU is not in use — NOTE(review):
 * presumably the MPU tables cover the init region; confirm.
 */
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
		(unsigned long)(&__init_begin),
		(unsigned long)(&__init_end));
#endif
}