Commit | Line | Data |
---|---|---|
2f36fa13 | 1 | /* |
1da177e4 LT |
2 | * Handle the memory map. |
3 | * The functions here do the job until bootmem takes over. | |
8059b2a2 VP |
4 | * |
5 | * Getting sanitize_e820_map() in sync with i386 version by applying change: | |
6 | * - Provisions for empty E820 memory regions (reported by certain BIOSes). | |
7 | * Alex Achenbach <xela@slit.de>, December 2002. | |
8 | * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | |
9 | * | |
1da177e4 | 10 | */ |
1da177e4 LT |
11 | #include <linux/kernel.h> |
12 | #include <linux/types.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/bootmem.h> | |
15 | #include <linux/ioport.h> | |
16 | #include <linux/string.h> | |
5f5609df | 17 | #include <linux/kexec.h> |
b9491ac8 | 18 | #include <linux/module.h> |
e8eff5ac | 19 | #include <linux/mm.h> |
74dfd666 RW |
20 | #include <linux/suspend.h> |
21 | #include <linux/pfn.h> | |
b9491ac8 | 22 | |
1a91023a | 23 | #include <asm/pgtable.h> |
1da177e4 LT |
24 | #include <asm/page.h> |
25 | #include <asm/e820.h> | |
26 | #include <asm/proto.h> | |
30c82645 | 27 | #include <asm/setup.h> |
2bc0414e | 28 | #include <asm/sections.h> |
718fc13b | 29 | #include <asm/kdebug.h> |
1da177e4 | 30 | |
/* The kernel's in-use copy of the firmware-provided e820 memory map. */
struct e820map e820;

/*
 * PFN of last memory page.
 */
unsigned long end_pfn;

/*
 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
 * The direct mapping extends to end_pfn_map, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long end_pfn_map;

/*
 * Last pfn which the user wants to use.
 * Defaults to MAXMEM; lowered by the "mem=" / "memmap=" early parameters.
 */
static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
1da177e4 | 49 | |
/* Check for some hardcoded bad areas that early boot is not allowed to touch */
/*
 * If [*addrp, *addrp + size) overlaps a known-reserved region, bump *addrp
 * just past that region and return 1; return 0 when the range is clean.
 * Callers (find_e820_area) loop on this until the candidate stops moving.
 */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

	/* various gunk below that needed for SMP startup */
	if (addr < 0x8000) {
		*addrp = PAGE_ALIGN(0x8000);
		return 1;
	}

	/* direct mapping tables of the kernel */
	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
		*addrp = PAGE_ALIGN(table_end << PAGE_SHIFT);
		return 1;
	}

	/* initrd */
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image+ramdisk_size;

		if (last >= ramdisk_image && addr < ramdisk_end) {
			*addrp = PAGE_ALIGN(ramdisk_end);
			return 1;
		}
	}
#endif
	/* kernel code */
	if (last >= __pa_symbol(&_text) && addr < __pa_symbol(&_end)) {
		*addrp = PAGE_ALIGN(__pa_symbol(&_end));
		return 1;
	}

	/* Extended BIOS Data Area */
	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
		*addrp = PAGE_ALIGN(ebda_addr + ebda_size);
		return 1;
	}

#ifdef CONFIG_NUMA
	/* NUMA memory to node map */
	if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
		/* NOTE(review): this bump is not PAGE_ALIGNed, unlike the
		 * cases above — confirm whether that is intentional. */
		*addrp = nodemap_addr + nodemap_size;
		return 1;
	}
#endif
	/* XXX ramdisk image here? */
	return 0;
}
1da177e4 | 101 | |
95222368 AV |
102 | /* |
103 | * This function checks if any part of the range <start,end> is mapped | |
104 | * with type. | |
105 | */ | |
b92e9fac | 106 | int |
eee5a9fa | 107 | e820_any_mapped(unsigned long start, unsigned long end, unsigned type) |
2f36fa13 | 108 | { |
1da177e4 | 109 | int i; |
2f36fa13 TG |
110 | |
111 | for (i = 0; i < e820.nr_map; i++) { | |
112 | struct e820entry *ei = &e820.map[i]; | |
113 | ||
114 | if (type && ei->type != type) | |
1da177e4 | 115 | continue; |
48c8b113 | 116 | if (ei->addr >= end || ei->addr + ei->size <= start) |
2f36fa13 TG |
117 | continue; |
118 | return 1; | |
119 | } | |
1da177e4 LT |
120 | return 0; |
121 | } | |
b92e9fac | 122 | EXPORT_SYMBOL_GPL(e820_any_mapped); |
1da177e4 | 123 | |
79e453d4 LT |
124 | /* |
125 | * This function checks if the entire range <start,end> is mapped with type. | |
126 | * | |
127 | * Note: this function only works correct if the e820 table is sorted and | |
128 | * not-overlapping, which is the case | |
129 | */ | |
2f36fa13 TG |
130 | int __init e820_all_mapped(unsigned long start, unsigned long end, |
131 | unsigned type) | |
79e453d4 LT |
132 | { |
133 | int i; | |
2f36fa13 | 134 | |
79e453d4 LT |
135 | for (i = 0; i < e820.nr_map; i++) { |
136 | struct e820entry *ei = &e820.map[i]; | |
2f36fa13 | 137 | |
79e453d4 LT |
138 | if (type && ei->type != type) |
139 | continue; | |
140 | /* is the region (part) in overlap with the current region ?*/ | |
141 | if (ei->addr >= end || ei->addr + ei->size <= start) | |
142 | continue; | |
143 | ||
144 | /* if the region is at the beginning of <start,end> we move | |
145 | * start to the end of the region since it's ok until there | |
146 | */ | |
147 | if (ei->addr <= start) | |
148 | start = ei->addr + ei->size; | |
2f36fa13 TG |
149 | /* |
150 | * if start is now at or beyond end, we're done, full | |
151 | * coverage | |
152 | */ | |
79e453d4 | 153 | if (start >= end) |
2f36fa13 | 154 | return 1; |
79e453d4 LT |
155 | } |
156 | return 0; | |
157 | } | |
158 | ||
/*
 * Find a free area in a specific range.
 * Returns the first address in [start, end) within an E820_RAM entry that
 * survives the bad_addr() reservation checks, or -1UL on failure.
 */
unsigned long __init find_e820_area(unsigned long start, unsigned long end,
				    unsigned size)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long addr = ei->addr, last;

		/* only RAM may be handed out */
		if (ei->type != E820_RAM)
			continue;
		/* clip the candidate to the requested range */
		if (addr < start)
			addr = start;
		if (addr > ei->addr + ei->size)
			continue;
		/*
		 * bad_addr() advances 'addr' past reserved areas; repeat
		 * until it either stops moving or overruns this entry.
		 */
		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
			;
		/* NOTE(review): 'last' uses PAGE_ALIGN(addr) but the possibly
		 * unaligned 'addr' is what gets returned — confirm callers
		 * expect that. */
		last = PAGE_ALIGN(addr) + size;
		if (last > ei->addr + ei->size)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1UL;	/* no suitable area found */
}
1da177e4 | 188 | |
1da177e4 LT |
189 | /* |
190 | * Find the highest page frame number we have available | |
191 | */ | |
192 | unsigned long __init e820_end_of_ram(void) | |
193 | { | |
2f36fa13 TG |
194 | unsigned long end_pfn; |
195 | ||
5cb248ab | 196 | end_pfn = find_max_pfn_with_active_regions(); |
2f36fa13 TG |
197 | |
198 | if (end_pfn > end_pfn_map) | |
1da177e4 LT |
199 | end_pfn_map = end_pfn; |
200 | if (end_pfn_map > MAXMEM>>PAGE_SHIFT) | |
201 | end_pfn_map = MAXMEM>>PAGE_SHIFT; | |
202 | if (end_pfn > end_user_pfn) | |
203 | end_pfn = end_user_pfn; | |
2f36fa13 TG |
204 | if (end_pfn > end_pfn_map) |
205 | end_pfn = end_pfn_map; | |
1da177e4 | 206 | |
2f36fa13 TG |
207 | printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map); |
208 | return end_pfn; | |
1da177e4 LT |
209 | } |
210 | ||
485761bd | 211 | /* |
1da177e4 LT |
212 | * Mark e820 reserved areas as busy for the resource manager. |
213 | */ | |
c9cce83d BW |
214 | void __init e820_reserve_resources(struct resource *code_resource, |
215 | struct resource *data_resource, struct resource *bss_resource) | |
1da177e4 LT |
216 | { |
217 | int i; | |
218 | for (i = 0; i < e820.nr_map; i++) { | |
219 | struct resource *res; | |
1da177e4 LT |
220 | res = alloc_bootmem_low(sizeof(struct resource)); |
221 | switch (e820.map[i].type) { | |
222 | case E820_RAM: res->name = "System RAM"; break; | |
223 | case E820_ACPI: res->name = "ACPI Tables"; break; | |
224 | case E820_NVS: res->name = "ACPI Non-volatile Storage"; break; | |
225 | default: res->name = "reserved"; | |
226 | } | |
227 | res->start = e820.map[i].addr; | |
228 | res->end = res->start + e820.map[i].size - 1; | |
229 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | |
230 | request_resource(&iomem_resource, res); | |
231 | if (e820.map[i].type == E820_RAM) { | |
232 | /* | |
2f36fa13 TG |
233 | * We don't know which RAM region contains kernel data, |
234 | * so we try it repeatedly and let the resource manager | |
235 | * test it. | |
1da177e4 | 236 | */ |
c9cce83d BW |
237 | request_resource(res, code_resource); |
238 | request_resource(res, data_resource); | |
239 | request_resource(res, bss_resource); | |
5f5609df | 240 | #ifdef CONFIG_KEXEC |
5c3391f9 BW |
241 | if (crashk_res.start != crashk_res.end) |
242 | request_resource(res, &crashk_res); | |
5f5609df | 243 | #endif |
1da177e4 LT |
244 | } |
245 | } | |
246 | } | |
247 | ||
/*
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for software
 * suspend and suspend to RAM.
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(void)
{
	int i;
	unsigned long paddr;

	/* start just past the first (assumed-RAM) entry */
	paddr = round_down(e820.map[0].addr + e820.map[0].size, PAGE_SIZE);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		/* the gap before this entry is not RAM -> nosave */
		if (paddr < ei->addr)
			register_nosave_region(PFN_DOWN(paddr),
					       PFN_UP(ei->addr));

		paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
		/* non-RAM entries themselves are nosave as well */
		if (ei->type != E820_RAM)
			register_nosave_region(PFN_UP(ei->addr),
					       PFN_DOWN(paddr));

		/* stop once past the end of usable RAM */
		if (paddr >= (end_pfn << PAGE_SHIFT))
			break;
	}
}
278 | ||
/*
 * Finds an active region in the address range from start_pfn to end_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 * Returns 1 when a usable RAM range was produced, 0 otherwise.  As a side
 * effect, grows end_pfn_map to cover non-RAM entries it walks over.
 */
static int __init e820_find_active_region(const struct e820entry *ei,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *ei_startpfn,
					  unsigned long *ei_endpfn)
{
	/* shrink the entry inward to whole-page boundaries */
	*ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Check if end_pfn_map should be updated */
	if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
		end_pfn_map = *ei_endpfn;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
	    *ei_startpfn >= end_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > end_pfn)
		*ei_endpfn = end_pfn;

	/* Obey end_user_pfn to save on memmap */
	if (*ei_startpfn >= end_user_pfn)
		return 0;
	if (*ei_endpfn > end_user_pfn)
		*ei_endpfn = end_user_pfn;

	return 1;
}
319 | ||
5cb248ab MG |
320 | /* Walk the e820 map and register active regions within a node */ |
321 | void __init | |
322 | e820_register_active_regions(int nid, unsigned long start_pfn, | |
323 | unsigned long end_pfn) | |
324 | { | |
3af044e0 DR |
325 | unsigned long ei_startpfn; |
326 | unsigned long ei_endpfn; | |
5cb248ab | 327 | int i; |
5cb248ab | 328 | |
3af044e0 DR |
329 | for (i = 0; i < e820.nr_map; i++) |
330 | if (e820_find_active_region(&e820.map[i], | |
331 | start_pfn, end_pfn, | |
332 | &ei_startpfn, &ei_endpfn)) | |
333 | add_active_range(nid, ei_startpfn, ei_endpfn); | |
5cb248ab MG |
334 | } |
335 | ||
2f36fa13 | 336 | /* |
1da177e4 | 337 | * Add a memory region to the kernel e820 map. |
2f36fa13 | 338 | */ |
1da177e4 LT |
339 | void __init add_memory_region(unsigned long start, unsigned long size, int type) |
340 | { | |
341 | int x = e820.nr_map; | |
342 | ||
343 | if (x == E820MAX) { | |
344 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); | |
345 | return; | |
346 | } | |
347 | ||
348 | e820.map[x].addr = start; | |
349 | e820.map[x].size = size; | |
350 | e820.map[x].type = type; | |
351 | e820.nr_map++; | |
352 | } | |
353 | ||
a7e96629 DR |
354 | /* |
355 | * Find the hole size (in bytes) in the memory range. | |
356 | * @start: starting address of the memory range to scan | |
357 | * @end: ending address of the memory range to scan | |
358 | */ | |
359 | unsigned long __init e820_hole_size(unsigned long start, unsigned long end) | |
360 | { | |
361 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
362 | unsigned long end_pfn = end >> PAGE_SHIFT; | |
2f36fa13 | 363 | unsigned long ei_startpfn, ei_endpfn, ram = 0; |
a7e96629 DR |
364 | int i; |
365 | ||
366 | for (i = 0; i < e820.nr_map; i++) { | |
367 | if (e820_find_active_region(&e820.map[i], | |
368 | start_pfn, end_pfn, | |
369 | &ei_startpfn, &ei_endpfn)) | |
370 | ram += ei_endpfn - ei_startpfn; | |
371 | } | |
372 | return end - start - (ram << PAGE_SHIFT); | |
373 | } | |
374 | ||
013d23e1 | 375 | static void __init e820_print_map(char *who) |
1da177e4 LT |
376 | { |
377 | int i; | |
378 | ||
379 | for (i = 0; i < e820.nr_map; i++) { | |
5a3ece79 | 380 | printk(KERN_INFO " %s: %016Lx - %016Lx ", who, |
2f36fa13 TG |
381 | (unsigned long long) e820.map[i].addr, |
382 | (unsigned long long) | |
383 | (e820.map[i].addr + e820.map[i].size)); | |
1da177e4 | 384 | switch (e820.map[i].type) { |
2f36fa13 TG |
385 | case E820_RAM: |
386 | printk(KERN_CONT "(usable)\n"); | |
387 | break; | |
1da177e4 | 388 | case E820_RESERVED: |
2f36fa13 TG |
389 | printk(KERN_CONT "(reserved)\n"); |
390 | break; | |
1da177e4 | 391 | case E820_ACPI: |
2f36fa13 TG |
392 | printk(KERN_CONT "(ACPI data)\n"); |
393 | break; | |
1da177e4 | 394 | case E820_NVS: |
2f36fa13 TG |
395 | printk(KERN_CONT "(ACPI NVS)\n"); |
396 | break; | |
397 | default: | |
398 | printk(KERN_CONT "type %u\n", e820.map[i].type); | |
399 | break; | |
1da177e4 LT |
400 | } |
401 | } | |
402 | } | |
403 | ||
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
 * Returns 0 on success, -1 when the map is trivial (< 2 entries) or
 * contains an entry whose addr+size wraps around.
 */
static int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820MAX] __initdata;
	static struct change_member *change_point[2*E820MAX] __initdata;
	static struct e820entry *overlap_list[E820MAX] __initdata;
	static struct e820entry new_bios[E820MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following
		(1,2,3,4 = memory types)...

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {	/* simple bubble sort over change points */
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr > lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining affect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= E820MAX)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
604 | ||
605 | /* | |
606 | * Copy the BIOS e820 map into a safe place. | |
607 | * | |
608 | * Sanity-check it while we're at it.. | |
609 | * | |
610 | * If we're lucky and live on a modern system, the setup code | |
611 | * will have given us a memory map that we can use to properly | |
612 | * set up memory. If we aren't, we'll fake a memory map. | |
1da177e4 | 613 | */ |
2f36fa13 | 614 | static int __init copy_e820_map(struct e820entry *biosmap, int nr_map) |
1da177e4 LT |
615 | { |
616 | /* Only one memory region (or negative)? Ignore it */ | |
617 | if (nr_map < 2) | |
618 | return -1; | |
619 | ||
620 | do { | |
621 | unsigned long start = biosmap->addr; | |
622 | unsigned long size = biosmap->size; | |
623 | unsigned long end = start + size; | |
624 | unsigned long type = biosmap->type; | |
625 | ||
626 | /* Overflow in 64 bits? Ignore the memory map. */ | |
627 | if (start > end) | |
628 | return -1; | |
629 | ||
1da177e4 | 630 | add_memory_region(start, size, type); |
2f36fa13 | 631 | } while (biosmap++, --nr_map); |
1da177e4 LT |
632 | return 0; |
633 | } | |
634 | ||
/*
 * Print a fatal message on the early console and halt.
 * 'msg' is caller-supplied text: never hand it to early_printk()/panic()
 * as the format string itself, or any '%' in it would be interpreted
 * as a conversion specifier (format-string bug).
 */
static void early_panic(char *msg)
{
	early_printk("%s", msg);
	panic("%s", msg);
}
1da177e4 | 640 | |
/* Sanitize and install the firmware-provided e820 map; panic if unusable. */
void __init setup_memory_region(void)
{
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries);
	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0)
		early_panic("Cannot find a valid memory map");
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map("BIOS-e820");
}
655 | ||
2c8c0e6b AK |
656 | static int __init parse_memopt(char *p) |
657 | { | |
658 | if (!p) | |
659 | return -EINVAL; | |
660 | end_user_pfn = memparse(p, &p); | |
2f36fa13 | 661 | end_user_pfn >>= PAGE_SHIFT; |
2c8c0e6b | 662 | return 0; |
2f36fa13 | 663 | } |
2c8c0e6b | 664 | early_param("mem", parse_memopt); |
1da177e4 | 665 | |
/* set once the user overrides the firmware map via "memmap=" */
static int userdef __initdata;

/*
 * Handle the "memmap=" early parameter:
 *   exactmap        - discard the firmware map and start from scratch
 *   size@addr       - add a RAM region
 *   size#addr       - add an ACPI-data region
 *   size$addr       - add a reserved region
 *   size            - cap usable memory (like "mem=")
 */
static int __init parse_memmap_opt(char *p)
{
	char *oldp;
	unsigned long long start_at, mem_size;

	if (!strcmp(p, "exactmap")) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real mem size before original memory map is
		 * reset.
		 */
		e820_register_active_regions(0, 0, -1UL);
		saved_max_pfn = e820_end_of_ram();
		remove_all_active_ranges();
#endif
		end_pfn_map = 0;
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)	/* no number parsed at all */
		return -EINVAL;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, E820_RESERVED);
	} else {
		end_user_pfn = (mem_size >> PAGE_SHIFT);
	}
	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", parse_memmap_opt);
709 | ||
43999d9e | 710 | void __init finish_e820_parsing(void) |
2c8c0e6b AK |
711 | { |
712 | if (userdef) { | |
713 | printk(KERN_INFO "user-defined physical RAM map:\n"); | |
714 | e820_print_map("user"); | |
715 | } | |
69cda7b1 | 716 | } |
717 | ||
/* Base of the MMIO window handed to PCI; overwritten by e820_setup_gap(). */
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);

/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space. We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS let enough space left.
 *
 * NOTE(review): the backward scan assumes the e820 map is sorted by
 * address — confirm that sanitize_e820_map() has run first.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	unsigned long last;
	int i;
	int found = 0;

	last = 0x100000000ull;	/* scan starts just below 4GB */
	gapstart = 0x10000000;	/* fallback start: 256MB */
	gapsize = 0x400000;	/* smallest gap worth using: 4MB */
	i = e820.nr_map;
	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap > gapsize) {
				gapsize = gap;
				gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}

	if (!found) {
		/* no gap below 4GB: place the window just above RAM */
		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
		       "address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
		       "registers may break!\n");
	}

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}
e820482c KA |
781 | |
782 | int __init arch_get_ram_range(int slot, u64 *addr, u64 *size) | |
783 | { | |
784 | int i; | |
785 | ||
786 | if (slot < 0 || slot >= e820.nr_map) | |
787 | return -1; | |
788 | for (i = slot; i < e820.nr_map; i++) { | |
789 | if (e820.map[i].type != E820_RAM) | |
790 | continue; | |
791 | break; | |
792 | } | |
793 | if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT)) | |
794 | return -1; | |
795 | *addr = e820.map[i].addr; | |
796 | *size = min_t(u64, e820.map[i].size + e820.map[i].addr, | |
797 | max_pfn << PAGE_SHIFT) - *addr; | |
798 | return i + 1; | |
799 | } |