x86: remove extern declarations for code, data, bss resources
arch/x86/kernel/e820_32.c
1 #include <linux/kernel.h>
2 #include <linux/types.h>
3 #include <linux/init.h>
4 #include <linux/bootmem.h>
5 #include <linux/ioport.h>
6 #include <linux/string.h>
7 #include <linux/kexec.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/efi.h>
11 #include <linux/pfn.h>
12 #include <linux/uaccess.h>
13 #include <linux/suspend.h>
14
15 #include <asm/pgtable.h>
16 #include <asm/page.h>
17 #include <asm/e820.h>
18 #include <asm/setup.h>
19
20 #ifdef CONFIG_EFI
21 int efi_enabled = 0;
22 EXPORT_SYMBOL(efi_enabled);
23 #endif
24
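/* The kernel's copy of the BIOS-provided physical memory map. */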
25 struct e820map e820;
26 struct change_member {
27 struct e820entry *pbios; /* pointer to original bios entry */
28 unsigned long long addr; /* address for this change point */
29 };
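/* Scratch space used only by sanitize_e820_map() below; all __initdata. */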
30 static struct change_member change_point_list[2*E820MAX] __initdata;
31 static struct change_member *change_point[2*E820MAX] __initdata;
32 static struct e820entry *overlap_list[E820MAX] __initdata;
33 static struct e820entry new_bios[E820MAX] __initdata;
34 /* For PCI or other memory-mapped resources */
35 unsigned long pci_mem_start = 0x10000000;
36 #ifdef CONFIG_PCI
37 EXPORT_SYMBOL(pci_mem_start);
38 #endif
39 extern int user_defined_memmap;
40
41 static struct resource system_rom_resource = {
42 .name = "System ROM",
43 .start = 0xf0000,
44 .end = 0xfffff,
45 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
46 };
47
48 static struct resource extension_rom_resource = {
49 .name = "Extension ROM",
50 .start = 0xe0000,
51 .end = 0xeffff,
52 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
53 };
54
55 static struct resource adapter_rom_resources[] = { {
56 .name = "Adapter ROM",
57 .start = 0xc8000,
58 .end = 0,
59 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
60 }, {
61 .name = "Adapter ROM",
62 .start = 0,
63 .end = 0,
64 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
65 }, {
66 .name = "Adapter ROM",
67 .start = 0,
68 .end = 0,
69 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
70 }, {
71 .name = "Adapter ROM",
72 .start = 0,
73 .end = 0,
74 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
75 }, {
76 .name = "Adapter ROM",
77 .start = 0,
78 .end = 0,
79 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
80 }, {
81 .name = "Adapter ROM",
82 .start = 0,
83 .end = 0,
84 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
85 } };
86
87 static struct resource video_rom_resource = {
88 .name = "Video ROM",
89 .start = 0xc0000,
90 .end = 0xc7fff,
91 .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
92 };
93
94 #define ROMSIGNATURE 0xaa55
95
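/*
 * Check for the 0xaa55 signature at the start of a candidate ROM image.
 * The read goes through probe_kernel_address() so that a faulting or
 * unmapped address is treated as "no signature" rather than oopsing.
 */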
96 static int __init romsignature(const unsigned char *rom)
97 {
98 const unsigned short * const ptr = (const unsigned short *)rom;
99 unsigned short sig;
100
101 return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
102 }
103
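/*
 * Verify a ROM image: the byte sum over 'length' bytes of a valid ROM
 * is zero. Reads use probe_kernel_address(), so a bad address simply
 * makes the check fail.
 */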
104 static int __init romchecksum(const unsigned char *rom, unsigned long length)
105 {
106 unsigned char sum, c;
107
108 for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
109 sum += c;
110 return !length && !sum;
111 }
112
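/*
 * Scan the legacy ROM areas (video ROM, extension ROM, adapter ROMs on
 * 2K boundaries) for valid images and claim the ranges found as busy,
 * read-only iomem resources; the System ROM range is always claimed.
 */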
113 static void __init probe_roms(void)
114 {
115 const unsigned char *rom;
116 unsigned long start, length, upper;
117 unsigned char c;
118 int i;
119
120 /* video rom */
121 upper = adapter_rom_resources[0].start;
122 for (start = video_rom_resource.start; start < upper; start += 2048) {
123 rom = isa_bus_to_virt(start);
124 if (!romsignature(rom))
125 continue;
126
127 video_rom_resource.start = start;
128
129 if (probe_kernel_address(rom + 2, c) != 0)
130 continue;
131
132 /* 0 < length <= 0x7f * 512, historically */
133 length = c * 512;
134
135 /* if checksum okay, trust length byte */
136 if (length && romchecksum(rom, length))
137 video_rom_resource.end = start + length - 1;
138
139 request_resource(&iomem_resource, &video_rom_resource);
140 break;
141 }
142
143 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
144 if (start < upper)
145 start = upper;
146
147 /* system rom */
148 request_resource(&iomem_resource, &system_rom_resource);
149 upper = system_rom_resource.start;
150
151 /* check for extension rom (ignore length byte!) */
152 rom = isa_bus_to_virt(extension_rom_resource.start);
153 if (romsignature(rom)) {
154 length = extension_rom_resource.end - extension_rom_resource.start + 1;
155 if (romchecksum(rom, length)) {
156 request_resource(&iomem_resource, &extension_rom_resource);
157 upper = extension_rom_resource.start;
158 }
159 }
160
161 /* check for adapter roms on 2k boundaries */
162 for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
163 rom = isa_bus_to_virt(start);
164 if (!romsignature(rom))
165 continue;
166
167 if (probe_kernel_address(rom + 2, c) != 0)
168 continue;
169
170 /* 0 < length <= 0x7f * 512, historically */
171 length = c * 512;
172
173 /* but accept any length that fits if checksum okay */
174 if (!length || start + length > upper || !romchecksum(rom, length))
175 continue;
176
177 adapter_rom_resources[i].start = start;
178 adapter_rom_resources[i].end = start + length - 1;
179 request_resource(&iomem_resource, &adapter_rom_resources[i]);
180
181 start = adapter_rom_resources[i++].end & ~2047UL;
182 }
183 }
184
185 /*
186 * Request address space for all standard RAM and ROM resources
187 * and also for regions reported as reserved by the e820.
188 */
189 void __init legacy_init_iomem_resources(struct resource *code_resource,
190 struct resource *data_resource,
191 struct resource *bss_resource)
192 {
193 int i;
194
195 probe_roms();
196 for (i = 0; i < e820.nr_map; i++) {
197 struct resource *res;
198 #ifndef CONFIG_RESOURCES_64BIT
199 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
200 continue;
201 #endif
202 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
203 switch (e820.map[i].type) {
204 case E820_RAM: res->name = "System RAM"; break;
205 case E820_ACPI: res->name = "ACPI Tables"; break;
206 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
207 default: res->name = "reserved";
208 }
209 res->start = e820.map[i].addr;
210 res->end = res->start + e820.map[i].size - 1;
211 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
212 if (request_resource(&iomem_resource, res)) {
213 kfree(res);
214 continue;
215 }
216 if (e820.map[i].type == E820_RAM) {
217 /*
218 * We don't know which RAM region contains kernel data,
219 * so we try it repeatedly and let the resource manager
220 * test it.
221 */
222 request_resource(res, code_resource);
223 request_resource(res, data_resource);
224 request_resource(res, bss_resource);
225 #ifdef CONFIG_KEXEC
226 if (crashk_res.start != crashk_res.end)
227 request_resource(res, &crashk_res);
228 #endif
229 }
230 }
231 }
232
233 #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
234 /**
235 * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
236 * correspond to e820 RAM areas and mark the corresponding pages as nosave for
237 * hibernation.
238 *
239 * This function requires the e820 map to be sorted and without any
240 * overlapping entries and assumes the first e820 area to be RAM.
241 */
242 void __init e820_mark_nosave_regions(void)
243 {
244 int i;
245 unsigned long pfn;
246
247 pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
248 for (i = 1; i < e820.nr_map; i++) {
249 struct e820entry *ei = &e820.map[i];
250
251 if (pfn < PFN_UP(ei->addr))
252 register_nosave_region(pfn, PFN_UP(ei->addr));
253
254 pfn = PFN_DOWN(ei->addr + ei->size);
255 if (ei->type != E820_RAM)
256 register_nosave_region(PFN_UP(ei->addr), pfn);
257
258 if (pfn >= max_low_pfn)
259 break;
260 }
261 }
262 #endif
263
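/*
 * Append one entry to the e820 map. With EFI enabled the entry is
 * silently dropped, presumably because the EFI memory map is the
 * authoritative source in that case.
 */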
264 void __init add_memory_region(unsigned long long start,
265 unsigned long long size, int type)
266 {
267 int x;
268
269 if (!efi_enabled) {
270 x = e820.nr_map;
271
272 if (x == E820MAX) {
273 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
274 return;
275 }
276
277 e820.map[x].addr = start;
278 e820.map[x].size = size;
279 e820.map[x].type = type;
280 e820.nr_map++;
281 }
282 } /* add_memory_region */
283
284 /*
285 * Sanitize the BIOS e820 map.
286 *
287 * Some e820 responses include overlapping entries. The following
288 * replaces the original e820 map with a new one, removing overlaps,
289 * and resolving conflicting memory types in favor of the highest
290 */
291 int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
292 {
293 struct change_member *change_tmp;
294 unsigned long current_type, last_type;
295 unsigned long long last_addr;
296 int chgidx, still_changing;
297 int overlap_entries;
298 int new_bios_entry;
299 int old_nr, new_nr, chg_nr;
300 int i;
301
302 /*
303 Visually we're performing the following (1,2,3,4 = memory types)...
304
305 Sample memory map (w/overlaps):
306 ____22__________________
307 ______________________4_
308 ____1111________________
309 _44_____________________
310 11111111________________
311 ____________________33__
312 ___________44___________
313 __________33333_________
314 ______________22________
315 ___________________2222_
316 _________111111111______
317 _____________________11_
318 _________________4______
319
320 Sanitized equivalent (no overlap):
321 1_______________________
322 _44_____________________
323 ___1____________________
324 ____22__________________
325 ______11________________
326 _________1______________
327 __________3_____________
328 ___________44___________
329 _____________33_________
330 _______________2________
331 ________________1_______
332 _________________4______
333 ___________________2____
334 ____________________33__
335 ______________________4_
336 */
337 /* if there's only one memory region, don't bother */
338 if (*pnr_map < 2) {
339 return -1;
340 }
341
342 old_nr = *pnr_map;
343
344 /* bail out if we find any unreasonable addresses in bios map */
345 for (i=0; i<old_nr; i++)
346 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
347 return -1;
348 }
349
350 /* create pointers for initial change-point information (for sorting) */
351 for (i=0; i < 2*old_nr; i++)
352 change_point[i] = &change_point_list[i];
353
354 /* record all known change-points (starting and ending addresses),
355 omitting those that are for empty memory regions */
356 chgidx = 0;
357 for (i=0; i < old_nr; i++) {
358 if (biosmap[i].size != 0) {
359 change_point[chgidx]->addr = biosmap[i].addr;
360 change_point[chgidx++]->pbios = &biosmap[i];
361 change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
362 change_point[chgidx++]->pbios = &biosmap[i];
363 }
364 }
365 chg_nr = chgidx; /* true number of change-points */
366
367 /* sort change-point list by memory addresses (low -> high) */
368 still_changing = 1;
369 while (still_changing) {
370 still_changing = 0;
371 for (i=1; i < chg_nr; i++) {
372 /* if <current_addr> > <last_addr>, swap */
373 /* or, if current=<start_addr> & last=<end_addr>, swap */
374 if ((change_point[i]->addr < change_point[i-1]->addr) ||
375 ((change_point[i]->addr == change_point[i-1]->addr) &&
376 (change_point[i]->addr == change_point[i]->pbios->addr) &&
377 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
378 )
379 {
380 change_tmp = change_point[i];
381 change_point[i] = change_point[i-1];
382 change_point[i-1] = change_tmp;
383 still_changing=1;
384 }
385 }
386 }
387
388 /* create a new bios memory map, removing overlaps */
389 overlap_entries=0; /* number of entries in the overlap table */
390 new_bios_entry=0; /* index for creating new bios map entries */
391 last_type = 0; /* start with undefined memory type */
392 last_addr = 0; /* start with 0 as last starting address */
393 /* loop through change-points, determining effect on the new bios map */
394 for (chgidx=0; chgidx < chg_nr; chgidx++)
395 {
396 /* keep track of all overlapping bios entries */
397 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
398 {
399 /* add map entry to overlap list (> 1 entry implies an overlap) */
400 overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
401 }
402 else
403 {
404 /* remove entry from list (order independent, so swap with last) */
405 for (i=0; i<overlap_entries; i++)
406 {
407 if (overlap_list[i] == change_point[chgidx]->pbios)
408 overlap_list[i] = overlap_list[overlap_entries-1];
409 }
410 overlap_entries--;
411 }
412 /* if there are overlapping entries, decide which "type" to use */
413 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
414 current_type = 0;
415 for (i=0; i<overlap_entries; i++)
416 if (overlap_list[i]->type > current_type)
417 current_type = overlap_list[i]->type;
418 /* continue building up new bios map based on this information */
419 if (current_type != last_type) {
420 if (last_type != 0) {
421 new_bios[new_bios_entry].size =
422 change_point[chgidx]->addr - last_addr;
423 /* move forward only if the new size was non-zero */
424 if (new_bios[new_bios_entry].size != 0)
425 if (++new_bios_entry >= E820MAX)
426 break; /* no more space left for new bios entries */
427 }
428 if (current_type != 0) {
429 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
430 new_bios[new_bios_entry].type = current_type;
431 last_addr=change_point[chgidx]->addr;
432 }
433 last_type = current_type;
434 }
435 }
436 new_nr = new_bios_entry; /* retain count for new bios entries */
437
438 /* copy new bios mapping into original location */
439 memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
440 *pnr_map = new_nr;
441
442 return 0;
443 }
444
445 /*
446 * Copy the BIOS e820 map into a safe place.
447 *
448 * Sanity-check it while we're at it.
449 *
450 * If we're lucky and live on a modern system, the setup code
451 * will have given us a memory map that we can use to properly
452 * set up memory. If we aren't, we'll fake a memory map.
453 *
454 * We check to see that the memory map contains at least 2 elements
455 * before we'll use it, because the detection code in setup.S may
456 * not be perfect and most every PC known to man has two memory
457 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
458 * thinkpad 560x, for example, does not cooperate with the memory
459 * detection code.)
460 */
461 int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
462 {
463 /* Only one memory region (or negative)? Ignore it */
464 if (nr_map < 2)
465 return -1;
466
467 do {
468 unsigned long long start = biosmap->addr;
469 unsigned long long size = biosmap->size;
470 unsigned long long end = start + size;
471 unsigned long type = biosmap->type;
472
473 /* Overflow in 64 bits? Ignore the memory map. */
474 if (start > end)
475 return -1;
476
477 /*
478 * Some BIOSes claim RAM in the 640k - 1M region.
479 * Not right. Fix it up.
480 */
481 if (type == E820_RAM) {
482 if (start < 0x100000ULL && end > 0xA0000ULL) {
483 if (start < 0xA0000ULL)
484 add_memory_region(start, 0xA0000ULL-start, type);
485 if (end <= 0x100000ULL)
486 continue;
487 start = 0x100000ULL;
488 size = end - start;
489 }
490 }
491 add_memory_region(start, size, type);
492 } while (biosmap++,--nr_map);
493 return 0;
494 }
495
496 /*
497 * Callback for efi_memmap_walk().
498 */
499 static int __init
500 efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
501 {
502 unsigned long *max_pfn = arg, pfn;
503
504 if (start < end) {
505 pfn = PFN_UP(end -1);
506 if (pfn > *max_pfn)
507 *max_pfn = pfn;
508 }
509 return 0;
510 }
511
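/*
 * Callback for efi_memmap_walk(): record each EFI memory range as
 * present on node 0 via memory_present().
 */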
512 static int __init
513 efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
514 {
515 memory_present(0, PFN_UP(start), PFN_DOWN(end));
516 return 0;
517 }
518
519 /*
520 * Find the highest page frame number we have available
521 */
522 void __init find_max_pfn(void)
523 {
524 int i;
525
526 max_pfn = 0;
527 if (efi_enabled) {
528 efi_memmap_walk(efi_find_max_pfn, &max_pfn);
529 efi_memmap_walk(efi_memory_present_wrapper, NULL);
530 return;
531 }
532
533 for (i = 0; i < e820.nr_map; i++) {
534 unsigned long start, end;
535 /* RAM? */
536 if (e820.map[i].type != E820_RAM)
537 continue;
538 start = PFN_UP(e820.map[i].addr);
539 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
540 if (start >= end)
541 continue;
542 if (end > max_pfn)
543 max_pfn = end;
544 memory_present(0, start, end);
545 }
546 }
547
548 /*
549 * Free all available memory for boot time allocation. Used
550 * as a callback function by efi_memmap_walk()
551 */
552
553 static int __init
554 free_available_memory(unsigned long start, unsigned long end, void *arg)
555 {
556 /* check max_low_pfn */
557 if (start >= (max_low_pfn << PAGE_SHIFT))
558 return 0;
559 if (end >= (max_low_pfn << PAGE_SHIFT))
560 end = max_low_pfn << PAGE_SHIFT;
561 if (start < end)
562 free_bootmem(start, end - start);
563
564 return 0;
565 }
566 /*
567 * Register fully available low RAM pages with the bootmem allocator.
568 */
569 void __init register_bootmem_low_pages(unsigned long max_low_pfn)
570 {
571 int i;
572
573 if (efi_enabled) {
574 efi_memmap_walk(free_available_memory, NULL);
575 return;
576 }
577 for (i = 0; i < e820.nr_map; i++) {
578 unsigned long curr_pfn, last_pfn, size;
579 /*
580 * Only usable (E820_RAM) regions are registered as free low memory
581 */
582 if (e820.map[i].type != E820_RAM)
583 continue;
584 /*
585 * We are rounding up the start address of usable memory:
586 */
587 curr_pfn = PFN_UP(e820.map[i].addr);
588 if (curr_pfn >= max_low_pfn)
589 continue;
590 /*
591 * ... and at the end of the usable range downwards:
592 */
593 last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
594
595 if (last_pfn > max_low_pfn)
596 last_pfn = max_low_pfn;
597
598 /*
599 * .. finally, did all the rounding and playing
600 * around just make the area go away?
601 */
602 if (last_pfn <= curr_pfn)
603 continue;
604
605 size = last_pfn - curr_pfn;
606 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
607 }
608 }
609
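/*
 * Find the largest gap below 4GB in the e820 map and start the PCI
 * memory resource window (pci_mem_start) at its rounded-up beginning.
 */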
610 void __init e820_register_memory(void)
611 {
612 unsigned long gapstart, gapsize, round;
613 unsigned long long last;
614 int i;
615
616 /*
617 * Search for the biggest gap in the low 32 bits of the e820
618 * memory space.
619 */
620 last = 0x100000000ull;
621 gapstart = 0x10000000;
622 gapsize = 0x400000;
623 i = e820.nr_map;
624 while (--i >= 0) {
625 unsigned long long start = e820.map[i].addr;
626 unsigned long long end = start + e820.map[i].size;
627
628 /*
629 * Since "last" is at most 4GB, we know we'll
630 * fit in 32 bits if this condition is true
631 */
632 if (last > end) {
633 unsigned long gap = last - end;
634
635 if (gap > gapsize) {
636 gapsize = gap;
637 gapstart = end;
638 }
639 }
640 if (start < last)
641 last = start;
642 }
643
644 /*
645 * See how much we want to round up: start off with
646 * rounding to the next 1MB area.
647 */
648 round = 0x100000;
649 while ((gapsize >> 4) > round)
650 round += round;
651 /* Fun with two's complement */
652 pci_mem_start = (gapstart + round) & -round;
653
654 printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
655 pci_mem_start, gapstart, gapsize);
656 }
657
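/* Print the current e820 map to the kernel log, prefixed with 'who'. */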
658 void __init print_memory_map(char *who)
659 {
660 int i;
661
662 for (i = 0; i < e820.nr_map; i++) {
663 printk(" %s: %016Lx - %016Lx ", who,
664 e820.map[i].addr,
665 e820.map[i].addr + e820.map[i].size);
666 switch (e820.map[i].type) {
667 case E820_RAM: printk("(usable)\n");
668 break;
669 case E820_RESERVED:
670 printk("(reserved)\n");
671 break;
672 case E820_ACPI:
673 printk("(ACPI data)\n");
674 break;
675 case E820_NVS:
676 printk("(ACPI NVS)\n");
677 break;
678 default: printk("type %u\n", e820.map[i].type);
679 break;
680 }
681 }
682 }
683
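/*
 * EFI counterpart of limit_regions(): compact the EFI memory descriptor
 * array in place, keeping available memory only below 'size' (clamping
 * any descriptor that straddles the limit) while always retaining
 * EFI_MEMORY_RUNTIME regions so runtime services keep working.
 */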
684 static __init __always_inline void efi_limit_regions(unsigned long long size)
685 {
686 unsigned long long current_addr = 0;
687 efi_memory_desc_t *md, *next_md;
688 void *p, *p1;
689 int i, j;
690
691 j = 0;
692 p1 = memmap.map;
693 for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
694 md = p;
695 next_md = p1;
696 current_addr = md->phys_addr +
697 PFN_PHYS(md->num_pages);
698 if (is_available_memory(md)) {
699 if (md->phys_addr >= size) continue;
700 memcpy(next_md, md, memmap.desc_size);
701 if (current_addr >= size) {
702 next_md->num_pages -=
703 PFN_UP(current_addr-size);
704 }
705 p1 += memmap.desc_size;
706 next_md = p1;
707 j++;
708 } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
709 EFI_MEMORY_RUNTIME) {
710 /* In order to make runtime services
711 * available we have to include runtime
712 * memory regions in the memory map */
713 memcpy(next_md, md, memmap.desc_size);
714 p1 += memmap.desc_size;
715 next_md = p1;
716 j++;
717 }
718 }
719 memmap.nr_map = j;
720 memmap.map_end = memmap.map +
721 (memmap.nr_map * memmap.desc_size);
722 }
723
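/*
 * Clip the memory map (e820 or, with EFI, the EFI memmap) so that
 * usable RAM ends at 'size' bytes; used when the user limits memory on
 * the command line (see parse_memmap() below).
 */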
724 void __init limit_regions(unsigned long long size)
725 {
726 unsigned long long current_addr;
727 int i;
728
729 print_memory_map("limit_regions start");
730 if (efi_enabled) {
731 efi_limit_regions(size);
732 return;
733 }
734 for (i = 0; i < e820.nr_map; i++) {
735 current_addr = e820.map[i].addr + e820.map[i].size;
736 if (current_addr < size)
737 continue;
738
739 if (e820.map[i].type != E820_RAM)
740 continue;
741
742 if (e820.map[i].addr >= size) {
743 /*
744 * This region starts past the end of the
745 * requested size, skip it completely.
746 */
747 e820.nr_map = i;
748 } else {
749 e820.nr_map = i + 1;
750 e820.map[i].size -= current_addr - size;
751 }
752 print_memory_map("limit_regions endfor");
753 return;
754 }
755 print_memory_map("limit_regions endfunc");
756 }
757
758 /*
759 * This function checks if any part of the range <start,end> is mapped
760 * with type.
761 */
762 int
763 e820_any_mapped(u64 start, u64 end, unsigned type)
764 {
765 int i;
766 for (i = 0; i < e820.nr_map; i++) {
767 const struct e820entry *ei = &e820.map[i];
768 if (type && ei->type != type)
769 continue;
770 if (ei->addr >= end || ei->addr + ei->size <= start)
771 continue;
772 return 1;
773 }
774 return 0;
775 }
776 EXPORT_SYMBOL_GPL(e820_any_mapped);
777
778 /*
779 * This function checks if the entire range <start,end> is mapped with type.
780 *
781 * Note: this function only works correctly if the e820 table is sorted and
782 * non-overlapping, which is the case
783 */
784 int __init
785 e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
786 {
787 u64 start = s;
788 u64 end = e;
789 int i;
790 for (i = 0; i < e820.nr_map; i++) {
791 struct e820entry *ei = &e820.map[i];
792 if (type && ei->type != type)
793 continue;
794 /* does this e820 entry overlap the requested range at all? */
795 if (ei->addr >= end || ei->addr + ei->size <= start)
796 continue;
797 /* if the region covers the beginning of <start,end>, advance
798 * start to the end of the region, since the range is mapped up to there
799 */
800 if (ei->addr <= start)
801 start = ei->addr + ei->size;
802 /* if start is now at or beyond end, we're done, full
803 * coverage */
804 if (start >= end)
805 return 1; /* we're done */
806 }
807 return 0;
808 }
809
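/*
 * Early handler for the "memmap=" boot option. A rough summary of the
 * forms handled below (sizes parsed by memparse(), so K/M/G suffixes
 * are accepted):
 *   memmap=exactmap   start from an empty map; the user supplies the rest
 *   memmap=nn@ss      add a RAM region of size nn at address ss
 *   memmap=nn#ss      add an ACPI data region of size nn at ss
 *   memmap=nn$ss      add a reserved region of size nn at ss
 *   memmap=nn         clip usable memory to nn
 */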
810 static int __init parse_memmap(char *arg)
811 {
812 if (!arg)
813 return -EINVAL;
814
815 if (strcmp(arg, "exactmap") == 0) {
816 #ifdef CONFIG_CRASH_DUMP
817 /* If we are doing a crash dump, we
818 * still need to know the real mem
819 * size before the original memory map is
820 * reset.
821 */
822 find_max_pfn();
823 saved_max_pfn = max_pfn;
824 #endif
825 e820.nr_map = 0;
826 user_defined_memmap = 1;
827 } else {
828 /* If the user specifies memory size, we
829 * limit the BIOS-provided memory map to
830 * that size. exactmap can be used to specify
831 * the exact map. mem=number can be used to
832 * trim the existing memory map.
833 */
834 unsigned long long start_at, mem_size;
835
836 mem_size = memparse(arg, &arg);
837 if (*arg == '@') {
838 start_at = memparse(arg+1, &arg);
839 add_memory_region(start_at, mem_size, E820_RAM);
840 } else if (*arg == '#') {
841 start_at = memparse(arg+1, &arg);
842 add_memory_region(start_at, mem_size, E820_ACPI);
843 } else if (*arg == '$') {
844 start_at = memparse(arg+1, &arg);
845 add_memory_region(start_at, mem_size, E820_RESERVED);
846 } else {
847 limit_regions(mem_size);
848 user_defined_memmap = 1;
849 }
850 }
851 return 0;
852 }
853 early_param("memmap", parse_memmap);