arch/ia64/mm/discontig.c
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
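
/*
 * Worked example (editor's illustration; assumes the usual ia64 64KB
 * PERCPU_PAGE_SIZE): NODEDATA_ALIGN(0x4001234, 3) rounds the address up
 * to the next 1MB boundary, 0x4100000, then adds the node stride
 * 3 * 0x10000 = 0x30000, yielding 0x4130000.  Each node's structures thus
 * start at a different 64KB-strided offset within a 32MB window, so the
 * hot per-node data of different nodes does not map to the same cache
 * lines.
 */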

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
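
/*
 * Worked example (editor's illustration; assumes the default 16MB
 * IA64_GRANULE_SIZE and 16KB pages): a range [0x4100000, 0x8200000) is
 * widened to the granule-aligned span [0x4000000, 0x9000000), so
 * cstart = 0x4000000 and epfn = 0x9000000 >> 14 = 0x2400.  The widened
 * pages that don't actually exist are simply covered by the virtual
 * memmap, as noted above.
 */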

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __init early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __init compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
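
/*
 * Worked example (editor's illustration; assumes 64KB PERCPU_PAGE_SIZE,
 * 128-byte L1_CACHE_BYTES and 16KB pages): for node 1 with 4 cpus this is
 * 4 * 0x10000 (per-cpu copies) + 1 * 128 (node stride) + the cache-aligned
 * sizes of pg_data_t and struct ia64_node_data, with PAGE_ALIGN() rounding
 * the sum up to the next 16KB boundary -- a little over 256KB in total.
 */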

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}
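
/*
 * Editor's note on the invariant established above (a sketch, not from the
 * original source): for any cpu on @node, __per_cpu_offset[cpu] is set so
 * that a per-cpu variable's static address plus __per_cpu_offset[cpu] lands
 * inside that cpu's PERCPU_PAGE_SIZE slot in the node-local region, which
 * is what per_cpu() accesses rely on.
 */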

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
				unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	mem_data[node].pgdat = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	mem_data[node].pgdat->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_node_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = mem_data[node].pgdat;

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	pg_data_t *pgdat_list[MAX_NUMNODES];
	int cpu, node;

	for_each_online_node(node)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for_each_online_node(node) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}
#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 * node (the node with memory that is nearest by SLIT distance), falling
 * back to any other node with memory when no such node is found.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
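
/*
 * Worked example (hypothetical SLIT distances, for illustration): if
 * memoryless node 2 sees node_distance(2, 0) == 21 and
 * node_distance(2, 1) == 17, and nodes 0 and 1 both have memory, then
 * bestnode == 1 and node 2's pernode area is carved out of node 1's
 * bootmem.  If every distance were >= 0xff, bestnode would stay -1 and
 * anynode (the last node with memory) would be used instead.
 */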

/**
 * pgdat_insert - insert the pgdat into global pgdat_list
 * @pgdat: the pgdat for a node.
 */
static void __init pgdat_insert(pg_data_t *pgdat)
{
	pg_data_t *prev = NULL, *next;

	for_each_pgdat(next)
		if (pgdat->node_id < next->node_id)
			break;
		else
			prev = next;

	if (prev) {
		prev->pgdat_next = pgdat;
		pgdat->pgdat_next = next;
	} else {
		pgdat->pgdat_next = pgdat_list;
		pgdat_list = pgdat;
	}

	return;
}
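
/*
 * Example (editor's illustration): with pgdat_list currently linking nodes
 * 0 -> 2 -> 3, inserting the pgdat for node 1 stops the walk at node 2,
 * so prev is node 0 and the list becomes 0 -> 1 -> 2 -> 3.  Keeping the
 * list sorted by node_id means for_each_pgdat() visits nodes in order.
 */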

/**
 * memory_less_nodes - allocate and initialize CPU-only nodes' pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

#ifdef CONFIG_SPARSEMEM
/**
 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
 * @start: virtual start of range
 * @end: virtual end of range
 * @arg: unused
 *
 * Simply calls SPARSEMEM to register memory section(s).
 */
static int __init register_sparse_mem(unsigned long start, unsigned long end,
				      void *arg)
{
	int nid;

	start = __pa(start) >> PAGE_SHIFT;
	end = __pa(end) >> PAGE_SHIFT;
	nid = early_pfn_to_nid(start);
	memory_present(nid, start, end);

	return 0;
}

static void __init arch_sparse_init(void)
{
	efi_memmap_walk(register_sparse_mem, NULL);
	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}
	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else
				continue;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n",
	       pgtable_quicklist_total_size());
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
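
/*
 * Worked example (hypothetical SRAT layout, for illustration): with
 * node_memblk entries [0x0, 0x4000000) on node 0 and
 * [0x4000000, 0x8000000) on node 1, a call with start = 0x3f00000 and
 * len = 0x200000 is split into (*func)(0x3f00000, 0x100000, 0) and
 * (*func)(0x4000000, 0x100000, 1).
 */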

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_node_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
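
/*
 * Worked example (editor's illustration; assumes __pa(MAX_DMA_ADDRESS) at
 * 4GB and 16KB pages): a 2GB range starting at 3GB straddles the DMA
 * limit, so num_physpages grows by 0x20000 pages but num_dma_physpages
 * only by the 1GB below 4GB, i.e. 0x10000 pages.  min_pfn and max_pfn
 * then track the granule-rounded edges of the range.
 */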

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	arch_sparse_init();

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}

		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	/*
	 * Make memory-less nodes become members of the known nodes.
	 */
	for_each_node_mask(node, memory_less_mask)
		pgdat_insert(mem_data[node].pgdat);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}