/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *        Russ Anderson <rja@sgi.com>
 *        Jesse Barnes <jbarnes@sgi.com>
 *        Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
        struct ia64_node_data *node_data;
        unsigned long pernode_addr;
        unsigned long pernode_size;
        unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
        unsigned long num_dma_physpages;
#endif
        unsigned long min_pfn;
        unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET        (32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)                                        \
        ((((addr) + 1024*1024-1) & ~(1024*1024-1)) +                        \
             (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
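
/*
 * Illustrative arithmetic, not part of the original source: assuming the
 * usual ia64 PERCPU_PAGE_SIZE of 64KB, NODEDATA_ALIGN() rounds the
 * candidate address up to a 1MB boundary and then adds a per-node stride
 * of node * 64KB, wrapped at 32MB.  For example:
 *
 *        NODEDATA_ALIGN(0x4080000, 3)
 *                = 0x4100000 + ((3 * 0x10000) & 0x1ffffff)
 *                = 0x4130000
 *
 * so each node's structures start at a different cache color.
 */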

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap.  min_low_pfn and max_low_pfn are updated separately, via the
 * find_max_min_low_pfn() walk in find_memory().
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
                                  int node)
{
        unsigned long spfn, epfn, end = start + len;
        struct bootmem_data *bdp = &bootmem_node_data[node];

        epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
        spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

        if (!bdp->node_low_pfn) {
                bdp->node_min_pfn = spfn;
                bdp->node_low_pfn = epfn;
        } else {
                bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
        }

        return 0;
}
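
/*
 * Worked example (illustrative; assumes the common 16MB IA64_GRANULE_SIZE
 * and 16KB pages): a 40MB range starting at physical 0x1400000 (20MB)
 * ends at 0x3c00000 (60MB); GRANULEROUNDDOWN gives 0x1000000 (16MB) and
 * GRANULEROUNDUP gives 0x4000000 (64MB), so spfn = 0x400 and
 * epfn = 0x1000.  The pages between 16MB and 20MB don't exist physically
 * and are covered only by the virtual memmap.
 */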

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
        int cpu, n = 0;

        for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;

        return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
        unsigned long pernodesize = 0, cpus;

        cpus = early_nr_cpus_node(node);
        pernodesize += PERCPU_PAGE_SIZE * cpus;
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
}
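
/*
 * Illustrative sizing, not from the original source (assumes 64KB
 * PERCPU_PAGE_SIZE, 128-byte L1 cachelines and 16KB pages): a node with
 * four early cpus needs 4 * 64KB of per-cpu space, a node-dependent
 * cacheline pad, two cacheline-aligned pg_data_t-sized areas (matching
 * the two pg_data_t-sized steps fill_pernode() takes below) and one
 * ia64_node_data, all rounded up to a whole 16KB page.
 */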

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
        int cpu;

        for_each_possible_early_cpu(cpu) {
                if (cpu == 0) {
                        void *cpu0_data = __cpu0_per_cpu;
                        __per_cpu_offset[cpu] = (char *)cpu0_data -
                                __per_cpu_start;
                } else if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
                                __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
                }
        }
#endif
        return cpu_data;
}
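
/*
 * Sketch of the mechanism (assumption, based on the per-cpu
 * implementation of this era): __per_cpu_offset[cpu] is simply the delta
 * between cpu's copy of the per-cpu data and the canonical .data.percpu
 * addresses, so a per_cpu(var, cpu) access resolves to roughly
 *
 *        *(&per_cpu__var + __per_cpu_offset[cpu])
 *
 * which is why copying the section and recording the delta is all the
 * setup each cpu needs.
 */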

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
                                unsigned long pernodesize)
{
        void *cpu_data;
        int cpus = early_nr_cpus_node(node);
        struct bootmem_data *bdp = &bootmem_node_data[node];

        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
        memset(__va(pernode), 0, pernodesize);

        cpu_data = (void *)pernode;
        pernode += PERCPU_PAGE_SIZE * cpus;
        pernode += node * L1_CACHE_BYTES;

        pgdat_list[node] = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

        mem_data[node].node_data = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

        pgdat_list[node]->bdata = bdp;
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

        cpu_data = per_cpu_node_setup(cpu_data, node);

        return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all
 *   |                        | non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
                                     int node)
{
        unsigned long spfn, epfn;
        unsigned long pernodesize = 0, pernode, pages, mapsize;
        struct bootmem_data *bdp = &bootmem_node_data[node];

        spfn = start >> PAGE_SHIFT;
        epfn = (start + len) >> PAGE_SHIFT;

        pages = bdp->node_low_pfn - bdp->node_min_pfn;
        mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

        /*
         * Make sure this memory falls within this node's usable memory
         * since we may have thrown some away in build_maps().
         */
        if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
                return 0;

        /* Don't setup this node's local space twice... */
        if (mem_data[node].pernode_addr)
                return 0;

        /*
         * Calculate total size needed, incl. what's necessary
         * for good alignment and alias prevention.
         */
        pernodesize = compute_pernodesize(node);
        pernode = NODEDATA_ALIGN(start, node);

        /* Is this range big enough for what we want to store here? */
        if (start + len > (pernode + pernodesize + mapsize))
                fill_pernode(node, pernode, pernodesize);

        return 0;
}
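
/*
 * Worked example (illustrative; assumes 16KB pages): a node spanning 1GB
 * covers 65536 pages, so its bootmem bitmap needs 65536 bits = 8KB,
 * which bootmem_bootmap_pages() rounds up to one page.  A candidate
 * range is only used when it can hold the aligned pernode area *and*
 * that bitmap, which find_memory() later places at pernode + pernodesize.
 */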

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                    int node)
{
        free_bootmem_node(pgdat_list[node], start, len);

        return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
        unsigned long base, size, pages;
        struct bootmem_data *bdp;
        int node;

        for_each_online_node(node) {
                pg_data_t *pdp = pgdat_list[node];

                if (node_isset(node, memory_less_mask))
                        continue;

                bdp = pdp->bdata;

                /* First the bootmem_map itself */
                pages = bdp->node_low_pfn - bdp->node_min_pfn;
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
                reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);

                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
                reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
        }
}

static void __meminit scatter_node_data(void)
{
        pg_data_t **dst;
        int node;

        /*
         * for_each_online_node() can't be used here.
         * node_online_map is not set for hot-added nodes at this time,
         * because we are halfway through initialization of the new node's
         * structures.  If for_each_online_node() were used, a new node's
         * pg_data_ptrs would not be initialized.  Instead,
         * pgdat_list[] is checked.
         */
        for_each_node(node) {
                if (pgdat_list[node]) {
                        dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
                        memcpy(dst, pgdat_list, sizeof(pgdat_list));
                }
        }
}
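
/*
 * Background sketch (assumption, based on the usual ia64 nodedata.h
 * definitions): NODE_DATA(nid) resolves through the calling cpu's
 * node-local pg_data_ptrs[] copy rather than a single shared table, so
 * pgdat lookups never cross nodes; scatter_node_data() is what keeps
 * every node's copy of that array coherent.
 */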

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
        int cpu, node;

        scatter_node_data();

#ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
        for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
#else
        {
                struct cpuinfo_ia64 *cpu0_cpu_info;
                cpu = 0;
                node = node_cpuid[cpu].nid;
                cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
                        ((char *)&per_cpu__cpu_info - __per_cpu_start));
                cpu0_cpu_info->node_data = mem_data[node].node_data;
        }
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 * node, but fall back to any other node when __alloc_bootmem_node fails
 * for best.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
        void *ptr = NULL;
        u8 best = 0xff;
        int bestnode = -1, node, anynode = 0;

        for_each_online_node(node) {
                if (node_isset(node, memory_less_mask))
                        continue;
                else if (node_distance(nid, node) < best) {
                        best = node_distance(nid, node);
                        bestnode = node;
                }
                anynode = node;
        }

        if (bestnode == -1)
                bestnode = anynode;

        ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
                                   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

        return ptr;
}

/**
 * memory_less_nodes - allocate and initialize the pernode information for
 * CPU-only nodes.
 */
static void __init memory_less_nodes(void)
{
        unsigned long pernodesize;
        void *pernode;
        int node;

        for_each_node_mask(node, memory_less_mask) {
                pernodesize = compute_pernodesize(node);
                pernode = memory_less_node_alloc(node, pernodesize);
                fill_pernode(node, __pa(pernode), pernodesize);
        }

        return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
        int node;

        reserve_memory();

        if (num_online_nodes() == 0) {
                printk(KERN_ERR "node info missing!\n");
                node_set_online(0);
        }

        nodes_or(memory_less_mask, memory_less_mask, node_online_map);
        min_low_pfn = -1;
        max_low_pfn = 0;

        /* These actually end up getting called by call_pernode_memory() */
        efi_memmap_walk(filter_rsvd_memory, build_node_maps);
        efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
        efi_memmap_walk(find_max_min_low_pfn, NULL);

        for_each_online_node(node)
                if (bootmem_node_data[node].node_low_pfn) {
                        node_clear(node, memory_less_mask);
                        mem_data[node].min_pfn = ~0UL;
                }

        efi_memmap_walk(filter_memory, register_active_ranges);

        /*
         * Initialize the boot memory maps in reverse order since that's
         * what the bootmem allocator expects
         */
        for (node = MAX_NUMNODES - 1; node >= 0; node--) {
                unsigned long pernode, pernodesize, map;
                struct bootmem_data *bdp;

                if (!node_online(node))
                        continue;
                else if (node_isset(node, memory_less_mask))
                        continue;

                bdp = &bootmem_node_data[node];
                pernode = mem_data[node].pernode_addr;
                pernodesize = mem_data[node].pernode_size;
                map = pernode + pernodesize;

                init_bootmem_node(pgdat_list[node],
                                  map >> PAGE_SHIFT,
                                  bdp->node_min_pfn,
                                  bdp->node_low_pfn);
        }

        efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

        reserve_pernode_space();
        memory_less_nodes();
        initialize_pernode_data();

        max_pfn = max_low_pfn;

        find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
        int cpu;
        static int first_time = 1;

        if (first_time) {
                first_time = 0;
                for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }

        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
        int i, total_reserved = 0;
        int total_shared = 0, total_cached = 0;
        unsigned long total_present = 0;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Node memory in pages:\n");
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int shared = 0, cached = 0, reserved = 0;

                pgdat_resize_lock(pgdat, &flags);
                present = pgdat->node_present_pages;
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page;
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();
                        if (pfn_valid(pgdat->node_start_pfn + i))
                                page = pfn_to_page(pgdat->node_start_pfn + i);
                        else {
                                i = vmemmap_find_next_valid_pfn(pgdat->node_id,
                                        i) - 1;
                                continue;
                        }
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
                total_present += present;
                total_reserved += reserved;
                total_cached += cached;
                total_shared += shared;
                printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
                       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
                       present, reserved, shared, cached);
        }
        printk(KERN_INFO "%ld pages of RAM\n", total_present);
        printk(KERN_INFO "%d reserved pages\n", total_reserved);
        printk(KERN_INFO "%d pages shared\n", total_shared);
        printk(KERN_INFO "%d pages swap cached\n", total_cached);
        printk(KERN_INFO "Total of %ld pages in page table cache\n",
               quicklist_total_size());
        printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes.  Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
        unsigned long rs, re, end = start + len;
        void (*func)(unsigned long, unsigned long, int);
        int i;

        start = PAGE_ALIGN(start);
        end &= PAGE_MASK;
        if (start >= end)
                return;

        func = arg;

        if (!num_node_memblks) {
                /* No SRAT table, so assume one node (node 0) */
                if (start < end)
                        (*func)(start, end - start, 0);
                return;
        }

        for (i = 0; i < num_node_memblks; i++) {
                rs = max(start, node_memblk[i].start_paddr);
                re = min(end, node_memblk[i].start_paddr +
                         node_memblk[i].size);

                if (rs < re)
                        (*func)(rs, re - rs, node_memblk[i].nid);

                if (re == end)
                        break;
        }
}
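
/*
 * Illustrative walk (hypothetical SRAT contents): with
 * node_memblk[] = { [0] = 0-2GB on nid 0, [1] = 2GB-4GB on nid 1 },
 * a call with start = 1.5GB and len = 1GB results in
 *
 *        (*func)(1.5GB, 0.5GB, 0);
 *        (*func)(2.0GB, 0.5GB, 1);
 *
 * i.e. the block is split along the node boundary.
 */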

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will set up these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
        unsigned long end = start + len;

        mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
        if (start <= __pa(MAX_DMA_ADDRESS))
                mem_data[node].num_dma_physpages +=
                        (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
        start = GRANULEROUNDDOWN(start);
        end = GRANULEROUNDUP(end);
        mem_data[node].max_pfn = max(mem_data[node].max_pfn,
                                     end >> PAGE_SHIFT);
        mem_data[node].min_pfn = min(mem_data[node].min_pfn,
                                     start >> PAGE_SHIFT);

        return 0;
}
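
/*
 * Example (illustrative; on ia64, __pa(MAX_DMA_ADDRESS) is the 4GB
 * 32-bit DMA limit): a 1GB range starting at 3.5GB adds all 65536
 * 16KB pages to num_physpages but only the 0.5GB below 4GB to
 * num_dma_physpages, while min_pfn/max_pfn are widened to the enclosing
 * granule boundaries.
 */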

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
        unsigned long max_dma;
        unsigned long pfn_offset = 0;
        unsigned long max_pfn = 0;
        int node;
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

        efi_memmap_walk(filter_rsvd_memory, count_node_pages);

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
        vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                sizeof(struct page));
        vmem_map = (struct page *) vmalloc_end;
        efi_memmap_walk(create_mem_map_page_table, NULL);
        printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

        for_each_online_node(node) {
                num_physpages += mem_data[node].num_physpages;
                pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
                NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
                if (mem_data[node].max_pfn > max_pfn)
                        max_pfn = mem_data[node].max_pfn;
        }

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = max_dma;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_pfn;
        free_area_init_nodes(max_zone_pfns);

        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
        unsigned long size = compute_pernodesize(nid);

        return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
        kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
        pgdat_list[update_node] = update_pgdat;
        scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long size, int node)
{
        return vmemmap_populate_basepages(start_page, size, node);
}
#endif