memory hotplug: allocate usemap on the section with pgdat
mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two racing callers from installing two
	 * different allocations for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
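
/*
 * Illustrative scenario for the re-check under the lock: two CPUs
 * hot-add sections that share a root.  Both see mem_section[root] ==
 * NULL and both call sparse_index_alloc(); whichever takes
 * index_init_lock second finds the winner's pointer already
 * installed, returns -EEXIST, and its own allocation is simply left
 * unused.
 */
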
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
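
/*
 * Worked example with hypothetical numbers: if SECTIONS_PER_ROOT were
 * 256 and ms pointed at index 10 of the root covering sections
 * 768..1023, the loop would stop at root_nr == 3 and the function
 * would return 3 * 256 + 10 == 778.
 */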

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
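
/*
 * Round-trip example: sparse_encode_early_nid(3) stores
 * 3 << SECTION_NID_SHIFT; memory_present() below then ORs
 * SECTION_MARKED_PRESENT into the low bits, and sparse_early_nid()
 * shifts those flag bits back out to recover 3.
 */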

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
	unsigned long pfn;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (start >= max_arch_pfn)
		return;
	if (end >= max_arch_pfn)
		end = max_arch_pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
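
/*
 * memory_present() is invoked from architecture boot code, once per
 * node range, before sparse_init() runs.  A sketch of the call (the
 * exact call site varies by architecture):
 *
 *	memory_present(nid, node_start_pfn, node_end_pfn);
 */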

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map pointer such that,
 * for any page in the section, page - section_mem_map yields that
 * page's actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
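
/*
 * Example with symbolic values: let section pnum start at pfn P and
 * let its mem_map array live at address M.  The encoded value is
 * M - P (in units of struct page), so (struct page *)(M - P) + pfn
 * lands on entry M + (pfn - P), exactly the page for pfn; decoding
 * adds P back and returns M.
 */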

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
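
/*
 * Worked example, assuming 128MB sections, 2MB pageblocks and 3
 * pageblock flag bits: 64 pageblocks per section * 3 bits = 192 bits;
 * roundup(192, 8) / 8 = 24 bytes, already a multiple of
 * sizeof(unsigned long) on 64-bit, so usemap_size() returns 24 --
 * matching the "24 bytes" the comment in sparse_init() below refers
 * to.
 */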

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap, section_nr;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);
	struct pglist_data *pgdat = NODE_DATA(nid);

	/*
	 * The page holding a usemap cannot be freed until all the
	 * sections that use it have been removed, and the same is true
	 * of the page holding the pgdat.  So if section A holds the
	 * pgdat and section B holds usemaps for other sections
	 * (including A), neither section can ever be removed: each
	 * depends on the other.  To break the cycle, collect every
	 * usemap on the section that holds the pgdat.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	usemap = alloc_bootmem_section(usemap_size(), section_nr);
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}
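
/*
 * Example of the placement scheme above (hypothetical numbers): if
 * node 1's pgdat lives at a physical address inside section 40, the
 * usemaps for every present section on node 1 are carved out of
 * section 40 too.  Removing, say, section 41 later then never blocks
 * on a usemap stored inside section 41 itself.
 */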

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
			PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
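
/*
 * alloc_remap() is a hook for architectures (i386 NUMA, at this
 * point) that set aside a remapped region for node data; everywhere
 * else it returns NULL and the node-local bootmem path above is
 * taken.
 */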

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map comes from large pages (2MB on 64-bit x86) while
	 * each usemap is tiny (about 24 bytes).  Allocating 2MB
	 * (2MB-aligned) and 24 bytes alternately pushes every
	 * following 2MB block onto the next 2MB boundary, so on a big
	 * system memory ends up full of holes.  Instead, allocate all
	 * the usemaps first so the 2MB pages can be laid out
	 * contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section() right after
	 * each sparse_early_mem_map_alloc(), so allocate usemap_map up
	 * front.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("cannot allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}
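
/*
 * Fragmentation arithmetic behind the comment above (illustrative):
 * interleaved, each present section costs one 2MB-aligned map plus a
 * 24-byte usemap, so the next map starts a full 2MB later and almost
 * 2MB per section is wasted; batched, thousands of usemaps pack into
 * a few pages and the 2MB maps sit back to back.
 */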

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	if (!usemap)
		return;

	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(virt_to_page(usemap))) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * TODO: the allocations came from bootmem - how do we free
	 * them up?
	 */
	printk(KERN_WARNING "Not freeing up allocations from bootmem "
			"- leaking memory\n");
}
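
/*
 * The PageSlab() test above works because a usemap from
 * __kmalloc_section_usemap() sits in slab-backed pages, while a
 * boot-time usemap from sparse_early_usemap_alloc() sits in plain
 * bootmem pages, which are never PageSlab.
 */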

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the memmap and usemap allocated here were not
 * consumed and are freed before returning.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking,
	 * and it may allocate with kmalloc(), which can sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
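
/*
 * Sketch of a typical caller, from the memory hotplug path (the real
 * one lives in mm/memory_hotplug.c):
 *
 *	ret = sparse_add_one_section(zone, start_pfn, nr_pages);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 * -EEXIST only means the section was already present, so callers
 * generally do not treat it as a failure.
 */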

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */