mm/memory_hotplug.c
1 /*
2 * linux/mm/memory_hotplug.c
3 *
4 * Copyright (C)
5 */
6
7 #include <linux/stddef.h>
8 #include <linux/mm.h>
9 #include <linux/swap.h>
10 #include <linux/interrupt.h>
11 #include <linux/pagemap.h>
12 #include <linux/compiler.h>
13 #include <linux/export.h>
14 #include <linux/pagevec.h>
15 #include <linux/writeback.h>
16 #include <linux/slab.h>
17 #include <linux/sysctl.h>
18 #include <linux/cpu.h>
19 #include <linux/memory.h>
20 #include <linux/memory_hotplug.h>
21 #include <linux/highmem.h>
22 #include <linux/vmalloc.h>
23 #include <linux/ioport.h>
24 #include <linux/delay.h>
25 #include <linux/migrate.h>
26 #include <linux/page-isolation.h>
27 #include <linux/pfn.h>
28 #include <linux/suspend.h>
29 #include <linux/mm_inline.h>
30 #include <linux/firmware-map.h>
31 #include <linux/stop_machine.h>
32 #include <linux/hugetlb.h>
33 #include <linux/memblock.h>
34 #include <linux/bootmem.h>
35
36 #include <asm/tlbflush.h>
37
38 #include "internal.h"
39
40 /*
41 * online_page_callback contains pointer to current page onlining function.
42 * Initially it is generic_online_page(). If it is required it could be
43 * changed by calling set_online_page_callback() for callback registration
44 * and restore_online_page_callback() for generic callback restore.
45 */
46
47 static void generic_online_page(struct page *page);
48
49 static online_page_callback_t online_page_callback = generic_online_page;
50 static DEFINE_MUTEX(online_page_callback_lock);
51
52 /* The same as the cpu_hotplug lock, but for memory hotplug. */
53 static struct {
54 struct task_struct *active_writer;
55 struct mutex lock; /* Synchronizes accesses to refcount, */
56 /*
57 * Also blocks the new readers during
58 * an ongoing mem hotplug operation.
59 */
60 int refcount;
61
62 #ifdef CONFIG_DEBUG_LOCK_ALLOC
63 struct lockdep_map dep_map;
64 #endif
65 } mem_hotplug = {
66 .active_writer = NULL,
67 .lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
68 .refcount = 0,
69 #ifdef CONFIG_DEBUG_LOCK_ALLOC
70 .dep_map = {.name = "mem_hotplug.lock" },
71 #endif
72 };
73
74 /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
75 #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
76 #define memhp_lock_acquire() lock_map_acquire(&mem_hotplug.dep_map)
77 #define memhp_lock_release() lock_map_release(&mem_hotplug.dep_map)
78
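/*
 * get_online_mems()/put_online_mems() below are the read side of the
 * mem_hotplug lock above: readers bump mem_hotplug.refcount under
 * mem_hotplug.lock, while mem_hotplug_begin() (the write side) records
 * itself as active_writer and sleeps until the refcount drops to zero;
 * the last reader wakes it. An illustrative reader (a sketch, not taken
 * from any particular caller) looks roughly like:
 *
 *	get_online_mems();
 *	... inspect pages/zones that must not be hot-removed meanwhile ...
 *	put_online_mems();
 */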
79 void get_online_mems(void)
80 {
81 might_sleep();
82 if (mem_hotplug.active_writer == current)
83 return;
84 memhp_lock_acquire_read();
85 mutex_lock(&mem_hotplug.lock);
86 mem_hotplug.refcount++;
87 mutex_unlock(&mem_hotplug.lock);
88
89 }
90
91 void put_online_mems(void)
92 {
93 if (mem_hotplug.active_writer == current)
94 return;
95 mutex_lock(&mem_hotplug.lock);
96
97 if (WARN_ON(!mem_hotplug.refcount))
98 mem_hotplug.refcount++; /* try to fix things up */
99
100 if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
101 wake_up_process(mem_hotplug.active_writer);
102 mutex_unlock(&mem_hotplug.lock);
103 memhp_lock_release();
104
105 }
106
107 void mem_hotplug_begin(void)
108 {
109 mem_hotplug.active_writer = current;
110
111 memhp_lock_acquire();
112 for (;;) {
113 mutex_lock(&mem_hotplug.lock);
114 if (likely(!mem_hotplug.refcount))
115 break;
116 __set_current_state(TASK_UNINTERRUPTIBLE);
117 mutex_unlock(&mem_hotplug.lock);
118 schedule();
119 }
120 }
121
122 void mem_hotplug_done(void)
123 {
124 mem_hotplug.active_writer = NULL;
125 mutex_unlock(&mem_hotplug.lock);
126 memhp_lock_release();
127 }
128
129 /* add this memory to iomem resource */
130 static struct resource *register_memory_resource(u64 start, u64 size)
131 {
132 struct resource *res;
133 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
134 BUG_ON(!res);
135
136 res->name = "System RAM";
137 res->start = start;
138 res->end = start + size - 1;
139 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
140 if (request_resource(&iomem_resource, res) < 0) {
141 pr_debug("System RAM resource %pR cannot be added\n", res);
142 kfree(res);
143 res = NULL;
144 }
145 return res;
146 }
147
148 static void release_memory_resource(struct resource *res)
149 {
150 if (!res)
151 return;
152 release_resource(res);
153 kfree(res);
154 return;
155 }
156
157 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
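/*
 * get_page_bootmem()/put_page_bootmem() account for memmap, usemap and
 * pgdat pages that were allocated from bootmem/memblock: the bootmem type
 * is stashed in page->lru.next, the section number or node id in
 * page_private(), and an extra reference is taken on the page. At
 * hot-remove time put_page_bootmem() drops that reference and, once only
 * the initial reference remains, hands the page back to the buddy
 * allocator via free_reserved_page().
 */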
158 void get_page_bootmem(unsigned long info, struct page *page,
159 unsigned long type)
160 {
161 page->lru.next = (struct list_head *) type;
162 SetPagePrivate(page);
163 set_page_private(page, info);
164 atomic_inc(&page->_count);
165 }
166
167 void put_page_bootmem(struct page *page)
168 {
169 unsigned long type;
170
171 type = (unsigned long) page->lru.next;
172 BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
173 type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
174
175 if (atomic_dec_return(&page->_count) == 1) {
176 ClearPagePrivate(page);
177 set_page_private(page, 0);
178 INIT_LIST_HEAD(&page->lru);
179 free_reserved_page(page);
180 }
181 }
182
183 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
184 #ifndef CONFIG_SPARSEMEM_VMEMMAP
185 static void register_page_bootmem_info_section(unsigned long start_pfn)
186 {
187 unsigned long *usemap, mapsize, section_nr, i;
188 struct mem_section *ms;
189 struct page *page, *memmap;
190
191 section_nr = pfn_to_section_nr(start_pfn);
192 ms = __nr_to_section(section_nr);
193
194 /* Get section's memmap address */
195 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
196
197 /*
198 * Get page for the memmap's phys address
199 * XXX: need more consideration for sparse_vmemmap...
200 */
201 page = virt_to_page(memmap);
202 mapsize = sizeof(struct page) * PAGES_PER_SECTION;
203 mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
204
205 /* remember memmap's page */
206 for (i = 0; i < mapsize; i++, page++)
207 get_page_bootmem(section_nr, page, SECTION_INFO);
208
209 usemap = __nr_to_section(section_nr)->pageblock_flags;
210 page = virt_to_page(usemap);
211
212 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
213
214 for (i = 0; i < mapsize; i++, page++)
215 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
216
217 }
218 #else /* CONFIG_SPARSEMEM_VMEMMAP */
219 static void register_page_bootmem_info_section(unsigned long start_pfn)
220 {
221 unsigned long *usemap, mapsize, section_nr, i;
222 struct mem_section *ms;
223 struct page *page, *memmap;
224
225 if (!pfn_valid(start_pfn))
226 return;
227
228 section_nr = pfn_to_section_nr(start_pfn);
229 ms = __nr_to_section(section_nr);
230
231 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
232
233 register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
234
235 usemap = __nr_to_section(section_nr)->pageblock_flags;
236 page = virt_to_page(usemap);
237
238 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
239
240 for (i = 0; i < mapsize; i++, page++)
241 get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
242 }
243 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
244
245 void register_page_bootmem_info_node(struct pglist_data *pgdat)
246 {
247 unsigned long i, pfn, end_pfn, nr_pages;
248 int node = pgdat->node_id;
249 struct page *page;
250 struct zone *zone;
251
252 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
253 page = virt_to_page(pgdat);
254
255 for (i = 0; i < nr_pages; i++, page++)
256 get_page_bootmem(node, page, NODE_INFO);
257
258 zone = &pgdat->node_zones[0];
259 for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
260 if (zone_is_initialized(zone)) {
261 nr_pages = zone->wait_table_hash_nr_entries
262 * sizeof(wait_queue_head_t);
263 nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
264 page = virt_to_page(zone->wait_table);
265
266 for (i = 0; i < nr_pages; i++, page++)
267 get_page_bootmem(node, page, NODE_INFO);
268 }
269 }
270
271 pfn = pgdat->node_start_pfn;
272 end_pfn = pgdat_end_pfn(pgdat);
273
274 /* register section info */
275 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
276 /*
277 * Some platforms can assign the same pfn to multiple nodes - on
278 * node0 as well as nodeN. To avoid registering a pfn against
279 * multiple nodes we check that this pfn does not already
280 * reside in some other nodes.
281 */
282 if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
283 register_page_bootmem_info_section(pfn);
284 }
285 }
286 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
287
288 static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
289 unsigned long end_pfn)
290 {
291 unsigned long old_zone_end_pfn;
292
293 zone_span_writelock(zone);
294
295 old_zone_end_pfn = zone_end_pfn(zone);
296 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
297 zone->zone_start_pfn = start_pfn;
298
299 zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
300 zone->zone_start_pfn;
301
302 zone_span_writeunlock(zone);
303 }
304
305 static void resize_zone(struct zone *zone, unsigned long start_pfn,
306 unsigned long end_pfn)
307 {
308 zone_span_writelock(zone);
309
310 if (end_pfn - start_pfn) {
311 zone->zone_start_pfn = start_pfn;
312 zone->spanned_pages = end_pfn - start_pfn;
313 } else {
314 /*
315 * make it consistent with free_area_init_core():
316 * if spanned_pages = 0, then keep start_pfn = 0
317 */
318 zone->zone_start_pfn = 0;
319 zone->spanned_pages = 0;
320 }
321
322 zone_span_writeunlock(zone);
323 }
324
325 static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
326 unsigned long end_pfn)
327 {
328 enum zone_type zid = zone_idx(zone);
329 int nid = zone->zone_pgdat->node_id;
330 unsigned long pfn;
331
332 for (pfn = start_pfn; pfn < end_pfn; pfn++)
333 set_page_links(pfn_to_page(pfn), zid, nid, pfn);
334 }
335
336 /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
337 * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
338 static int __ref ensure_zone_is_initialized(struct zone *zone,
339 unsigned long start_pfn, unsigned long num_pages)
340 {
341 if (!zone_is_initialized(zone))
342 return init_currently_empty_zone(zone, start_pfn, num_pages,
343 MEMMAP_HOTPLUG);
344 return 0;
345 }
346
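/*
 * move_pfn_range_left()/move_pfn_range_right() shift the boundary between
 * two adjacent zones (@z1 below @z2) so that the pfn range [start_pfn,
 * end_pfn) ends up in the other zone. They are used by online_pages()
 * when a range sitting in ZONE_MOVABLE is onlined as kernel memory, or
 * when a range in the highest kernel zone is onlined as movable. The
 * resize happens under pgdat_resize_lock() and the per-page zone links
 * are rewritten by fix_zone_id() afterwards.
 */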
347 static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
348 unsigned long start_pfn, unsigned long end_pfn)
349 {
350 int ret;
351 unsigned long flags;
352 unsigned long z1_start_pfn;
353
354 ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
355 if (ret)
356 return ret;
357
358 pgdat_resize_lock(z1->zone_pgdat, &flags);
359
360 /* can't move pfns which are higher than @z2 */
361 if (end_pfn > zone_end_pfn(z2))
362 goto out_fail;
363 /* the moved-out part must be at the leftmost of @z2 */
364 if (start_pfn > z2->zone_start_pfn)
365 goto out_fail;
366 /* the ranges must overlap */
367 if (end_pfn <= z2->zone_start_pfn)
368 goto out_fail;
369
370 /* use start_pfn for z1's start_pfn if z1 is empty */
371 if (!zone_is_empty(z1))
372 z1_start_pfn = z1->zone_start_pfn;
373 else
374 z1_start_pfn = start_pfn;
375
376 resize_zone(z1, z1_start_pfn, end_pfn);
377 resize_zone(z2, end_pfn, zone_end_pfn(z2));
378
379 pgdat_resize_unlock(z1->zone_pgdat, &flags);
380
381 fix_zone_id(z1, start_pfn, end_pfn);
382
383 return 0;
384 out_fail:
385 pgdat_resize_unlock(z1->zone_pgdat, &flags);
386 return -1;
387 }
388
389 static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
390 unsigned long start_pfn, unsigned long end_pfn)
391 {
392 int ret;
393 unsigned long flags;
394 unsigned long z2_end_pfn;
395
396 ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
397 if (ret)
398 return ret;
399
400 pgdat_resize_lock(z1->zone_pgdat, &flags);
401
402 /* can't move pfns which are lower than @z1 */
403 if (z1->zone_start_pfn > start_pfn)
404 goto out_fail;
405 /* the moved-out part must be at the rightmost of @z1 */
406 if (zone_end_pfn(z1) > end_pfn)
407 goto out_fail;
408 /* the ranges must overlap */
409 if (start_pfn >= zone_end_pfn(z1))
410 goto out_fail;
411
412 /* use end_pfn for z2's end_pfn if z2 is empty */
413 if (!zone_is_empty(z2))
414 z2_end_pfn = zone_end_pfn(z2);
415 else
416 z2_end_pfn = end_pfn;
417
418 resize_zone(z1, z1->zone_start_pfn, start_pfn);
419 resize_zone(z2, start_pfn, z2_end_pfn);
420
421 pgdat_resize_unlock(z1->zone_pgdat, &flags);
422
423 fix_zone_id(z2, start_pfn, end_pfn);
424
425 return 0;
426 out_fail:
427 pgdat_resize_unlock(z1->zone_pgdat, &flags);
428 return -1;
429 }
430
431 static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
432 unsigned long end_pfn)
433 {
434 unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
435
436 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
437 pgdat->node_start_pfn = start_pfn;
438
439 pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
440 pgdat->node_start_pfn;
441 }
442
443 static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
444 {
445 struct pglist_data *pgdat = zone->zone_pgdat;
446 int nr_pages = PAGES_PER_SECTION;
447 int nid = pgdat->node_id;
448 int zone_type;
449 unsigned long flags, pfn;
450 int ret;
451
452 zone_type = zone - pgdat->node_zones;
453 ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
454 if (ret)
455 return ret;
456
457 pgdat_resize_lock(zone->zone_pgdat, &flags);
458 grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
459 grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
460 phys_start_pfn + nr_pages);
461 pgdat_resize_unlock(zone->zone_pgdat, &flags);
462 memmap_init_zone(nr_pages, nid, zone_type,
463 phys_start_pfn, MEMMAP_HOTPLUG);
464
465 /* online_pages_range() is called later and expects pages reserved */
466 for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
467 if (!pfn_valid(pfn))
468 continue;
469
470 SetPageReserved(pfn_to_page(pfn));
471 }
472 return 0;
473 }
474
475 static int __meminit __add_section(int nid, struct zone *zone,
476 unsigned long phys_start_pfn)
477 {
478 int ret;
479
480 if (pfn_valid(phys_start_pfn))
481 return -EEXIST;
482
483 ret = sparse_add_one_section(zone, phys_start_pfn);
484
485 if (ret < 0)
486 return ret;
487
488 ret = __add_zone(zone, phys_start_pfn);
489
490 if (ret < 0)
491 return ret;
492
493 return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
494 }
495
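/*
 * An architecture's arch_add_memory() is expected to map the range and
 * then call __add_pages(). A minimal sketch (illustrative only, not any
 * particular architecture's implementation) could look like:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones +
 *			zone_for_memory(nid, start, size, ZONE_NORMAL);
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */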
496 /*
497 * Reasonably generic function for adding memory. It is
498 * expected that archs that support memory hotplug will
499 * call this function after deciding the zone to which to
500 * add the new pages.
501 */
502 int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
503 unsigned long nr_pages)
504 {
505 unsigned long i;
506 int err = 0;
507 int start_sec, end_sec;
508 /* during mem_map initialization, align the hot-added range to sections */
509 start_sec = pfn_to_section_nr(phys_start_pfn);
510 end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
511
512 for (i = start_sec; i <= end_sec; i++) {
513 err = __add_section(nid, zone, section_nr_to_pfn(i));
514
515 /*
516 * EEXIST is finally dealt with by the ioresource collision
517 * check; see add_memory() => register_memory_resource().
518 * A warning will be printed if there is a collision.
519 */
520 if (err && (err != -EEXIST))
521 break;
522 err = 0;
523 }
524 vmemmap_populate_print_last();
525
526 return err;
527 }
528 EXPORT_SYMBOL_GPL(__add_pages);
529
530 #ifdef CONFIG_MEMORY_HOTREMOVE
531 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
532 static int find_smallest_section_pfn(int nid, struct zone *zone,
533 unsigned long start_pfn,
534 unsigned long end_pfn)
535 {
536 struct mem_section *ms;
537
538 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
539 ms = __pfn_to_section(start_pfn);
540
541 if (unlikely(!valid_section(ms)))
542 continue;
543
544 if (unlikely(pfn_to_nid(start_pfn) != nid))
545 continue;
546
547 if (zone && zone != page_zone(pfn_to_page(start_pfn)))
548 continue;
549
550 return start_pfn;
551 }
552
553 return 0;
554 }
555
556 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
557 static int find_biggest_section_pfn(int nid, struct zone *zone,
558 unsigned long start_pfn,
559 unsigned long end_pfn)
560 {
561 struct mem_section *ms;
562 unsigned long pfn;
563
564 /* pfn is the end pfn of a memory section. */
565 pfn = end_pfn - 1;
566 for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
567 ms = __pfn_to_section(pfn);
568
569 if (unlikely(!valid_section(ms)))
570 continue;
571
572 if (unlikely(pfn_to_nid(pfn) != nid))
573 continue;
574
575 if (zone && zone != page_zone(pfn_to_page(pfn)))
576 continue;
577
578 return pfn;
579 }
580
581 return 0;
582 }
583
584 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
585 unsigned long end_pfn)
586 {
587 unsigned long zone_start_pfn = zone->zone_start_pfn;
588 unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
589 unsigned long zone_end_pfn = z;
590 unsigned long pfn;
591 struct mem_section *ms;
592 int nid = zone_to_nid(zone);
593
594 zone_span_writelock(zone);
595 if (zone_start_pfn == start_pfn) {
596 /*
597 * If the section is the smallest section in the zone, we need to
598 * shrink zone->zone_start_pfn and zone->spanned_pages.
599 * In this case, find the second smallest valid mem_section
600 * for shrinking the zone.
601 */
602 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
603 zone_end_pfn);
604 if (pfn) {
605 zone->zone_start_pfn = pfn;
606 zone->spanned_pages = zone_end_pfn - pfn;
607 }
608 } else if (zone_end_pfn == end_pfn) {
609 /*
610 * If the section is the biggest section in the zone, we need to
611 * shrink zone->spanned_pages.
612 * In this case, find the second biggest valid mem_section for
613 * shrinking the zone.
614 */
615 pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
616 start_pfn);
617 if (pfn)
618 zone->spanned_pages = pfn - zone_start_pfn + 1;
619 }
620
621 /*
622 * The section is neither the biggest nor the smallest mem_section in
623 * the zone; it only creates a hole in the zone. So in this case we
624 * need not change the zone. But the zone may now consist only of
625 * holes, so check whether it still has any valid section.
626 */
627 pfn = zone_start_pfn;
628 for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
629 ms = __pfn_to_section(pfn);
630
631 if (unlikely(!valid_section(ms)))
632 continue;
633
634 if (page_zone(pfn_to_page(pfn)) != zone)
635 continue;
636
637 /* If this is the section being removed, skip it and continue */
638 if (start_pfn == pfn)
639 continue;
640
641 /* If we find valid section, we have nothing to do */
642 zone_span_writeunlock(zone);
643 return;
644 }
645
646 /* The zone has no valid section */
647 zone->zone_start_pfn = 0;
648 zone->spanned_pages = 0;
649 zone_span_writeunlock(zone);
650 }
651
652 static void shrink_pgdat_span(struct pglist_data *pgdat,
653 unsigned long start_pfn, unsigned long end_pfn)
654 {
655 unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
656 unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
657 unsigned long pgdat_end_pfn = p;
658 unsigned long pfn;
659 struct mem_section *ms;
660 int nid = pgdat->node_id;
661
662 if (pgdat_start_pfn == start_pfn) {
663 /*
664 * If the section is the smallest section in the pgdat, we need to
665 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
666 * In this case, find the second smallest valid mem_section
667 * for shrinking the pgdat.
668 */
669 pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
670 pgdat_end_pfn);
671 if (pfn) {
672 pgdat->node_start_pfn = pfn;
673 pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
674 }
675 } else if (pgdat_end_pfn == end_pfn) {
676 /*
677 * If the section is the biggest section in the pgdat, we need to
678 * shrink pgdat->node_spanned_pages.
679 * In this case, find the second biggest valid mem_section for
680 * shrinking the pgdat.
681 */
682 pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
683 start_pfn);
684 if (pfn)
685 pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
686 }
687
688 /*
689 * If the section is neither the biggest nor the smallest mem_section
690 * in the pgdat, it only creates a hole in the pgdat. So in this case,
691 * we need not change the pgdat.
692 * But the pgdat may now consist only of holes, so check whether it
693 * still has any valid section.
694 */
695 pfn = pgdat_start_pfn;
696 for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
697 ms = __pfn_to_section(pfn);
698
699 if (unlikely(!valid_section(ms)))
700 continue;
701
702 if (pfn_to_nid(pfn) != nid)
703 continue;
704
705 /* If this is the section being removed, skip it and continue */
706 if (start_pfn == pfn)
707 continue;
708
709 /* If we find valid section, we have nothing to do */
710 return;
711 }
712
713 /* The pgdat has no valid section */
714 pgdat->node_start_pfn = 0;
715 pgdat->node_spanned_pages = 0;
716 }
717
718 static void __remove_zone(struct zone *zone, unsigned long start_pfn)
719 {
720 struct pglist_data *pgdat = zone->zone_pgdat;
721 int nr_pages = PAGES_PER_SECTION;
722 int zone_type;
723 unsigned long flags;
724
725 zone_type = zone - pgdat->node_zones;
726
727 pgdat_resize_lock(zone->zone_pgdat, &flags);
728 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
729 shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
730 pgdat_resize_unlock(zone->zone_pgdat, &flags);
731 }
732
733 static int __remove_section(struct zone *zone, struct mem_section *ms)
734 {
735 unsigned long start_pfn;
736 int scn_nr;
737 int ret = -EINVAL;
738
739 if (!valid_section(ms))
740 return ret;
741
742 ret = unregister_memory_section(ms);
743 if (ret)
744 return ret;
745
746 scn_nr = __section_nr(ms);
747 start_pfn = section_nr_to_pfn(scn_nr);
748 __remove_zone(zone, start_pfn);
749
750 sparse_remove_one_section(zone, ms);
751 return 0;
752 }
753
754 /**
755 * __remove_pages() - remove sections of pages from a zone
756 * @zone: zone from which pages need to be removed
757 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
758 * @nr_pages: number of pages to remove (must be multiple of section size)
759 *
760 * Generic helper function to remove section mappings and sysfs entries
761 * for the section of the memory we are removing. Caller needs to make
762 * sure that pages are marked reserved and zones are adjusted properly by
763 * calling offline_pages().
764 */
765 int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
766 unsigned long nr_pages)
767 {
768 unsigned long i;
769 int sections_to_remove;
770 resource_size_t start, size;
771 int ret = 0;
772
773 /*
774 * We can only remove entire sections
775 */
776 BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
777 BUG_ON(nr_pages % PAGES_PER_SECTION);
778
779 start = phys_start_pfn << PAGE_SHIFT;
780 size = nr_pages * PAGE_SIZE;
781 ret = release_mem_region_adjustable(&iomem_resource, start, size);
782 if (ret) {
783 resource_size_t endres = start + size - 1;
784
785 pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
786 &start, &endres, ret);
787 }
788
789 sections_to_remove = nr_pages / PAGES_PER_SECTION;
790 for (i = 0; i < sections_to_remove; i++) {
791 unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
792 ret = __remove_section(zone, __pfn_to_section(pfn));
793 if (ret)
794 break;
795 }
796 return ret;
797 }
798 EXPORT_SYMBOL_GPL(__remove_pages);
799 #endif /* CONFIG_MEMORY_HOTREMOVE */
800
801 int set_online_page_callback(online_page_callback_t callback)
802 {
803 int rc = -EINVAL;
804
805 get_online_mems();
806 mutex_lock(&online_page_callback_lock);
807
808 if (online_page_callback == generic_online_page) {
809 online_page_callback = callback;
810 rc = 0;
811 }
812
813 mutex_unlock(&online_page_callback_lock);
814 put_online_mems();
815
816 return rc;
817 }
818 EXPORT_SYMBOL_GPL(set_online_page_callback);
819
820 int restore_online_page_callback(online_page_callback_t callback)
821 {
822 int rc = -EINVAL;
823
824 get_online_mems();
825 mutex_lock(&online_page_callback_lock);
826
827 if (online_page_callback == callback) {
828 online_page_callback = generic_online_page;
829 rc = 0;
830 }
831
832 mutex_unlock(&online_page_callback_lock);
833 put_online_mems();
834
835 return rc;
836 }
837 EXPORT_SYMBOL_GPL(restore_online_page_callback);
838
839 void __online_page_set_limits(struct page *page)
840 {
841 }
842 EXPORT_SYMBOL_GPL(__online_page_set_limits);
843
844 void __online_page_increment_counters(struct page *page)
845 {
846 adjust_managed_page_count(page, 1);
847 }
848 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
849
850 void __online_page_free(struct page *page)
851 {
852 __free_reserved_page(page);
853 }
854 EXPORT_SYMBOL_GPL(__online_page_free);
855
856 static void generic_online_page(struct page *page)
857 {
858 __online_page_set_limits(page);
859 __online_page_increment_counters(page);
860 __online_page_free(page);
861 }
862
863 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
864 void *arg)
865 {
866 unsigned long i;
867 unsigned long onlined_pages = *(unsigned long *)arg;
868 struct page *page;
869 if (PageReserved(pfn_to_page(start_pfn)))
870 for (i = 0; i < nr_pages; i++) {
871 page = pfn_to_page(start_pfn + i);
872 (*online_page_callback)(page);
873 onlined_pages++;
874 }
875 *(unsigned long *)arg = onlined_pages;
876 return 0;
877 }
878
879 #ifdef CONFIG_MOVABLE_NODE
880 /*
881 * With CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
882 * normal memory.
883 */
884 static bool can_online_high_movable(struct zone *zone)
885 {
886 return true;
887 }
888 #else /* CONFIG_MOVABLE_NODE */
889 /* ensure every online node has NORMAL memory */
890 static bool can_online_high_movable(struct zone *zone)
891 {
892 return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
893 }
894 #endif /* CONFIG_MOVABLE_NODE */
895
896 /* check which states in node_states will be changed when onlining memory */
897 static void node_states_check_changes_online(unsigned long nr_pages,
898 struct zone *zone, struct memory_notify *arg)
899 {
900 int nid = zone_to_nid(zone);
901 enum zone_type zone_last = ZONE_NORMAL;
902
903 /*
904 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
905 * contains nodes which have zones of 0...ZONE_NORMAL,
906 * set zone_last to ZONE_NORMAL.
907 *
908 * If we don't have HIGHMEM nor movable node,
909 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
910 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
911 */
912 if (N_MEMORY == N_NORMAL_MEMORY)
913 zone_last = ZONE_MOVABLE;
914
915 /*
916 * if the memory to be onlined is in a zone of 0...zone_last, and
917 * the zones of 0...zone_last don't have memory before onlining, we will
918 * need to set the node to node_states[N_NORMAL_MEMORY] after
919 * the memory is onlined.
920 */
921 if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
922 arg->status_change_nid_normal = nid;
923 else
924 arg->status_change_nid_normal = -1;
925
926 #ifdef CONFIG_HIGHMEM
927 /*
928 * If we have movable node, node_states[N_HIGH_MEMORY]
929 * contains nodes which have zones of 0...ZONE_HIGHMEM,
930 * set zone_last to ZONE_HIGHMEM.
931 *
932 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
933 * contains nodes which have zones of 0...ZONE_MOVABLE,
934 * set zone_last to ZONE_MOVABLE.
935 */
936 zone_last = ZONE_HIGHMEM;
937 if (N_MEMORY == N_HIGH_MEMORY)
938 zone_last = ZONE_MOVABLE;
939
940 if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
941 arg->status_change_nid_high = nid;
942 else
943 arg->status_change_nid_high = -1;
944 #else
945 arg->status_change_nid_high = arg->status_change_nid_normal;
946 #endif
947
948 /*
949 * if the node doesn't have memory before onlining, we will need to
950 * set the node to node_states[N_MEMORY] after the memory
951 * is onlined.
952 */
953 if (!node_state(nid, N_MEMORY))
954 arg->status_change_nid = nid;
955 else
956 arg->status_change_nid = -1;
957 }
958
959 static void node_states_set_node(int node, struct memory_notify *arg)
960 {
961 if (arg->status_change_nid_normal >= 0)
962 node_set_state(node, N_NORMAL_MEMORY);
963
964 if (arg->status_change_nid_high >= 0)
965 node_set_state(node, N_HIGH_MEMORY);
966
967 node_set_state(node, N_MEMORY);
968 }
969
970
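/*
 * A sketch of how a caller is expected to drive this (illustrative; the
 * real call sites live in drivers/base/memory.c and the notifier wiring
 * is omitted):
 *
 *	mem_hotplug_begin();
 *	ret = online_pages(start_pfn, nr_pages, MMOP_ONLINE_KEEP);
 *	mem_hotplug_done();
 *
 * MMOP_ONLINE_KERNEL and MMOP_ONLINE_MOVABLE additionally ask for the
 * range to be moved into a kernel zone or ZONE_MOVABLE, respectively,
 * via move_pfn_range_left()/move_pfn_range_right() above.
 */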
971 /* Must be protected by mem_hotplug_begin() */
972 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
973 {
974 unsigned long flags;
975 unsigned long onlined_pages = 0;
976 struct zone *zone;
977 int need_zonelists_rebuild = 0;
978 int nid;
979 int ret;
980 struct memory_notify arg;
981
982 /*
983 * This doesn't need a lock to do pfn_to_page().
984 * The section can't be removed here because of the
985 * memory_block->state_mutex.
986 */
987 zone = page_zone(pfn_to_page(pfn));
988
989 if ((zone_idx(zone) > ZONE_NORMAL ||
990 online_type == MMOP_ONLINE_MOVABLE) &&
991 !can_online_high_movable(zone))
992 return -EINVAL;
993
994 if (online_type == MMOP_ONLINE_KERNEL &&
995 zone_idx(zone) == ZONE_MOVABLE) {
996 if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
997 return -EINVAL;
998 }
999 if (online_type == MMOP_ONLINE_MOVABLE &&
1000 zone_idx(zone) == ZONE_MOVABLE - 1) {
1001 if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
1002 return -EINVAL;
1003 }
1004
1005 /* Previous code may have changed the zone of the pfn range */
1006 zone = page_zone(pfn_to_page(pfn));
1007
1008 arg.start_pfn = pfn;
1009 arg.nr_pages = nr_pages;
1010 node_states_check_changes_online(nr_pages, zone, &arg);
1011
1012 nid = pfn_to_nid(pfn);
1013
1014 ret = memory_notify(MEM_GOING_ONLINE, &arg);
1015 ret = notifier_to_errno(ret);
1016 if (ret) {
1017 memory_notify(MEM_CANCEL_ONLINE, &arg);
1018 return ret;
1019 }
1020 /*
1021 * If this zone is not populated, then it is not in zonelist.
1022 * This means the page allocator ignores this zone.
1023 * So, the zonelist must be updated after onlining.
1024 */
1025 mutex_lock(&zonelists_mutex);
1026 if (!populated_zone(zone)) {
1027 need_zonelists_rebuild = 1;
1028 build_all_zonelists(NULL, zone);
1029 }
1030
1031 ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
1032 online_pages_range);
1033 if (ret) {
1034 if (need_zonelists_rebuild)
1035 zone_pcp_reset(zone);
1036 mutex_unlock(&zonelists_mutex);
1037 printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
1038 (unsigned long long) pfn << PAGE_SHIFT,
1039 (((unsigned long long) pfn + nr_pages)
1040 << PAGE_SHIFT) - 1);
1041 memory_notify(MEM_CANCEL_ONLINE, &arg);
1042 return ret;
1043 }
1044
1045 zone->present_pages += onlined_pages;
1046
1047 pgdat_resize_lock(zone->zone_pgdat, &flags);
1048 zone->zone_pgdat->node_present_pages += onlined_pages;
1049 pgdat_resize_unlock(zone->zone_pgdat, &flags);
1050
1051 if (onlined_pages) {
1052 node_states_set_node(zone_to_nid(zone), &arg);
1053 if (need_zonelists_rebuild)
1054 build_all_zonelists(NULL, NULL);
1055 else
1056 zone_pcp_update(zone);
1057 }
1058
1059 mutex_unlock(&zonelists_mutex);
1060
1061 init_per_zone_wmark_min();
1062
1063 if (onlined_pages)
1064 kswapd_run(zone_to_nid(zone));
1065
1066 vm_total_pages = nr_free_pagecache_pages();
1067
1068 writeback_set_ratelimit();
1069
1070 if (onlined_pages)
1071 memory_notify(MEM_ONLINE, &arg);
1072 return 0;
1073 }
1074 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1075
1076 static void reset_node_present_pages(pg_data_t *pgdat)
1077 {
1078 struct zone *z;
1079
1080 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1081 z->present_pages = 0;
1082
1083 pgdat->node_present_pages = 0;
1084 }
1085
1086 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1087 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1088 {
1089 struct pglist_data *pgdat;
1090 unsigned long zones_size[MAX_NR_ZONES] = {0};
1091 unsigned long zholes_size[MAX_NR_ZONES] = {0};
1092 unsigned long start_pfn = PFN_DOWN(start);
1093
1094 pgdat = NODE_DATA(nid);
1095 if (!pgdat) {
1096 pgdat = arch_alloc_nodedata(nid);
1097 if (!pgdat)
1098 return NULL;
1099
1100 arch_refresh_nodedata(nid, pgdat);
1101 } else {
1102 /* Reset the nr_zones and classzone_idx to 0 before reuse */
1103 pgdat->nr_zones = 0;
1104 pgdat->classzone_idx = 0;
1105 }
1106
1107 /* we can use NODE_DATA(nid) from here */
1108
1109 /* init node's zones as empty zones, we don't have any present pages.*/
1110 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
1111
1112 /*
1113 * The node we allocated has no zone fallback lists. To avoid
1114 * accessing a not-initialized zonelist, build it here.
1115 */
1116 mutex_lock(&zonelists_mutex);
1117 build_all_zonelists(pgdat, NULL);
1118 mutex_unlock(&zonelists_mutex);
1119
1120 /*
1121 * zone->managed_pages is set to an approximate value in
1122 * free_area_init_core(), which will cause
1123 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
1124 * So reset it to 0 before any memory is onlined.
1125 */
1126 reset_node_managed_pages(pgdat);
1127
1128 /*
1129 * When memory is hot-added, all the memory is in offline state. So
1130 * clear all zones' present_pages because they will be updated in
1131 * online_pages() and offline_pages().
1132 */
1133 reset_node_present_pages(pgdat);
1134
1135 return pgdat;
1136 }
1137
1138 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
1139 {
1140 arch_refresh_nodedata(nid, NULL);
1141 arch_free_nodedata(pgdat);
1142 return;
1143 }
1144
1145
1146 /**
1147 * try_online_node - online a node if offlined
1148 *
1149 * called by cpu_up() to online a node without onlined memory.
1150 */
1151 int try_online_node(int nid)
1152 {
1153 pg_data_t *pgdat;
1154 int ret;
1155
1156 if (node_online(nid))
1157 return 0;
1158
1159 mem_hotplug_begin();
1160 pgdat = hotadd_new_pgdat(nid, 0);
1161 if (!pgdat) {
1162 pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1163 ret = -ENOMEM;
1164 goto out;
1165 }
1166 node_set_online(nid);
1167 ret = register_one_node(nid);
1168 BUG_ON(ret);
1169
1170 if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
1171 mutex_lock(&zonelists_mutex);
1172 build_all_zonelists(NULL, NULL);
1173 mutex_unlock(&zonelists_mutex);
1174 }
1175
1176 out:
1177 mem_hotplug_done();
1178 return ret;
1179 }
1180
1181 static int check_hotplug_memory_range(u64 start, u64 size)
1182 {
1183 u64 start_pfn = PFN_DOWN(start);
1184 u64 nr_pages = size >> PAGE_SHIFT;
1185
1186 /* Memory range must be aligned with section */
1187 if ((start_pfn & ~PAGE_SECTION_MASK) ||
1188 (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
1189 pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
1190 (unsigned long long)start,
1191 (unsigned long long)size);
1192 return -EINVAL;
1193 }
1194
1195 return 0;
1196 }
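/*
 * In other words, hot-added ranges are handled in units of memory
 * sections. As a concrete (architecture-dependent) example, x86_64 uses
 * SECTION_SIZE_BITS == 27, so start and size must be multiples of 128MB
 * there; other architectures use different section sizes.
 */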
1197
1198 /*
1199 * If the movable zone has already been set up, newly added memory should be checked.
1200 * If its address is higher than the movable zone, it should be added as movable.
1201 * Without this check, the movable zone may overlap with another zone.
1202 */
1203 static int should_add_memory_movable(int nid, u64 start, u64 size)
1204 {
1205 unsigned long start_pfn = start >> PAGE_SHIFT;
1206 pg_data_t *pgdat = NODE_DATA(nid);
1207 struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
1208
1209 if (zone_is_empty(movable_zone))
1210 return 0;
1211
1212 if (movable_zone->zone_start_pfn <= start_pfn)
1213 return 1;
1214
1215 return 0;
1216 }
1217
1218 int zone_for_memory(int nid, u64 start, u64 size, int zone_default)
1219 {
1220 if (should_add_memory_movable(nid, start, size))
1221 return ZONE_MOVABLE;
1222
1223 return zone_default;
1224 }
1225
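/*
 * Illustrative caller (a sketch; firmware drivers such as the ACPI memory
 * hotplug driver do roughly this when a memory device is enabled, with
 * the nid coming from firmware proximity information):
 *
 *	ret = add_memory(nid, start, size);
 *	if (ret && ret != -EEXIST)
 *		... report the failure to the firmware layer ...
 *
 * The new memory is left offline; it is onlined later, e.g. through the
 * memory block devices in sysfs, which ends up in online_pages().
 */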
1226 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1227 int __ref add_memory(int nid, u64 start, u64 size)
1228 {
1229 pg_data_t *pgdat = NULL;
1230 bool new_pgdat;
1231 bool new_node;
1232 struct resource *res;
1233 int ret;
1234
1235 ret = check_hotplug_memory_range(start, size);
1236 if (ret)
1237 return ret;
1238
1239 res = register_memory_resource(start, size);
1240 ret = -EEXIST;
1241 if (!res)
1242 return ret;
1243
1244 { /* Stupid hack to suppress address-never-null warning */
1245 void *p = NODE_DATA(nid);
1246 new_pgdat = !p;
1247 }
1248
1249 mem_hotplug_begin();
1250
1251 /*
1252 * Add new range to memblock so that when hotadd_new_pgdat() is called
1253 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
1254 * this new range and calculate total pages correctly. The range will
1255 * be removed at hot-remove time.
1256 */
1257 memblock_add_node(start, size, nid);
1258
1259 new_node = !node_online(nid);
1260 if (new_node) {
1261 pgdat = hotadd_new_pgdat(nid, start);
1262 ret = -ENOMEM;
1263 if (!pgdat)
1264 goto error;
1265 }
1266
1267 /* call arch's memory hotadd */
1268 ret = arch_add_memory(nid, start, size);
1269
1270 if (ret < 0)
1271 goto error;
1272
1273 /* we online node here. we can't roll back from here. */
1274 node_set_online(nid);
1275
1276 if (new_node) {
1277 ret = register_one_node(nid);
1278 /*
1279 * If the sysfs file of the new node can't be created, CPUs on the
1280 * node can't be hot-added. There is no way to roll back now,
1281 * so check with BUG_ON() to catch it, reluctantly.
1282 */
1283 BUG_ON(ret);
1284 }
1285
1286 /* create new memmap entry */
1287 firmware_map_add_hotplug(start, start + size, "System RAM");
1288
1289 goto out;
1290
1291 error:
1292 /* rollback pgdat allocation and others */
1293 if (new_pgdat)
1294 rollback_node_hotadd(nid, pgdat);
1295 release_memory_resource(res);
1296 memblock_remove(start, size);
1297
1298 out:
1299 mem_hotplug_done();
1300 return ret;
1301 }
1302 EXPORT_SYMBOL_GPL(add_memory);
1303
1304 #ifdef CONFIG_MEMORY_HOTREMOVE
1305 /*
1306 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
1307 * set and the size of the free page is given by page_order(). Using this,
1308 * the function determines if the pageblock contains only free pages.
1309 * Due to buddy constraints, a free page at least the size of a pageblock will
1310 * be located at the start of the pageblock.
1311 */
1312 static inline int pageblock_free(struct page *page)
1313 {
1314 return PageBuddy(page) && page_order(page) >= pageblock_order;
1315 }
1316
1317 /* Return the start of the next active pageblock after a given page */
1318 static struct page *next_active_pageblock(struct page *page)
1319 {
1320 /* Ensure the starting page is pageblock-aligned */
1321 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
1322
1323 /* If the entire pageblock is free, move to the end of free page */
1324 if (pageblock_free(page)) {
1325 int order;
1326 /* be careful. we don't have locks, page_order can be changed.*/
1327 order = page_order(page);
1328 if ((order < MAX_ORDER) && (order >= pageblock_order))
1329 return page + (1 << order);
1330 }
1331
1332 return page + pageblock_nr_pages;
1333 }
1334
1335 /* Checks if this range of memory is likely to be hot-removable. */
1336 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1337 {
1338 struct page *page = pfn_to_page(start_pfn);
1339 struct page *end_page = page + nr_pages;
1340
1341 /* Check the starting page of each pageblock within the range */
1342 for (; page < end_page; page = next_active_pageblock(page)) {
1343 if (!is_pageblock_removable_nolock(page))
1344 return 0;
1345 cond_resched();
1346 }
1347
1348 /* All pageblocks in the memory block are likely to be hot-removable */
1349 return 1;
1350 }
1351
1352 /*
1353 * Confirm that all pages in a range [start, end) belong to the same zone.
1354 */
1355 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
1356 {
1357 unsigned long pfn;
1358 struct zone *zone = NULL;
1359 struct page *page;
1360 int i;
1361 for (pfn = start_pfn;
1362 pfn < end_pfn;
1363 pfn += MAX_ORDER_NR_PAGES) {
1364 i = 0;
1365 /* This is just a CONFIG_HOLES_IN_ZONE check.*/
1366 while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
1367 i++;
1368 if (i == MAX_ORDER_NR_PAGES)
1369 continue;
1370 page = pfn_to_page(pfn + i);
1371 if (zone && page_zone(page) != zone)
1372 return 0;
1373 zone = page_zone(page);
1374 }
1375 return 1;
1376 }
1377
1378 /*
1379 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
1380 * and hugepages). We scan by pfn because it's much easier than scanning over
1381 * linked lists. This function returns the pfn of the first found movable
1382 * page if it's found, otherwise 0.
1383 */
1384 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1385 {
1386 unsigned long pfn;
1387 struct page *page;
1388 for (pfn = start; pfn < end; pfn++) {
1389 if (pfn_valid(pfn)) {
1390 page = pfn_to_page(pfn);
1391 if (PageLRU(page))
1392 return pfn;
1393 if (PageHuge(page)) {
1394 if (page_huge_active(page))
1395 return pfn;
1396 else
1397 pfn = round_up(pfn + 1,
1398 1 << compound_order(page)) - 1;
1399 }
1400 }
1401 }
1402 return 0;
1403 }
1404
1405 #define NR_OFFLINE_AT_ONCE_PAGES (256)
1406 static int
1407 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1408 {
1409 unsigned long pfn;
1410 struct page *page;
1411 int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
1412 int not_managed = 0;
1413 int ret = 0;
1414 LIST_HEAD(source);
1415
1416 for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
1417 if (!pfn_valid(pfn))
1418 continue;
1419 page = pfn_to_page(pfn);
1420
1421 if (PageHuge(page)) {
1422 struct page *head = compound_head(page);
1423 pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1424 if (compound_order(head) > PFN_SECTION_SHIFT) {
1425 ret = -EBUSY;
1426 break;
1427 }
1428 if (isolate_huge_page(page, &source))
1429 move_pages -= 1 << compound_order(head);
1430 continue;
1431 }
1432
1433 if (!get_page_unless_zero(page))
1434 continue;
1435 /*
1436 * We can skip free pages. And we can only deal with pages on
1437 * LRU.
1438 */
1439 ret = isolate_lru_page(page);
1440 if (!ret) { /* Success */
1441 put_page(page);
1442 list_add_tail(&page->lru, &source);
1443 move_pages--;
1444 inc_zone_page_state(page, NR_ISOLATED_ANON +
1445 page_is_file_cache(page));
1446
1447 } else {
1448 #ifdef CONFIG_DEBUG_VM
1449 printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
1450 pfn);
1451 dump_page(page, "failed to remove from LRU");
1452 #endif
1453 put_page(page);
1454 /* Because we don't hold zone->lock here, we should
1455 check this again. */
1456 if (page_count(page)) {
1457 not_managed++;
1458 ret = -EBUSY;
1459 break;
1460 }
1461 }
1462 }
1463 if (!list_empty(&source)) {
1464 if (not_managed) {
1465 putback_movable_pages(&source);
1466 goto out;
1467 }
1468
1469 /*
1470 * alloc_migrate_target should be improooooved!!
1471 * migrate_pages returns # of failed pages.
1472 */
1473 ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
1474 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1475 if (ret)
1476 putback_movable_pages(&source);
1477 }
1478 out:
1479 return ret;
1480 }
1481
1482 /*
1483 * remove from free_area[] and mark all as Reserved.
1484 */
1485 static int
1486 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
1487 void *data)
1488 {
1489 __offline_isolated_pages(start, start + nr_pages);
1490 return 0;
1491 }
1492
1493 static void
1494 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
1495 {
1496 walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
1497 offline_isolated_pages_cb);
1498 }
1499
1500 /*
1501 * Check that all pages in the range, recorded as a memory resource, are isolated.
1502 */
1503 static int
1504 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
1505 void *data)
1506 {
1507 int ret;
1508 long offlined = *(long *)data;
1509 ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
1510 offlined = nr_pages;
1511 if (!ret)
1512 *(long *)data += offlined;
1513 return ret;
1514 }
1515
1516 static long
1517 check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
1518 {
1519 long offlined = 0;
1520 int ret;
1521
1522 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
1523 check_pages_isolated_cb);
1524 if (ret < 0)
1525 offlined = (long)ret;
1526 return offlined;
1527 }
1528
1529 #ifdef CONFIG_MOVABLE_NODE
1530 /*
1531 * With CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
1532 * normal memory.
1533 */
1534 static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1535 {
1536 return true;
1537 }
1538 #else /* CONFIG_MOVABLE_NODE */
1539 /* ensure the node has NORMAL memory if it is still online */
1540 static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1541 {
1542 struct pglist_data *pgdat = zone->zone_pgdat;
1543 unsigned long present_pages = 0;
1544 enum zone_type zt;
1545
1546 for (zt = 0; zt <= ZONE_NORMAL; zt++)
1547 present_pages += pgdat->node_zones[zt].present_pages;
1548
1549 if (present_pages > nr_pages)
1550 return true;
1551
1552 present_pages = 0;
1553 for (; zt <= ZONE_MOVABLE; zt++)
1554 present_pages += pgdat->node_zones[zt].present_pages;
1555
1556 /*
1557 * we can't offline the last normal memory until all
1558 * higher memory is offlined.
1559 */
1560 return present_pages == 0;
1561 }
1562 #endif /* CONFIG_MOVABLE_NODE */
1563
1564 static int __init cmdline_parse_movable_node(char *p)
1565 {
1566 #ifdef CONFIG_MOVABLE_NODE
1567 /*
1568 * Memory used by the kernel cannot be hot-removed because Linux
1569 * cannot migrate the kernel pages. When memory hotplug is
1570 * enabled, we should prevent memblock from allocating memory
1571 * for the kernel.
1572 *
1573 * ACPI SRAT records all hotpluggable memory ranges. But before
1574 * SRAT is parsed, we don't know about it.
1575 *
1576 * The kernel image is loaded into memory very early. We
1577 * cannot prevent this anyway. So on NUMA system, we set any
1578 * node the kernel resides in as un-hotpluggable.
1579 *
1580 * Since on modern servers, one node could have double-digit
1581 * gigabytes memory, we can assume the memory around the kernel
1582 * image is also un-hotpluggable. So before SRAT is parsed, just
1583 * allocate memory near the kernel image to try the best to keep
1584 * the kernel away from hotpluggable memory.
1585 */
1586 memblock_set_bottom_up(true);
1587 movable_node_enabled = true;
1588 #else
1589 pr_warn("movable_node option not supported\n");
1590 #endif
1591 return 0;
1592 }
1593 early_param("movable_node", cmdline_parse_movable_node);
1594
1595 /* check which states in node_states will be changed when offlining memory */
1596 static void node_states_check_changes_offline(unsigned long nr_pages,
1597 struct zone *zone, struct memory_notify *arg)
1598 {
1599 struct pglist_data *pgdat = zone->zone_pgdat;
1600 unsigned long present_pages = 0;
1601 enum zone_type zt, zone_last = ZONE_NORMAL;
1602
1603 /*
1604 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
1605 * contains nodes which have zones of 0...ZONE_NORMAL,
1606 * set zone_last to ZONE_NORMAL.
1607 *
1608 * If we don't have HIGHMEM nor movable node,
1609 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
1610 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
1611 */
1612 if (N_MEMORY == N_NORMAL_MEMORY)
1613 zone_last = ZONE_MOVABLE;
1614
1615 /*
1616 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1617 * If the memory to be offlined is in a zone of 0...zone_last,
1618 * and it is the last present memory, 0...zone_last will
1619 * become empty after offlining, thus we can determine that we will
1620 * need to clear the node from node_states[N_NORMAL_MEMORY].
1621 */
1622 for (zt = 0; zt <= zone_last; zt++)
1623 present_pages += pgdat->node_zones[zt].present_pages;
1624 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1625 arg->status_change_nid_normal = zone_to_nid(zone);
1626 else
1627 arg->status_change_nid_normal = -1;
1628
1629 #ifdef CONFIG_HIGHMEM
1630 /*
1631 * If we have movable node, node_states[N_HIGH_MEMORY]
1632 * contains nodes which have zones of 0...ZONE_HIGHMEM,
1633 * set zone_last to ZONE_HIGHMEM.
1634 *
1635 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
1636 * contains nodes which have zones of 0...ZONE_MOVABLE,
1637 * set zone_last to ZONE_MOVABLE.
1638 */
1639 zone_last = ZONE_HIGHMEM;
1640 if (N_MEMORY == N_HIGH_MEMORY)
1641 zone_last = ZONE_MOVABLE;
1642
1643 for (; zt <= zone_last; zt++)
1644 present_pages += pgdat->node_zones[zt].present_pages;
1645 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1646 arg->status_change_nid_high = zone_to_nid(zone);
1647 else
1648 arg->status_change_nid_high = -1;
1649 #else
1650 arg->status_change_nid_high = arg->status_change_nid_normal;
1651 #endif
1652
1653 /*
1654 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1655 */
1656 zone_last = ZONE_MOVABLE;
1657
1658 /*
1659 * check whether node_states[N_HIGH_MEMORY] will be changed.
1660 * If we try to offline the last present @nr_pages from the node,
1661 * we can determine that we will need to clear the node from
1662 * node_states[N_HIGH_MEMORY].
1663 */
1664 for (; zt <= zone_last; zt++)
1665 present_pages += pgdat->node_zones[zt].present_pages;
1666 if (nr_pages >= present_pages)
1667 arg->status_change_nid = zone_to_nid(zone);
1668 else
1669 arg->status_change_nid = -1;
1670 }
1671
1672 static void node_states_clear_node(int node, struct memory_notify *arg)
1673 {
1674 if (arg->status_change_nid_normal >= 0)
1675 node_clear_state(node, N_NORMAL_MEMORY);
1676
1677 if ((N_MEMORY != N_NORMAL_MEMORY) &&
1678 (arg->status_change_nid_high >= 0))
1679 node_clear_state(node, N_HIGH_MEMORY);
1680
1681 if ((N_MEMORY != N_HIGH_MEMORY) &&
1682 (arg->status_change_nid >= 0))
1683 node_clear_state(node, N_MEMORY);
1684 }
1685
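/*
 * Overview of the offlining steps implemented below (a summary of this
 * function, not a specification): the range must be pageblock-aligned and
 * within a single zone; it is isolated as MIGRATE_MOVABLE, MEM_GOING_OFFLINE
 * is signalled, and then movable pages are repeatedly scanned and migrated
 * out (with LRU/pcp draining in between) until none remain or the timeout
 * or a pending signal aborts the attempt. Free hugepages are dissolved,
 * isolation is re-checked, the isolated pages are removed from the free
 * lists, and finally counters, watermarks and zonelists are updated and
 * MEM_OFFLINE is signalled.
 */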
1686 static int __ref __offline_pages(unsigned long start_pfn,
1687 unsigned long end_pfn, unsigned long timeout)
1688 {
1689 unsigned long pfn, nr_pages, expire;
1690 long offlined_pages;
1691 int ret, drain, retry_max, node;
1692 unsigned long flags;
1693 struct zone *zone;
1694 struct memory_notify arg;
1695
1696 /* at least, alignment against pageblock is necessary */
1697 if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1698 return -EINVAL;
1699 if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1700 return -EINVAL;
1701 /* This makes hotplug much easier... and readable.
1702 We assume this for now. */
1703 if (!test_pages_in_a_zone(start_pfn, end_pfn))
1704 return -EINVAL;
1705
1706 zone = page_zone(pfn_to_page(start_pfn));
1707 node = zone_to_nid(zone);
1708 nr_pages = end_pfn - start_pfn;
1709
1710 if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
1711 return -EINVAL;
1712
1713 /* set above range as isolated */
1714 ret = start_isolate_page_range(start_pfn, end_pfn,
1715 MIGRATE_MOVABLE, true);
1716 if (ret)
1717 return ret;
1718
1719 arg.start_pfn = start_pfn;
1720 arg.nr_pages = nr_pages;
1721 node_states_check_changes_offline(nr_pages, zone, &arg);
1722
1723 ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1724 ret = notifier_to_errno(ret);
1725 if (ret)
1726 goto failed_removal;
1727
1728 pfn = start_pfn;
1729 expire = jiffies + timeout;
1730 drain = 0;
1731 retry_max = 5;
1732 repeat:
1733 /* start memory hot removal */
1734 ret = -EAGAIN;
1735 if (time_after(jiffies, expire))
1736 goto failed_removal;
1737 ret = -EINTR;
1738 if (signal_pending(current))
1739 goto failed_removal;
1740 ret = 0;
1741 if (drain) {
1742 lru_add_drain_all();
1743 cond_resched();
1744 drain_all_pages(zone);
1745 }
1746
1747 pfn = scan_movable_pages(start_pfn, end_pfn);
1748 if (pfn) { /* We have movable pages */
1749 ret = do_migrate_range(pfn, end_pfn);
1750 if (!ret) {
1751 drain = 1;
1752 goto repeat;
1753 } else {
1754 if (ret < 0)
1755 if (--retry_max == 0)
1756 goto failed_removal;
1757 yield();
1758 drain = 1;
1759 goto repeat;
1760 }
1761 }
1762 /* drain all zones' lru pagevecs; this is asynchronous... */
1763 lru_add_drain_all();
1764 yield();
1765 /* drain pcp pages, this is synchronous. */
1766 drain_all_pages(zone);
1767 /*
1768 * dissolve free hugepages in the memory block before actually offlining
1769 * in order to keep hugetlbfs's object counting consistent.
1770 */
1771 dissolve_free_huge_pages(start_pfn, end_pfn);
1772 /* check again */
1773 offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1774 if (offlined_pages < 0) {
1775 ret = -EBUSY;
1776 goto failed_removal;
1777 }
1778 printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
1779 /* OK, the whole target range is isolated.
1780 We cannot roll back at this point. */
1781 offline_isolated_pages(start_pfn, end_pfn);
1782 /* reset pageblock flags and make the migrate type MOVABLE again */
1783 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1784 /* removal success */
1785 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1786 zone->present_pages -= offlined_pages;
1787
1788 pgdat_resize_lock(zone->zone_pgdat, &flags);
1789 zone->zone_pgdat->node_present_pages -= offlined_pages;
1790 pgdat_resize_unlock(zone->zone_pgdat, &flags);
1791
1792 init_per_zone_wmark_min();
1793
1794 if (!populated_zone(zone)) {
1795 zone_pcp_reset(zone);
1796 mutex_lock(&zonelists_mutex);
1797 build_all_zonelists(NULL, NULL);
1798 mutex_unlock(&zonelists_mutex);
1799 } else
1800 zone_pcp_update(zone);
1801
1802 node_states_clear_node(node, &arg);
1803 if (arg.status_change_nid >= 0)
1804 kswapd_stop(node);
1805
1806 vm_total_pages = nr_free_pagecache_pages();
1807 writeback_set_ratelimit();
1808
1809 memory_notify(MEM_OFFLINE, &arg);
1810 return 0;
1811
1812 failed_removal:
1813 printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
1814 (unsigned long long) start_pfn << PAGE_SHIFT,
1815 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1816 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1817 /* pushback to free area */
1818 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1819 return ret;
1820 }
1821
1822 /* Must be protected by mem_hotplug_begin() */
1823 int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1824 {
1825 return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1826 }
1827 #endif /* CONFIG_MEMORY_HOTREMOVE */
1828
1829 /**
1830 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1831 * @start_pfn: start pfn of the memory range
1832 * @end_pfn: end pfn of the memory range
1833 * @arg: argument passed to func
1834 * @func: callback for each memory section walked
1835 *
1836 * This function walks through all present mem sections in range
1837 * [start_pfn, end_pfn) and call func on each mem section.
1838 *
1839 * Returns the return value of func.
1840 */
1841 int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1842 void *arg, int (*func)(struct memory_block *, void *))
1843 {
1844 struct memory_block *mem = NULL;
1845 struct mem_section *section;
1846 unsigned long pfn, section_nr;
1847 int ret;
1848
1849 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1850 section_nr = pfn_to_section_nr(pfn);
1851 if (!present_section_nr(section_nr))
1852 continue;
1853
1854 section = __nr_to_section(section_nr);
1855 /* same memblock? */
1856 if (mem)
1857 if ((section_nr >= mem->start_section_nr) &&
1858 (section_nr <= mem->end_section_nr))
1859 continue;
1860
1861 mem = find_memory_block_hinted(section, mem);
1862 if (!mem)
1863 continue;
1864
1865 ret = func(mem, arg);
1866 if (ret) {
1867 kobject_put(&mem->dev.kobj);
1868 return ret;
1869 }
1870 }
1871
1872 if (mem)
1873 kobject_put(&mem->dev.kobj);
1874
1875 return 0;
1876 }
1877
1878 #ifdef CONFIG_MEMORY_HOTREMOVE
1879 static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1880 {
1881 int ret = !is_memblock_offlined(mem);
1882
1883 if (unlikely(ret)) {
1884 phys_addr_t beginpa, endpa;
1885
1886 beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1887 endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1888 pr_warn("removing memory fails, because memory "
1889 "[%pa-%pa] is onlined\n",
1890 &beginpa, &endpa);
1891 }
1892
1893 return ret;
1894 }
1895
1896 static int check_cpu_on_node(pg_data_t *pgdat)
1897 {
1898 int cpu;
1899
1900 for_each_present_cpu(cpu) {
1901 if (cpu_to_node(cpu) == pgdat->node_id)
1902 /*
1903 * the cpu on this node isn't removed, and we can't
1904 * offline this node.
1905 */
1906 return -EBUSY;
1907 }
1908
1909 return 0;
1910 }
1911
1912 static void unmap_cpu_on_node(pg_data_t *pgdat)
1913 {
1914 #ifdef CONFIG_ACPI_NUMA
1915 int cpu;
1916
1917 for_each_possible_cpu(cpu)
1918 if (cpu_to_node(cpu) == pgdat->node_id)
1919 numa_clear_node(cpu);
1920 #endif
1921 }
1922
1923 static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
1924 {
1925 int ret;
1926
1927 ret = check_cpu_on_node(pgdat);
1928 if (ret)
1929 return ret;
1930
1931 /*
1932 * the node will be offlined when we come here, so we can clear
1933 * the cpu_to_node() now.
1934 */
1935
1936 unmap_cpu_on_node(pgdat);
1937 return 0;
1938 }
1939
1940 /**
1941 * try_offline_node
1942 *
1943 * Offline a node if all memory sections and cpus of the node are removed.
1944 *
1945 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1946 * and online/offline operations before this call.
1947 */
1948 void try_offline_node(int nid)
1949 {
1950 pg_data_t *pgdat = NODE_DATA(nid);
1951 unsigned long start_pfn = pgdat->node_start_pfn;
1952 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1953 unsigned long pfn;
1954 int i;
1955
1956 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1957 unsigned long section_nr = pfn_to_section_nr(pfn);
1958
1959 if (!present_section_nr(section_nr))
1960 continue;
1961
1962 if (pfn_to_nid(pfn) != nid)
1963 continue;
1964
1965 /*
1966 * some memory sections of this node are not removed, and we
1967 * can't offline the node now.
1968 */
1969 return;
1970 }
1971
1972 if (check_and_unmap_cpu_on_node(pgdat))
1973 return;
1974
1975 /*
1976 * all memory/cpu of this node are removed, we can offline this
1977 * node now.
1978 */
1979 node_set_offline(nid);
1980 unregister_one_node(nid);
1981
1982 /* free the wait table in each zone */
1983 for (i = 0; i < MAX_NR_ZONES; i++) {
1984 struct zone *zone = pgdat->node_zones + i;
1985
1986 /*
1987 * wait_table may be allocated from boot memory,
1988 * here only free if it's allocated by vmalloc.
1989 */
1990 if (is_vmalloc_addr(zone->wait_table)) {
1991 vfree(zone->wait_table);
1992 zone->wait_table = NULL;
1993 }
1994 }
1995 }
1996 EXPORT_SYMBOL(try_offline_node);
1997
1998 /**
1999 * remove_memory
2000 *
2001 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2002 * and online/offline operations before this call, as required by
2003 * try_offline_node().
2004 */
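/*
 * Typical removal sequence (illustrative; e.g. the ACPI memory hotplug
 * driver behaves roughly like this when a memory device is ejected):
 *
 *	lock_device_hotplug();
 *	... offline the affected memory blocks (offline_pages() via sysfs
 *	    or device_offline()) ...
 *	remove_memory(nid, start, size);
 *	unlock_device_hotplug();
 */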
2005 void __ref remove_memory(int nid, u64 start, u64 size)
2006 {
2007 int ret;
2008
2009 BUG_ON(check_hotplug_memory_range(start, size));
2010
2011 mem_hotplug_begin();
2012
2013 /*
2014 * All memory blocks must be offlined before removing memory. Check
2015 * whether all memory blocks in question are offline and trigger a BUG()
2016 * if this is not the case.
2017 */
2018 ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
2019 check_memblock_offlined_cb);
2020 if (ret)
2021 BUG();
2022
2023 /* remove memmap entry */
2024 firmware_map_remove(start, start + size, "System RAM");
2025 memblock_free(start, size);
2026 memblock_remove(start, size);
2027
2028 arch_remove_memory(start, size);
2029
2030 try_offline_node(nid);
2031
2032 mem_hotplug_done();
2033 }
2034 EXPORT_SYMBOL_GPL(remove_memory);
2035 #endif /* CONFIG_MEMORY_HOTREMOVE */