/*
 *  linux/mm/memory_hotplug.c
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid,
			  unsigned long pfn, unsigned long size);
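
/*
 * Initialize the memmap for one newly added section's worth of pages
 * and register the zone in zone_table, the lookup table used by
 * page_zone(), so the new pages resolve back to this zone.
 */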
static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
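
/*
 * Add one memory section to the zone: allocate and hook up its memmap
 * via sparse_add_one_section(), initialize the new pages, and create
 * the sysfs memory block entries via register_new_memory().
 */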
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	__add_zone(zone, phys_start_pfn);
	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/* We want to keep adding the rest of the
		 * sections if the first ones already exist
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
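
/*
 * Illustrative (hypothetical) caller: an arch's add_memory()
 * implementation would typically pick a target zone and then call
 *
 *	__add_pages(zone, start_pfn, size >> PAGE_SHIFT);
 *
 * The zone choice and the names in this sketch are assumptions for
 * illustration, not taken from this file.
 */

/*
 * Widen the zone to cover [start_pfn, end_pfn): pull zone_start_pfn
 * back if needed and extend spanned_pages, under the zone span
 * write lock.
 */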
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
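
/*
 * Grow the node span the same way grow_zone_span() grows the zone:
 * pull node_start_pfn back and/or extend node_spanned_pages to cover
 * [start_pfn, end_pfn).  Called under pgdat_resize_lock(), as in
 * online_pages() below.
 */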
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
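
/*
 * Bring [pfn, pfn + nr_pages) online: extend the zone and node spans,
 * hand each new page to the allocator via online_page(), and account
 * for the pages in the zone and node present_pages counts.
 */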
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);
		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	return 0;
}