/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;
	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * pages reported here as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain. (A registration sketch follows this function.)
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;
	/*
	 * immobile means "not-on-lru" pages. If immobile is larger than
	 * removable-by-driver pages reported by the notifier, we'll fail.
	 */
out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that there is a free buddy page here.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach to merge them: isolating and freeing the page will make
	 * these pages merge.
	 */
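	/*
	 * Worked example (hedged: assumes a typical x86-64 config with
	 * MAX_ORDER = 11 and pageblock_order = 9): for a free page of
	 * order 10 at pfn 0x40000, page_idx = 0x40000 & ((1 << 11) - 1)
	 * = 0, and __find_buddy_index(0, 10) = 0 ^ (1 << 10) = 1024, so
	 * the buddy sits 1024 pages (two pageblocks) away.
	 */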
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}
	/*
	 * If we isolate a free page with more than pageblock_order, there
	 * should be no free page in the range, so we can avoid the costly
	 * pageblock scan for free-page moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}
/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 * @migratetype:	migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated. (A typical calling sequence is sketched after the function.)
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;
	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
	 * pages are not necessarily aligned to pageblock_nr_pages. Check the
	 * migratetype of each pageblock first.
	 */
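	/*
	 * For example (hedged: typical x86-64 defaults, 4 KiB pages,
	 * pageblock_order = 9, MAX_ORDER = 11): pageblock_nr_pages = 512
	 * while a maximal buddy chunk holds 1024 pages, so one free chunk
	 * can straddle two pageblocks.
	 */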
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple work-around, we use the next
	 * node as the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;

		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}
	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}