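/*
 * page_owner: track the call stack behind each page allocation.
 *
 * Enabled with the "page_owner=on" boot parameter; the recorded data
 * is read back through the debugfs file registered in pageowner_init().
 */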
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include "internal.h"

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static void init_early_allocated_pages(void);
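
/* Handle the "page_owner=on" kernel boot parameter. */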
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};
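
/*
 * Called when a block of pages is freed: clear the owner bit on every
 * page of the block so the debugfs reader skips free pages.
 */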
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		if (unlikely(!page_ext))
			continue;
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}
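
/*
 * Record the allocation order, gfp mask and a stack trace of the
 * allocating call chain in the page's page_ext.
 */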
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	if (unlikely(!page_ext))
		return;

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;
	page_ext->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
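
/* Remember why this page was last migrated. */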
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	page_ext->last_migrate_reason = reason;
}
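
/* Return the gfp mask recorded at allocation time, or 0 if unavailable. */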
gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		/*
		 * The caller just returns 0 if there is no valid gfp,
		 * so return 0 here too.
		 */
		return 0;

	return page_ext->gfp_mask;
}
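
/* Transfer owner info from @oldpage to @newpage during page migration. */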
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	int i;

	if (unlikely(!old_ext || !new_ext))
		return;

	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->nr_entries = old_ext->nr_entries;

	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
		new_ext->trace_entries[i] = old_ext->trace_entries[i];

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
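
/*
 * Format a single page's owner record into a kernel buffer and copy it
 * to the user buffer supplied by read_page_owner().
 */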
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_ext->order, page_ext->gfp_mask,
			&page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_ext->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
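
/* Dump one page's owner info to the kernel log when a page is dumped. */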
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	/* Only touch page_ext fields once we know the extension exists. */
	trace.nr_entries = page_ext->nr_entries;
	trace.entries = &page_ext->trace_entries[0];
	gfp_mask = page_ext->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_ext->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
}
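
/*
 * debugfs read: scan PFNs starting at the current file offset, find the
 * next allocated page with owner info, and emit one record per read().
 */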
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}
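
/*
 * Walk a zone and give pages that were allocated before page_owner was
 * initialized (early boot allocations) an owner record, so they are
 * counted rather than reported as untracked.
 */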
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * We are safe to check buddy flag and order, because
			 * this is init stage and only single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
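
/* Scan every populated zone of a node, holding the zone lock. */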
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
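
/* Attribute pages that were allocated before page_owner came up. */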
static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};
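
/* Create the debugfs file (normally /sys/kernel/debug/page_owner). */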
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)