// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

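/*
 * Per-page tracking data, stored in each page's page_ext area: the
 * allocation order and gfp mask, the reason for the last migration
 * (-1 if the page was never migrated), and stackdepot handles for the
 * allocation and free stack traces.
 */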
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
};

static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

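/*
 * Sentinel stack handles: dummy_handle marks traces skipped to avoid
 * recursive allocation, failure_handle marks traces lost because
 * stackdepot could not allocate memory, and early_handle marks pages
 * allocated before page_owner was initialized.
 */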
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_enabled = true;

	return 0;
}
early_param("page_owner", early_page_owner_param);

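/*
 * Usage note: with CONFIG_PAGE_OWNER=y, tracking is enabled by booting
 * with "page_owner=on" on the kernel command line; it stays off by
 * default to avoid the runtime overhead of saving stack traces.
 */
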
static bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

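/*
 * The page_owner data lives inside each page's page_ext, at the offset
 * the page_ext framework assigned to page_owner_ops at boot.
 */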
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

static inline bool check_recursive_alloc(unsigned long *entries,
					 unsigned int nr_entries,
					 unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == ip)
			return true;
	}
	return false;
}

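/*
 * Capture the current stack trace and store it in stackdepot. Falls
 * back to dummy_handle if saving the trace would recurse into the
 * allocator, and to failure_handle if stackdepot is out of memory.
 */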
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to save the new
	 * entry. That new allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. There would
	 * still not be enough memory in stackdepot, so it would try to
	 * allocate memory again and loop forever.
	 */
	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
		return dummy_handle;

	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

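/*
 * Called on the free path: record the freeing stack in free_handle and
 * clear the "allocated" bit on every subpage of the freed block. The
 * allocation info itself is kept so it can still be dumped after free.
 */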
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle = 0;
	struct page_owner *page_owner;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_ext = page_ext_next(page_ext);
	}
}

static inline void __set_page_owner_handle(struct page *page,
	struct page_ext *page_ext, depot_stack_handle_t handle,
	unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

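/*
 * Called on the allocation path: save the caller's stack trace and
 * stamp the owner info on every subpage of the newly allocated block.
 */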
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(gfp_mask);
	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

void __split_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
}

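/*
 * Used by page migration: transfer the owner info from the old page to
 * its newly allocated replacement.
 */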
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}

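/*
 * Count, per pageblock migratetype, the pageblocks in this zone that
 * contain at least one page whose allocation migratetype differs from
 * the block's ("mixed" blocks), and print one line of counts per zone.
 */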
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = page_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

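/*
 * Illustrative output of the function above (values made up): one line
 * per zone, with one %12lu count per migratetype, e.g.
 *
 *   Node 0, zone   Normal           12            0            4            0
 */
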
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

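/*
 * Illustrative record as formatted above (PFN, flags, and stack frames
 * are made up; real traces come from stack_depot_fetch()):
 *
 *   Page allocated via order 0, mask 0x100cca(GFP_HIGHUSER_MOVABLE)
 *   PFN 1048576 type Movable Block 2048 type Movable Flags 0x0()
 *    __alloc_pages_nodemask+0x1a4/0x2a0
 *    handle_mm_fault+0x12b/0x250
 *
 * A blank line terminates each record.
 */
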
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		pr_alert("page last free stack trace:\n");
		stack_trace_print(entries, nr_entries, 0);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

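/*
 * Back end of the debugfs file: scan PFNs from the file offset onward
 * and emit one record for the first allocated page found. Assuming
 * debugfs is mounted at /sys/kernel/debug, the whole dump can be read
 * with e.g. "cat /sys/kernel/debug/page_owner > page_owner_full.txt".
 */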
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so be
		 * careful when accessing it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page, page_ext, early_handle,
						0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)
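
/*
 * Post-processing note: the raw dump produced above contains one record
 * per allocated page. The kernel tree ships a helper
 * (tools/vm/page_owner_sort.c) that groups records with identical stack
 * traces to make the output easier to analyze.
 */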