// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
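
/*
 * These tables back the %pGp/%pGg/%pGv vsprintf() extensions used by the
 * dumpers below; the {0, NULL} entry terminates each table. As an
 * illustrative (not upstream) example, decoding the flags of a page you
 * already hold looks like:
 *
 *	pr_warn("flags: %pGp\n", &page->flags);
 *
 * which prints symbolic names such as "locked|uptodate|lru" rather than
 * a raw bitmask.
 */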
void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}
	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/* Corrupt page, cannot call page_mapping */
		mapping = page->mapping;
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}
	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
				"index:%#lx head:%px order:%u "
				"compound_mapcount:%d compound_pincount:%d\n",
				page, page_ref_count(head), mapcount,
				mapping, page_to_pgoff(page), head,
				compound_order(head), compound_mapcount(page),
				compound_pincount(page));
		} else {
			pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
				"index:%#lx head:%px order:%u "
				"compound_mapcount:%d\n",
				page, page_ref_count(head), mapcount,
				mapping, page_to_pgoff(page), head,
				compound_order(head), compound_mapcount(page));
		}
	} else {
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx\n",
			page, page_ref_count(page), mapcount,
			mapping, page_to_pgoff(page));
	}
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		const struct inode *host;
		const struct address_space_operations *a_ops;
		const struct hlist_node *dentry_first;
		const struct dentry *dentry_ptr;
		struct dentry dentry;
		/*
		 * mapping can be invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully
		 */
		if (copy_from_kernel_nofault(&host, &mapping->host,
					sizeof(struct inode *)) ||
		    copy_from_kernel_nofault(&a_ops, &mapping->a_ops,
				sizeof(struct address_space_operations *))) {
			pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("mapping->a_ops:%ps\n", a_ops);
			goto out_mapping;
		}
		if (copy_from_kernel_nofault(&dentry_first,
			&host->i_dentry.first, sizeof(struct hlist_node *))) {
			pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n",
				a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("mapping->a_ops:%ps\n", a_ops);
			goto out_mapping;
		}
		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (copy_from_kernel_nofault(&dentry, dentry_ptr,
							sizeof(struct dentry))) {
			pr_warn("mapping->aops:%ps with invalid mapping->host->i_dentry.first %px\n",
				a_ops, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("mapping->aops:%ps dentry name:\"%pd\"\n",
				a_ops, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
		page_cma ? " CMA" : "");
hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);
#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
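
/*
 * Similarly, dump_mm() backs VM_BUG_ON_MM(); for example (illustrative
 * only):
 *
 *	VM_BUG_ON_MM(!mm->pgd, mm);
 */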
static bool page_init_poisoning __read_mostly = true;
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
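
/*
 * Boot parameter usage handled by setup_vm_debug() above (see also
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	vm_debug=-	disable all debugging options we control
 *	vm_debug=p	enable page structure init time poisoning
 *	vm_debug	(no arguments) enable all options
 */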
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
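
/*
 * Note: PAGE_POISON_PATTERN is an all-ones pattern, which is what
 * PagePoisoned() tests for at the top of __dump_page(), so a poisoned
 * struct page that escapes into normal use gets reported instead of
 * being dereferenced blindly.
 */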
#endif		/* CONFIG_DEBUG_VM */