// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
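
/*
 * Illustration (not part of the original file): with EM()/EMe() defined as
 * above, a MIGRATE_REASON list of the form
 *
 *	EM( MR_COMPACTION,	"compaction")
 *	EM( MR_MEMORY_FAILURE,	"memory_failure")
 *	...
 *	EMe(MR_DEMOTION,	"demotion")
 *
 * expands the array initializer to { "compaction", "memory_failure", ...,
 * "demotion" }, so each MR_* enum value indexes its human-readable name.
 * The authoritative entry list lives in trace/events/migrate.h and may
 * differ between kernel versions.
 */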

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
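
/*
 * Illustration (not part of the original file): these three tables back the
 * %pG printk() extensions implemented in lib/vsprintf.c, e.g.
 *
 *	pr_warn("flags: %pGp\n", &page->flags);    (e.g. "uptodate|lru")
 *	pr_warn("gfp: %pGg\n", &gfp);              (e.g. "GFP_KERNEL|__GFP_ZERO")
 *	pr_warn("vma: %pGv\n", &vma->vm_flags);    (e.g. "read|exec")
 *
 * The printed names are the string halves of the entries above; the
 * {0, NULL} sentinel terminates each table.
 */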

static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				head_compound_pincount(head));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}
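
/*
 * Illustration (not part of the original file): for a (made-up) order-0
 * pagecache page, the dump produced above resembles
 *
 *	page:00000000f2d0a21c refcount:2 mapcount:0 mapping:00000000ae12f3c8 index:0x1 pfn:0x10200
 *	flags: 0xfffc0000000014(uptodate|lru|node=0|zone=1)
 *	raw: 00fffc0000000014 ffff8880a3c4d508 ffff8880a3c4d508 0000000000000000
 *
 * %p hashes pointer values, so the page/mapping tokens are stable within a
 * boot but are not real addresses; the flag names come from %pGp and the
 * pageflag_names[] table defined earlier.
 */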

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
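
/*
 * Illustration (not part of the original file): callers usually dump state
 * right before asserting or bailing out, e.g.
 *
 *	if (WARN_ON(!PageLocked(page)))
 *		dump_page(page, "expected locked page");
 *
 * VM_BUG_ON_PAGE() in <linux/mmdebug.h> is the canonical wrapper; it calls
 * dump_page() with the stringified condition as the reason, then BUG()s.
 */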

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
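
/*
 * Note (not part of the original file): dump_vma() backs the VM_BUG_ON_VMA()
 * assertion in <linux/mmdebug.h>, which dumps the vma and then BUG()s. The
 * %px specifiers above print raw, unhashed kernel pointers, unlike %p; that
 * is acceptable here only because this is a CONFIG_DEBUG_VM-only helper.
 */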

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
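
/*
 * Note (not part of the original file): the format string and the argument
 * list above must stay in matching #ifdef order, since each config option
 * contributes both a conversion specifier and its value. dump_mm() backs
 * VM_BUG_ON_MM() in <linux/mmdebug.h> the same way dump_vma() backs
 * VM_BUG_ON_VMA().
 */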

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
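
/*
 * Illustration (not part of the original file): page_init_poison() is what
 * arms the PagePoisoned() check used by dump_page() above. Memory hotplug
 * poisons freshly added struct pages along the lines of (sketch)
 *
 *	page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
 *
 * so a struct page used before initialization is caught by the poison
 * pattern rather than read as silent garbage.
 */
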
#endif		/* CONFIG_DEBUG_VM */