// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

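/*
 * Dump the state of one page of @folio.  Both @folio and @page are
 * snapshots taken by the caller (see __dump_page()): every read here is
 * lockless, so the values reported may already be stale.
 */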
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

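	/*
	 * A negative _mapcount can also encode a page type; report a
	 * mapcount of 0 in that case instead of a nonsense value.
	 */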
	mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %pGt\n", &folio->page.page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

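/*
 * Snapshot @page and the first two struct pages of its folio, then dump
 * the copies.  The page may be freed, split or migrated underneath us,
 * so if the snapshot is self-inconsistent (the page's index lies beyond
 * the folio's size), retry up to five times before falling back to
 * dumping it as a standalone order-0 page.
 */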
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

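/**
 * dump_page() - print the diagnostic state of a page to the kernel log
 * @page: the page to dump
 * @reason: optional string explaining why the page is being dumped,
 *          printed after the page state (may be NULL)
 *
 * Safe to call on a poisoned (uninitialized) page.  For example:
 * dump_page(page, "bad page state");
 */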
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

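/*
 * Dump the fields of a VMA.  Pointers are printed unhashed (%px); this
 * helper is only built with CONFIG_DEBUG_VM.
 */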
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

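/* Dump the major fields of an mm_struct to the kernel log. */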
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

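/*
 * Parse the "vm_debug[=options]" kernel command line parameter.
 * Bare "vm_debug" (or "vm_debug=" with no options) enables every option
 * we control; "vm_debug=-" disables them all.  Currently the only
 * option character is 'p', which enables page struct init poisoning.
 */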
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

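/*
 * Poison a range of struct pages with PAGE_POISON_PATTERN so that any
 * use before initialization trips the PagePoisoned() check.  Does
 * nothing if poisoning was disabled via the vm_debug parameter.
 */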
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

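/*
 * Dump the maple tree backing a VMA iterator.  A no-op unless
 * CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */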
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */