// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
#endif /* CONFIG_DEBUG_VM */
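
/*
 * Illustrative sketch, not part of mm/debug.c (shown above as of v6.3):
 * the EM()/EMe() "X-macro" pattern used to populate migrate_reason_names[]
 * derives both an enum and its matching name table from one list, so the
 * two cannot drift apart. MY_REASONS, my_reason, and my_reason_names below
 * are hypothetical stand-ins for MIGRATE_REASON (from
 * trace/events/migrate.h), enum migrate_reason, and migrate_reason_names[].
 */
#include <stdio.h>

#define MY_REASONS					\
	EM(MR_DEMO_COMPACTION,     "compaction")	\
	EMe(MR_DEMO_MEMORY_HOTPLUG, "memory_hotplug")

/* First expansion: keep only the first column to build the enum. */
#define EM(a, b)	a,
#define EMe(a, b)	a
enum my_reason { MY_REASONS, MR_DEMO_TYPES };
#undef EM
#undef EMe

/*
 * Second expansion: keep only the second column to build the name table,
 * which is what mm/debug.c does for migrate_reason_names[].
 */
#define EM(a, b)	b,
#define EMe(a, b)	b
static const char * const my_reason_names[MR_DEMO_TYPES] = {
	MY_REASONS
};
#undef EM
#undef EMe

int main(void)
{
	printf("%s\n", my_reason_names[MR_DEMO_MEMORY_HOTPLUG]); /* "memory_hotplug" */
	return 0;
}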
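
/*
 * Hedged usage sketch, not from the original file: dump_page() is the
 * exported entry point, and callers elsewhere in the kernel pass a short
 * reason string when a page fails a sanity check, along the lines of:
 *
 *	if (WARN_ON(!page_ref_count(page)))
 *		dump_page(page, "zero refcount");
 *
 * The PagePoisoned() check in dump_page() matters because a struct page
 * still holding PAGE_POISON_PATTERN is uninitialized, so letting
 * __dump_page() read its fields would print garbage at best.
 */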
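
/*
 * Usage note derived from setup_vm_debug() above (illustrative, not part of
 * the original file). With CONFIG_DEBUG_VM=y, the "vm_debug" kernel command
 * line parameter controls the debug options handled here:
 *
 *	vm_debug	enable every option this parameter controls
 *	vm_debug=-	disable all of them
 *	vm_debug=p	enable struct page init poisoning only
 *
 * Unknown option letters are reported via pr_err() and skipped.
 */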