// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

  (A non-compiled, illustrative sketch of the step-3 hooks appears at the
  end of this file.)

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */

#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];

static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return bug->bug_addr;
#else
	return (unsigned long)bug + bug->bug_addr_disp;
#endif
}

#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);

static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	struct module *mod;
	struct bug_entry *bug = NULL;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
		unsigned i;

		bug = mod->bug_table;
		for (i = 0; i < mod->num_bugs; ++i, ++bug)
			if (bugaddr == bug_addr(bug))
				goto out;
	}
	bug = NULL;
out:
	rcu_read_unlock_sched();

	return bug;
}

void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			 struct module *mod)
{
	char *secstrings;
	unsigned int i;

	mod->bug_table = NULL;
	mod->num_bugs = 0;

	/* Find the __bug_table section, if present */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
			continue;
		mod->bug_table = (void *) sechdrs[i].sh_addr;
		mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
		break;
	}

	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
	 * must run in non-interruptive state.
	 */
	list_add_rcu(&mod->bug_list, &module_bug_list);
}

void module_bug_cleanup(struct module *mod)
{
	list_del_rcu(&mod->bug_list);
}

#else

static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	return NULL;
}
#endif

void bug_get_file_line(struct bug_entry *bug, const char **file,
		       unsigned int *line)
{
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	*file = bug->file;
#else
	*file = (const char *)bug + bug->file_disp;
#endif
	*line = bug->line;
#else
	*file = NULL;
	*line = 0;
#endif
}

struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug_addr(bug))
			return bug;

	return module_find_bug(bugaddr);
}

enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
	struct bug_entry *bug;
	const char *file;
	unsigned line, warning, once, done;

	if (!is_valid_bugaddr(bugaddr))
		return BUG_TRAP_TYPE_NONE;

	bug = find_bug(bugaddr);
	if (!bug)
		return BUG_TRAP_TYPE_NONE;

	disable_trace_on_warning();

	bug_get_file_line(bug, &file, &line);

	warning = (bug->flags & BUGFLAG_WARNING) != 0;
	once = (bug->flags & BUGFLAG_ONCE) != 0;
	done = (bug->flags & BUGFLAG_DONE) != 0;

	if (warning && once) {
		if (done)
			return BUG_TRAP_TYPE_WARN;

		/*
		 * Since this is the only store, concurrency is not an issue.
		 */
		bug->flags |= BUGFLAG_DONE;
	}

	/*
	 * BUG() and WARN_ON() families don't print a custom debug message
	 * before triggering the exception handler, so we must add the
	 * "cut here" line now. WARN() issues its own "cut here" before the
	 * extra debugging message it writes before triggering the handler.
	 */
	if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
		printk(KERN_DEFAULT CUT_HERE);

	if (warning) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
		       NULL);
		return BUG_TRAP_TYPE_WARN;
	}

	if (file)
		pr_crit("kernel BUG at %s:%u!\n", file, line);
	else
		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
			(void *)bugaddr);

	return BUG_TRAP_TYPE_BUG;
}

static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
	struct bug_entry *bug;

	for (bug = start; bug < end; bug++)
		bug->flags &= ~BUGFLAG_DONE;
}

void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
	struct module *mod;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
		clear_once_table(mod->bug_table,
				 mod->bug_table + mod->num_bugs);
	rcu_read_unlock_sched();
#endif

	clear_once_table(__start___bug_table, __stop___bug_table);
}
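
/*
 * Illustrative sketch (never compiled): a minimal example of the
 * architecture-side hooks described in step 3 of the header comment.
 * Everything below is an assumption for illustration only -- the handler
 * name example_bug_trap_handler(), the use of regs->ip as the trapping
 * address, the TASK_SIZE_MAX lower bound, and BUG_TRAP_INSN_LEN as the
 * size of the trap instruction are placeholders; a real architecture uses
 * its own register layout, address checks, and instruction encoding.
 */
#if 0	/* example only -- not built */
#include <linux/ptrace.h>	/* struct pt_regs */

#define BUG_TRAP_INSN_LEN	2	/* placeholder: length of the BUG trap insn */

/* Each architecture must provide this; report_bug() calls it as a filter. */
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * Placeholder check: a real implementation verifies that addr is a
	 * kernel text address and that the instruction stored there is the
	 * architecture's BUG trap opcode.
	 */
	return addr >= TASK_SIZE_MAX;
}

/*
 * Called from the illegal-instruction trap handler once the fault is known
 * to have happened in kernel mode.  Returns true if the trap was handled.
 */
static bool example_bug_trap_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->ip, regs)) {
	case BUG_TRAP_TYPE_WARN:
		/* WARN*(): step over the trap instruction and keep going. */
		regs->ip += BUG_TRAP_INSN_LEN;
		return true;
	case BUG_TRAP_TYPE_BUG:
		/* BUG(): unrecoverable; let the arch oops/die() path run. */
		return false;
	case BUG_TRAP_TYPE_NONE:
	default:
		/* Not a BUG/WARN site; treat as an ordinary bad instruction. */
		return false;
	}
}
#endif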