// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	return e;
}

int init_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_sinittext &&
	    addr < (unsigned long)_einittext)
		return 1;
	return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	if (system_state < SYSTEM_RUNNING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * for others. But it will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	if (addr >= (unsigned long)_sdata &&
	    addr < (unsigned long)_edata)
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (init_kernel_text(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be told to start watching again. This can
	 * happen either via tracing that triggers a stack trace, or
	 * via a WARN() issued while coming back from idle or while a
	 * CPU is being brought online or offline.
	 *
	 * is_module_text_address() as well as the kprobe slots
	 * and is_bpf_text_address() require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;
	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
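
/*
 * Illustrative sketch (not part of the original file): a caller that
 * patches kernel text is expected to hold text_mutex across the whole
 * modification, since the lock is what serializes dynamic code
 * patching.  patch_my_insn() is a hypothetical stand-in for an
 * architecture's text-poking primitive.
 */
static void example_patch_text(void *addr, const void *insn, size_t len)
{
	mutex_lock(&text_mutex);	/* serialize against other patchers */
	patch_my_insn(addr, insn, len);	/* hypothetical arch-specific poke */
	mutex_unlock(&text_mutex);
}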
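
/*
 * Illustrative sketch (not part of the original file): how an
 * architecture's fault handler might consult the exception tables.
 * Assumes <asm/ptrace.h> for struct pt_regs, instruction_pointer()
 * and instruction_pointer_set().  The layout of struct
 * exception_table_entry and the decoding of the fixup address are
 * arch-specific; the relative-offset decoding below mirrors the x86
 * convention and is shown only as an assumption.
 */
static bool example_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(instruction_pointer(regs));
	if (!e)
		return false;		/* no fixup registered for this insn */

	/* Redirect execution to the registered fixup stub (x86-style). */
	instruction_pointer_set(regs, (unsigned long)&e->fixup + e->fixup);
	return true;
}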
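
/*
 * Illustrative sketch (not part of the original file): a stack-dump
 * routine can use __kernel_text_address() to decide whether a word
 * found on the stack is a plausible return address worth printing;
 * unlike kernel_text_address(), it also accepts init text so that
 * saved traces through __init code still resolve.
 */
static void example_print_if_text(unsigned long addr)
{
	if (__kernel_text_address(addr))
		printk(KERN_DEBUG " [<%px>] %pS\n", (void *)addr, (void *)addr);
}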
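
/*
 * Illustrative sketch (not part of the original file): on ABIs that
 * use function descriptors, a raw function pointer refers to the
 * descriptor (data), not the code, so callers check it through
 * func_ptr_is_kernel_text() rather than core_kernel_text() directly.
 */
static bool example_handler_is_text(void (*handler)(void))
{
	return func_ptr_is_kernel_text((void *)handler);
}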