Merge tag 'locking_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

- Allow the compiler to optimize away unused percpu accesses and change
the local_lock_* macros back to inline functions

- A couple of fixes to static call insn patching

* tag 'locking_urgent_for_v5.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
Revert "mm/page_alloc: mark pagesets as __maybe_unused"
Revert "locking/local_lock: Make the empty local_lock_*() function a macro."
x86/percpu: Remove volatile from arch_raw_cpu_ptr().
static_call: Remove __DEFINE_STATIC_CALL macro
static_call: Properly initialise DEFINE_STATIC_CALL_RET0()
static_call: Don't make __static_call_return0 static
x86,static_call: Fix __static_call_return0 for i386

Changed files (+585 -572):

arch/powerpc/include/asm/static_call.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/static_call.h
arch/x86/kernel/static_call.c
include/linux/local_lock_internal.h
include/linux/static_call.h
kernel/Makefile
kernel/static_call.c
kernel/static_call_inline.c (new)
mm/page_alloc.c
+1
arch/powerpc/include/asm/static_call.h
···
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)	__PPC_SCT(name, "b " #func)
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)	__PPC_SCT(name, "blr")
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)	__PPC_SCT(name, "b .+20")
 
 #endif /* _ASM_POWERPC_STATIC_CALL_H */
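For context, `__PPC_SCT()` (defined earlier in this header) emits a trampoline whose dispatch sequence is followed by a return-zero tail. The annotated layout below is an editorial sketch of why `"b .+20"` works, not part of the commit; the byte offsets are assumed from that trampoline definition:

```c
/*
 * Assumed __PPC_SCT trampoline layout (offsets in bytes):
 *
 *   0:  b <func> / blr / b .+20    <- the patchable slot
 *   4:  lis   r12, target@ha       \
 *   8:  lwz   r12, target@l(r12)    | load the saved target
 *  12:  mtctr r12                   | and branch to it
 *  16:  bctr                       /
 *  20:  li    r3, 0                <- "b .+20" lands here,
 *  24:  blr                           returning 0 directly
 */
```

So on powerpc a RET0 trampoline needs no call to `__static_call_return0` at all; the patched branch skips straight to the `li r3, 0; blr` tail.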
+3 -3
arch/x86/include/asm/percpu.h
···
 #define arch_raw_cpu_ptr(ptr)					\
 ({								\
 	unsigned long tcp_ptr__;				\
-	asm volatile("add " __percpu_arg(1) ", %0"		\
-		     : "=r" (tcp_ptr__)				\
-		     : "m" (this_cpu_off), "0" (ptr));		\
+	asm ("add " __percpu_arg(1) ", %0"			\
+	     : "=r" (tcp_ptr__)					\
+	     : "m" (this_cpu_off), "0" (ptr));			\
 	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;		\
 })
 #else
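This is what the pull message means by "allow the compiler to optimize away unused percpu accesses": a non-volatile asm whose outputs are all dead may be deleted by the compiler, whereas `asm volatile` must always be emitted, pinning the per-cpu address computation even when nothing consumes it. A minimal user-space sketch of the difference (x86 AT&T syntax; `this_cpu_off` here is a stand-in variable, not the kernel's per-cpu offset):

```c
#include <stdio.h>

static unsigned long this_cpu_off;	/* stand-in for the kernel's per-CPU offset */

static inline void *cpu_ptr(void *ptr)
{
	unsigned long tcp_ptr__;

	/*
	 * No "volatile": if the caller never uses the returned pointer,
	 * the compiler may drop this add entirely.  With "asm volatile"
	 * it would have to stay -- which is what kept the otherwise
	 * unused lock access in mm/page_alloc.c alive before this change.
	 */
	asm ("add %1, %0"
	     : "=r" (tcp_ptr__)
	     : "m" (this_cpu_off), "0" (ptr));
	return (void *)tcp_ptr__;
}

int main(void)
{
	int x = 42;

	cpu_ptr(&x);				/* result unused: may compile to nothing */
	printf("%d\n", *(int *)cpu_ptr(&x));	/* result used: the add is emitted */
	return 0;
}
```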
+2
arch/x86/include/asm/static_call.h
···
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
 
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)			\
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
 
 #define ARCH_ADD_TRAMP_KEY(name)					\
 	asm(".pushsection .static_call_tramp_key, \"a\" \n"		\
+2 -3
arch/x86/kernel/static_call.c
···
 };
 
 /*
- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
- * The REX.W cancels the effect of any data16.
+ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
  */
-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
 
 static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
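Why the bytes changed (editorial decoding, consistent with the new comment): static-call patching relies on this slot being a *single* 5-byte instruction, and `0x48` is a REX.W prefix only on x86-64 — i386 has no REX, so there it decodes as a one-byte `dec` opcode, splitting the old sequence into two instructions. CS segment-override prefixes are harmless single-instruction padding on both:

```c
/* old: a single instruction only on x86-64 */
static const u8 xor5rax_old[] = {
	0x66, 0x66,		/* data16 data16: operand-size prefixes (padding) */
	0x48,			/* REX.W on x86-64; a "dec" opcode on i386 (!) */
	0x31, 0xc0,		/* xor %eax, %eax */
};

/* new: one instruction on both i386 and x86-64 */
static const u8 xor5rax_new[] = {
	0x2e, 0x2e, 0x2e,	/* cs cs cs: segment-override prefixes (padding) */
	0x31, 0xc0,		/* xorl %eax, %eax; on 64-bit the 32-bit write
				 * zero-extends, clearing all of %rax too */
};
```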
+3 -3
include/linux/local_lock_internal.h
···
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
-# define local_lock_acquire(__ll)	do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_release(__ll)	do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_debug_init(__ll)	do { typecheck(local_lock_t *, __ll); } while (0)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
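Together with the percpu change above, this makes the two reverts safe: an empty inline function still counts as a *use* of the lock variable for `-Wunused` purposes and still type-checks its argument, while the generated code vanishes because the (now non-volatile) per-cpu address computation feeding it is dead. A rough sketch of that property, with a stand-in type (the lockdep members are compiled out in this configuration):

```c
typedef struct { } local_lock_t;	/* stand-in: empty without lockdep */

/* Empty inline function: type-checked, counts as a use of its
 * argument, and compiles to nothing. */
static inline void local_lock_acquire(local_lock_t *l) { }

static local_lock_t demo_lock;		/* no -Wunused-variable warning ... */

void demo(void)
{
	local_lock_acquire(&demo_lock);	/* ... because this references it */

	/* local_lock_acquire((int *)0); would fail to compile, preserving
	 * what the reverted typecheck() macros enforced. */
}
```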
+28 -20
include/linux/static_call.h
···
 
 extern long __static_call_return0(void);
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
+#define DEFINE_STATIC_CALL(name, _func)					\
 	DECLARE_STATIC_CALL(name, _func);				\
 	struct static_call_key STATIC_CALL_KEY(name) = {		\
-		.func = _func_init,					\
+		.func = _func,						\
 		.type = 1,						\
 	};								\
-	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)				\
 	DECLARE_STATIC_CALL(name, _func);				\
···
 		.type = 1,						\
 	};								\
 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func)				\
+	DECLARE_STATIC_CALL(name, _func);				\
+	struct static_call_key STATIC_CALL_KEY(name) = {		\
+		.func = __static_call_return0,				\
+		.type = 1,						\
+	};								\
+	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
 
 #define static_call_cond(name)	(void)__static_call(name)
···
 
 static inline int static_call_init(void) { return 0; }
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
+#define DEFINE_STATIC_CALL(name, _func)					\
 	DECLARE_STATIC_CALL(name, _func);				\
 	struct static_call_key STATIC_CALL_KEY(name) = {		\
-		.func = _func_init,					\
+		.func = _func,						\
 	};								\
-	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)				\
 	DECLARE_STATIC_CALL(name, _func);				\
···
 	};								\
 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+#define DEFINE_STATIC_CALL_RET0(name, _func)				\
+	DECLARE_STATIC_CALL(name, _func);				\
+	struct static_call_key STATIC_CALL_KEY(name) = {		\
+		.func = __static_call_return0,				\
+	};								\
+	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
 
 #define static_call_cond(name)	(void)__static_call(name)
···
 	return 0;
 }
 
-static inline long __static_call_return0(void)
-{
-	return 0;
-}
+extern long __static_call_return0(void);
 
 #define EXPORT_STATIC_CALL(name)					\
 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
···
 		.func = _func_init,					\
 	}
 
+#define DEFINE_STATIC_CALL(name, _func)					\
+	__DEFINE_STATIC_CALL(name, _func, _func)
+
 #define DEFINE_STATIC_CALL_NULL(name, _func)				\
-	DECLARE_STATIC_CALL(name, _func);				\
-	struct static_call_key STATIC_CALL_KEY(name) = {		\
-		.func = NULL,						\
-	}
+	__DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func)				\
+	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)
 
 static inline void __static_call_nop(void) { }
···
 #define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
 
 #endif /* CONFIG_HAVE_STATIC_CALL */
-
-#define DEFINE_STATIC_CALL(name, _func)					\
-	__DEFINE_STATIC_CALL(name, _func, _func)
-
-#define DEFINE_STATIC_CALL_RET0(name, _func)				\
-	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)
 
 #endif /* _LINUX_STATIC_CALL_H */
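With the initialisation fixed, a RET0 call returns 0 from its very first invocation on every config variant: inline patching, out-of-line trampoline, and the !CONFIG_HAVE_STATIC_CALL function-pointer fallback. A usage sketch with hypothetical names (`demo_call`, `my_hook`):

```c
#include <linux/static_call.h>

static long my_hook(int arg)		/* hypothetical real handler */
{
	return arg * 2;
}

/* Declares and defines the call, initialised to return 0. */
DEFINE_STATIC_CALL_RET0(demo_call, my_hook);

long run(int arg)
{
	/* Returns 0 until a real handler is installed ... */
	return static_call(demo_call)(arg);
}

void enable(void)
{
	/* ... and my_hook(arg) afterwards. */
	static_call_update(demo_call, my_hook);
}
```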
+2 -1
kernel/Makefile
···
 obj-$(CONFIG_BPF)			+= bpf/
 obj-$(CONFIG_KCSAN)			+= kcsan/
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
-obj-$(CONFIG_HAVE_STATIC_CALL_INLINE)	+= static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL)		+= static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL_INLINE)	+= static_call_inline.o
 obj-$(CONFIG_CFI_CLANG)			+= cfi.o
 
 obj-$(CONFIG_PERF_EVENTS)		+= events/
-541
kernel/static_call.c
···
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
 #include <linux/static_call.h>
-#include <linux/bug.h>
-#include <linux/smp.h>
-#include <linux/sort.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/processor.h>
-#include <asm/sections.h>
··· (the remaining deleted lines — the inline static-call implementation from
the __start_static_call_sites[] declarations through
early_initcall(static_call_init), plus the CONFIG_STATIC_CALL_SELFTEST block —
move verbatim into the new kernel/static_call_inline.c, shown in full below)
 
 long __static_call_return0(void)
 {
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__static_call_return0);
+543
kernel/static_call_inline.c
··· (new file — the inline static-call implementation moved verbatim from
kernel/static_call.c; every line below is added)

// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/bug.h>
#include <linux/smp.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/processor.h>
#include <asm/sections.h>

extern struct static_call_site __start_static_call_sites[],
			       __stop_static_call_sites[];
extern struct static_call_tramp_key __start_static_call_tramp_key[],
				    __stop_static_call_tramp_key[];

static bool static_call_initialized;

/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);

static void static_call_lock(void)
{
	mutex_lock(&static_call_mutex);
}

static void static_call_unlock(void)
{
	mutex_unlock(&static_call_mutex);
}

static inline void *static_call_addr(struct static_call_site *site)
{
	return (void *)((long)site->addr + (long)&site->addr);
}

static inline unsigned long __static_call_key(const struct static_call_site *site)
{
	return (long)site->key + (long)&site->key;
}

static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
}

/* These assume the key is word-aligned. */
static inline bool static_call_is_init(struct static_call_site *site)
{
	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
}

static inline bool static_call_is_tail(struct static_call_site *site)
{
	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
}

static inline void static_call_set_init(struct static_call_site *site)
{
	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
		    (long)&site->key;
}

static int static_call_site_cmp(const void *_a, const void *_b)
{
	const struct static_call_site *a = _a;
	const struct static_call_site *b = _b;
	const struct static_call_key *key_a = static_call_key(a);
	const struct static_call_key *key_b = static_call_key(b);

	if (key_a < key_b)
		return -1;

	if (key_a > key_b)
		return 1;

	return 0;
}

static void static_call_site_swap(void *_a, void *_b, int size)
{
	long delta = (unsigned long)_a - (unsigned long)_b;
	struct static_call_site *a = _a;
	struct static_call_site *b = _b;
	struct static_call_site tmp = *a;

	a->addr = b->addr - delta;
	a->key  = b->key  - delta;

	b->addr = tmp.addr + delta;
	b->key  = tmp.key  + delta;
}

static inline void static_call_sort_entries(struct static_call_site *start,
					    struct static_call_site *stop)
{
	sort(start, stop - start, sizeof(struct static_call_site),
	     static_call_site_cmp, static_call_site_swap);
}

static inline bool static_call_key_has_mods(struct static_call_key *key)
{
	return !(key->type & 1);
}

static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
{
	if (!static_call_key_has_mods(key))
		return NULL;

	return key->mods;
}

static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
{
	if (static_call_key_has_mods(key))
		return NULL;

	return (struct static_call_site *)(key->type & ~1);
}

void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	struct static_call_site *site, *stop;
	struct static_call_mod *site_mod, first;

	cpus_read_lock();
	static_call_lock();

	if (key->func == func)
		goto done;

	key->func = func;

	arch_static_call_transform(NULL, tramp, func, false);

	/*
	 * If uninitialized, we'll not update the callsites, but they still
	 * point to the trampoline and we just patched that.
	 */
	if (WARN_ON_ONCE(!static_call_initialized))
		goto done;

	first = (struct static_call_mod){
		.next = static_call_key_next(key),
		.mod = NULL,
		.sites = static_call_key_sites(key),
	};

	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
		bool init = system_state < SYSTEM_RUNNING;
		struct module *mod = site_mod->mod;

		if (!site_mod->sites) {
			/*
			 * This can happen if the static call key is defined in
			 * a module which doesn't use it.
			 *
			 * It also happens in the has_mods case, where the
			 * 'first' entry has no sites associated with it.
			 */
			continue;
		}

		stop = __stop_static_call_sites;

		if (mod) {
#ifdef CONFIG_MODULES
			stop = mod->static_call_sites +
			       mod->num_static_call_sites;
			init = mod->state == MODULE_STATE_COMING;
#endif
		}

		for (site = site_mod->sites;
		     site < stop && static_call_key(site) == key; site++) {
			void *site_addr = static_call_addr(site);

			if (!init && static_call_is_init(site))
				continue;

			if (!kernel_text_address((unsigned long)site_addr)) {
				/*
				 * This skips patching built-in __exit, which
				 * is part of init_section_contains() but is
				 * not part of kernel_text_address().
				 *
				 * Skipping built-in __exit is fine since it
				 * will never be executed.
				 */
				WARN_ONCE(!static_call_is_init(site),
					  "can't patch static call site at %pS",
					  site_addr);
				continue;
			}

			arch_static_call_transform(site_addr, NULL, func,
						   static_call_is_tail(site));
		}
	}

done:
	static_call_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);

static int __static_call_init(struct module *mod,
			      struct static_call_site *start,
			      struct static_call_site *stop)
{
	struct static_call_site *site;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod;

	if (start == stop)
		return 0;

	static_call_sort_entries(start, stop);

	for (site = start; site < stop; site++) {
		void *site_addr = static_call_addr(site);

		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
		    (!mod && init_section_contains(site_addr, 1)))
			static_call_set_init(site);

		key = static_call_key(site);
		if (key != prev_key) {
			prev_key = key;

			/*
			 * For vmlinux (!mod) avoid the allocation by storing
			 * the sites pointer in the key itself. Also see
			 * __static_call_update()'s @first.
			 *
			 * This allows architectures (eg. x86) to call
			 * static_call_init() before memory allocation works.
			 */
			if (!mod) {
				key->sites = site;
				key->type |= 1;
				goto do_transform;
			}

			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
			if (!site_mod)
				return -ENOMEM;

			/*
			 * When the key has a direct sites pointer, extract
			 * that into an explicit struct static_call_mod, so we
			 * can have a list of modules.
			 */
			if (static_call_key_sites(key)) {
				site_mod->mod = NULL;
				site_mod->next = NULL;
				site_mod->sites = static_call_key_sites(key);

				key->mods = site_mod;

				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
				if (!site_mod)
					return -ENOMEM;
			}

			site_mod->mod = mod;
			site_mod->sites = site;
			site_mod->next = static_call_key_next(key);
			key->mods = site_mod;
		}

do_transform:
		arch_static_call_transform(site_addr, NULL, key->func,
					   static_call_is_tail(site));
	}

	return 0;
}

static int addr_conflict(struct static_call_site *site, void *start, void *end)
{
	unsigned long addr = (unsigned long)static_call_addr(site);

	if (addr <= (unsigned long)end &&
	    addr + CALL_INSN_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __static_call_text_reserved(struct static_call_site *iter_start,
				       struct static_call_site *iter_stop,
				       void *start, void *end, bool init)
{
	struct static_call_site *iter = iter_start;

	while (iter < iter_stop) {
		if (init || !static_call_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifdef CONFIG_MODULES

static int __static_call_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __static_call_text_reserved(mod->static_call_sites,
			mod->static_call_sites + mod->num_static_call_sites,
			start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static unsigned long tramp_key_lookup(unsigned long addr)
{
	struct static_call_tramp_key *start = __start_static_call_tramp_key;
	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
	struct static_call_tramp_key *tramp_key;

	for (tramp_key = start; tramp_key != stop; tramp_key++) {
		unsigned long tramp;

		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
		if (tramp == addr)
			return (long)tramp_key->key + (long)&tramp_key->key;
	}

	return 0;
}

static int static_call_add_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = start + mod->num_static_call_sites;
	struct static_call_site *site;

	for (site = start; site != stop; site++) {
		unsigned long s_key = __static_call_key(site);
		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
		unsigned long key;

		/*
		 * Is the key is exported, 'addr' points to the key, which
		 * means modules are allowed to call static_call_update() on
		 * it.
		 *
		 * Otherwise, the key isn't exported, and 'addr' points to the
		 * trampoline so we need to lookup the key.
		 *
		 * We go through this dance to prevent crazy modules from
		 * abusing sensitive static calls.
		 */
		if (!kernel_text_address(addr))
			continue;

		key = tramp_key_lookup(addr);
		if (!key) {
			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
				static_call_addr(site));
			return -EINVAL;
		}

		key |= s_key & STATIC_CALL_SITE_FLAGS;
		site->key = key - (long)&site->key;
	}

	return __static_call_init(mod, start, stop);
}

static void static_call_del_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = mod->static_call_sites +
					mod->num_static_call_sites;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod, **prev;
	struct static_call_site *site;

	for (site = start; site < stop; site++) {
		key = static_call_key(site);
		if (key == prev_key)
			continue;

		prev_key = key;

		for (prev = &key->mods, site_mod = key->mods;
		     site_mod && site_mod->mod != mod;
		     prev = &site_mod->next, site_mod = site_mod->next)
			;

		if (!site_mod)
			continue;

		*prev = site_mod->next;
		kfree(site_mod);
	}
}

static int static_call_module_notify(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	static_call_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = static_call_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory for static calls");
			static_call_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		static_call_del_module(mod);
		break;
	}

	static_call_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block static_call_module_nb = {
	.notifier_call = static_call_module_notify,
};

#else

static inline int __static_call_mod_text_reserved(void *start, void *end)
{
	return 0;
}

#endif /* CONFIG_MODULES */

int static_call_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __static_call_text_reserved(__start_static_call_sites,
			__stop_static_call_sites, start, end, init);

	if (ret)
		return ret;

	return __static_call_mod_text_reserved(start, end);
}

int __init static_call_init(void)
{
	int ret;

	if (static_call_initialized)
		return 0;

	cpus_read_lock();
	static_call_lock();
	ret = __static_call_init(NULL, __start_static_call_sites,
				 __stop_static_call_sites);
	static_call_unlock();
	cpus_read_unlock();

	if (ret) {
		pr_err("Failed to allocate memory for static_call!\n");
		BUG();
	}

	static_call_initialized = true;

#ifdef CONFIG_MODULES
	register_module_notifier(&static_call_module_nb);
#endif
	return 0;
}
early_initcall(static_call_init);

#ifdef CONFIG_STATIC_CALL_SELFTEST

static int func_a(int x)
{
	return x+1;
}

static int func_b(int x)
{
	return x+2;
}

DEFINE_STATIC_CALL(sc_selftest, func_a);

static struct static_call_data {
	int (*func)(int);
	int val;
	int expect;
} static_call_data [] __initdata = {
	{ NULL,   2, 3 },
	{ func_b, 2, 4 },
	{ func_a, 2, 3 }
};

static int __init test_static_call_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
		struct static_call_data *scd = &static_call_data[i];

		if (scd->func)
			static_call_update(sc_selftest, scd->func);

		WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
	}

	return 0;
}
early_initcall(test_static_call_init);

#endif /* CONFIG_STATIC_CALL_SELFTEST */
+1 -1
mm/page_alloc.c
···
 struct pagesets {
 	local_lock_t lock;
 };
-static DEFINE_PER_CPU(struct pagesets, pagesets) __maybe_unused = {
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };