/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
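 *
 * A minimal usage sketch (the key and function names below are made up for
 * illustration only and are not defined anywhere in this header):
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			my_rare_feature_work();
 *	}
 *
 *	...and from some slow path, e.g. a sysctl or module parameter handler:
 *
 *	static_branch_enable(&my_feature_key);		patches NOP -> JMP
 *	static_branch_disable(&my_feature_key);		patches JMP -> NOP
 *
 * With jump label support the test above costs a single NOP while the key is
 * disabled; without it, it degrades to an ordinary conditional branch on the
 * key's counter.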
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,	      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

#ifdef CONFIG_JUMP_LABEL

struct static_key {
	atomic_t enabled;
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
};

#else
struct static_key {
	atomic_t enabled;
};
#endif	/* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry)
{
	entry->key |= 2;
}

#endif
#endif
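
/*
 * Illustration of the CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE encoding above
 * (editorial example; all addresses and values are made up): if a jump_entry
 * sits at 0xffffffff82000000 and stores code = -0x1000, target = -0x0ff0 and
 * key = 0x00200001, then
 *
 *	jump_entry_code()   == &entry->code   + code	   == 0xffffffff81fff000
 *	jump_entry_target() == &entry->target + target	   == 0xffffffff81fff014
 *	jump_entry_key()    == &entry->key    + (key & ~3) == 0xffffffff82200008
 *
 * Here bit 0 of 'key' is set, which jump_entry_is_branch() reports as a
 * 'likely' branch site, and bit 1, the init flag tested by
 * jump_entry_is_init(), is clear. 'code' and 'target' are 32-bit
 * self-relative offsets; 'key' is a long because, as noted above, the key
 * may be far away from the core kernel under KASLR.
 */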

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE				\
	{ .enabled = { 1 },				\
	  { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE				\
	{ .enabled = { 0 },				\
	  { .entries = (void *)JUMP_TYPE_FALSE } }

#else  /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static inline int static_key_count(struct static_key *key)
{
	return atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
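
/*
 * Example of the *_MAYBE helpers above (CONFIG_FOO and foo_key are made-up
 * names used purely for illustration):
 *
 *	DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO, foo_key);
 *
 * expands to DEFINE_STATIC_KEY_TRUE(foo_key) when IS_ENABLED(CONFIG_FOO)
 * evaluates to 1, and to DEFINE_STATIC_KEY_FALSE(foo_key) otherwise, so the
 * default branch direction follows the config. Such a key pairs naturally
 * with static_branch_maybe(CONFIG_FOO, &foo_key), defined further down,
 * which picks static_branch_likely() or static_branch_unlikely() the same
 * way.
 */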

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch |      likely (1)       |     unlikely (0)
 * ------------+-----------------------+------------------
 *             |                       |
 *   true (1)  |     ...               |     ...
 *             |     NOP               |     JMP L
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *             |                       |
 *  false (0)  |     ...               |     ...
 *             |     JMP L             |     NOP
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *   enabled   type   branch  |  instruction
 * ---------------------------+-------------
 *      0       0       0     |     NOP
 *      0       0       1     |     JMP
 *      0       1       0     |     NOP
 *      0       1       1     |     JMP
 *
 *      1       0       0     |     JMP
 *      1       0       1     |     NOP
 *      1       1       0     |     JMP
 *      1       1       1     |     NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif /* _LINUX_JUMP_LABEL_H */
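
/*
 * Usage sketch for the refcounted form above (appended for illustration;
 * the key and functions named here are hypothetical and not defined by
 * this header):
 *
 *	DEFINE_STATIC_KEY_FALSE(tracing_hook_key);
 *
 *	void register_hook(void)   { static_branch_inc(&tracing_hook_key); }
 *	void unregister_hook(void) { static_branch_dec(&tracing_hook_key); }
 *
 * A branch guarded by static_branch_unlikely(&tracing_hook_key) then runs
 * whenever at least one hook is registered: the branch is enabled while the
 * count is non-zero, so the first inc patches the jump in and the last dec
 * patches it back out.
 */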