Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

jump label: Introduce static_branch() interface

Introduce:

static __always_inline bool static_branch(struct jump_label_key *key);

instead of the old JUMP_LABEL(key, label) macro.

In this way, jump labels become really easy to use:

Define:

struct jump_label_key jump_key;

Can be used as:

if (static_branch(&jump_key))
do unlikely code

enable/disable via:

jump_label_inc(&jump_key);
jump_label_dec(&jump_key);

that's it!

For the jump labels disabled case, the static_branch() becomes an
atomic_read(), and jump_label_inc()/dec() are simply atomic_inc(),
atomic_dec() operations. We show testing results for this change below.

Thanks to H. Peter Anvin for suggesting the 'static_branch()' construct.

Since we now require a 'struct jump_label_key *key', we can store a pointer into
the jump table addresses. In this way, we can enable/disable jump labels, in
basically constant time. This change allows us to completely remove the previous
hashtable scheme. Thanks to Peter Zijlstra for this re-write.

Testing:

I ran a series of 'tbench 20' runs 5 times (with reboots) for 3
configurations, where tracepoints were disabled.

jump label configured in
avg: 815.6

jump label *not* configured in (using atomic reads)
avg: 800.1

jump label *not* configured in (regular reads)
avg: 803.4

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110316212947.GA8792@redhat.com>
Signed-off-by: Jason Baron <jbaron@redhat.com>
Suggested-by: H. Peter Anvin <hpa@linux.intel.com>
Tested-by: David Daney <ddaney@caviumnetworks.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

authored by

Jason Baron and committed by
Steven Rostedt
d430d3d7 ee5e51f5

+418 -548
+12 -10
arch/mips/include/asm/jump_label.h
··· 20 20 #define WORD_INSN ".word" 21 21 #endif 22 22 23 - #define JUMP_LABEL(key, label) \ 24 - do { \ 25 - asm goto("1:\tnop\n\t" \ 26 - "nop\n\t" \ 27 - ".pushsection __jump_table, \"a\"\n\t" \ 28 - WORD_INSN " 1b, %l[" #label "], %0\n\t" \ 29 - ".popsection\n\t" \ 30 - : : "i" (key) : : label); \ 31 - } while (0) 32 - 23 + static __always_inline bool arch_static_branch(struct jump_label_key *key) 24 + { 25 + asm goto("1:\tnop\n\t" 26 + "nop\n\t" 27 + ".pushsection __jump_table, \"aw\"\n\t" 28 + WORD_INSN " 1b, %l[l_yes], %0\n\t" 29 + ".popsection\n\t" 30 + : : "i" (key) : : l_yes); 31 + return false; 32 + l_yes: 33 + return true; 34 + } 33 35 34 36 #endif /* __KERNEL__ */ 35 37
+14 -11
arch/sparc/include/asm/jump_label.h
··· 7 7 8 8 #define JUMP_LABEL_NOP_SIZE 4 9 9 10 - #define JUMP_LABEL(key, label) \ 11 - do { \ 12 - asm goto("1:\n\t" \ 13 - "nop\n\t" \ 14 - "nop\n\t" \ 15 - ".pushsection __jump_table, \"a\"\n\t"\ 16 - ".align 4\n\t" \ 17 - ".word 1b, %l[" #label "], %c0\n\t" \ 18 - ".popsection \n\t" \ 19 - : : "i" (key) : : label);\ 20 - } while (0) 10 + static __always_inline bool arch_static_branch(struct jump_label_key *key) 11 + { 12 + asm goto("1:\n\t" 13 + "nop\n\t" 14 + "nop\n\t" 15 + ".pushsection __jump_table, \"aw\"\n\t" 16 + ".align 4\n\t" 17 + ".word 1b, %l[l_yes], %c0\n\t" 18 + ".popsection \n\t" 19 + : : "i" (key) : : l_yes); 20 + return false; 21 + l_yes: 22 + return true; 23 + } 21 24 22 25 #endif /* __KERNEL__ */ 23 26
+1 -2
arch/x86/include/asm/alternative.h
··· 4 4 #include <linux/types.h> 5 5 #include <linux/stddef.h> 6 6 #include <linux/stringify.h> 7 - #include <linux/jump_label.h> 8 7 #include <asm/asm.h> 9 8 10 9 /* ··· 190 191 extern void *text_poke_smp(void *addr, const void *opcode, size_t len); 191 192 extern void text_poke_smp_batch(struct text_poke_param *params, int n); 192 193 193 - #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 194 + #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) 194 195 #define IDEAL_NOP_SIZE_5 5 195 196 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; 196 197 extern void arch_init_ideal_nop5(void);
+14 -10
arch/x86/include/asm/jump_label.h
··· 5 5 6 6 #include <linux/types.h> 7 7 #include <asm/nops.h> 8 + #include <asm/asm.h> 8 9 9 10 #define JUMP_LABEL_NOP_SIZE 5 10 11 11 - # define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" 12 + #define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" 12 13 13 - # define JUMP_LABEL(key, label) \ 14 - do { \ 15 - asm goto("1:" \ 16 - JUMP_LABEL_INITIAL_NOP \ 17 - ".pushsection __jump_table, \"aw\" \n\t"\ 18 - _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ 19 - ".popsection \n\t" \ 20 - : : "i" (key) : : label); \ 21 - } while (0) 14 + static __always_inline bool arch_static_branch(struct jump_label_key *key) 15 + { 16 + asm goto("1:" 17 + JUMP_LABEL_INITIAL_NOP 18 + ".pushsection __jump_table, \"aw\" \n\t" 19 + _ASM_PTR "1b, %l[l_yes], %c0 \n\t" 20 + ".popsection \n\t" 21 + : : "i" (key) : : l_yes); 22 + return false; 23 + l_yes: 24 + return true; 25 + } 22 26 23 27 #endif /* __KERNEL__ */ 24 28
+1 -1
arch/x86/kernel/alternative.c
··· 679 679 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 680 680 } 681 681 682 - #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 682 + #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) 683 683 684 684 #ifdef CONFIG_X86_64 685 685 unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+1
arch/x86/kernel/module.c
··· 24 24 #include <linux/bug.h> 25 25 #include <linux/mm.h> 26 26 #include <linux/gfp.h> 27 + #include <linux/jump_label.h> 27 28 28 29 #include <asm/system.h> 29 30 #include <asm/page.h>
+4 -10
include/asm-generic/vmlinux.lds.h
··· 170 170 STRUCT_ALIGN(); \ 171 171 *(__tracepoints) \ 172 172 /* implement dynamic printk debug */ \ 173 + . = ALIGN(8); \ 174 + VMLINUX_SYMBOL(__start___jump_table) = .; \ 175 + *(__jump_table) \ 176 + VMLINUX_SYMBOL(__stop___jump_table) = .; \ 173 177 . = ALIGN(8); \ 174 178 VMLINUX_SYMBOL(__start___verbose) = .; \ 175 179 *(__verbose) \ ··· 231 227 } \ 232 228 \ 233 229 BUG_TABLE \ 234 - \ 235 - JUMP_TABLE \ 236 230 \ 237 231 /* PCI quirks */ \ 238 232 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ ··· 590 588 #else 591 589 #define BUG_TABLE 592 590 #endif 593 - 594 - #define JUMP_TABLE \ 595 - . = ALIGN(8); \ 596 - __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ 597 - VMLINUX_SYMBOL(__start___jump_table) = .; \ 598 - *(__jump_table) \ 599 - VMLINUX_SYMBOL(__stop___jump_table) = .; \ 600 - } 601 591 602 592 #ifdef CONFIG_PM_TRACE 603 593 #define TRACEDATA \
-2
include/linux/dynamic_debug.h
··· 1 1 #ifndef _DYNAMIC_DEBUG_H 2 2 #define _DYNAMIC_DEBUG_H 3 3 4 - #include <linux/jump_label.h> 5 - 6 4 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which 7 5 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They 8 6 * use independent hash functions, to reduce the chance of false positives.
+57 -34
include/linux/jump_label.h
··· 1 1 #ifndef _LINUX_JUMP_LABEL_H 2 2 #define _LINUX_JUMP_LABEL_H 3 3 4 + #include <linux/types.h> 5 + #include <linux/compiler.h> 6 + 4 7 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) 8 + 9 + struct jump_label_key { 10 + atomic_t enabled; 11 + struct jump_entry *entries; 12 + #ifdef CONFIG_MODULES 13 + struct jump_label_mod *next; 14 + #endif 15 + }; 16 + 5 17 # include <asm/jump_label.h> 6 18 # define HAVE_JUMP_LABEL 7 19 #endif 8 20 9 21 enum jump_label_type { 22 + JUMP_LABEL_DISABLE = 0, 10 23 JUMP_LABEL_ENABLE, 11 - JUMP_LABEL_DISABLE 12 24 }; 13 25 14 26 struct module; 15 27 16 28 #ifdef HAVE_JUMP_LABEL 29 + 30 + #ifdef CONFIG_MODULES 31 + #define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} 32 + #else 33 + #define JUMP_LABEL_INIT {{ 0 }, NULL} 34 + #endif 35 + 36 + static __always_inline bool static_branch(struct jump_label_key *key) 37 + { 38 + return arch_static_branch(key); 39 + } 17 40 18 41 extern struct jump_entry __start___jump_table[]; 19 42 extern struct jump_entry __stop___jump_table[]; ··· 46 23 extern void arch_jump_label_transform(struct jump_entry *entry, 47 24 enum jump_label_type type); 48 25 extern void arch_jump_label_text_poke_early(jump_label_t addr); 49 - extern void jump_label_update(unsigned long key, enum jump_label_type type); 50 - extern void jump_label_apply_nops(struct module *mod); 51 26 extern int jump_label_text_reserved(void *start, void *end); 52 - 53 - #define jump_label_enable(key) \ 54 - jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); 55 - 56 - #define jump_label_disable(key) \ 57 - jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); 27 + extern void jump_label_inc(struct jump_label_key *key); 28 + extern void jump_label_dec(struct jump_label_key *key); 29 + extern bool jump_label_enabled(struct jump_label_key *key); 30 + extern void jump_label_apply_nops(struct module *mod); 58 31 59 32 #else 60 33 61 - #define JUMP_LABEL(key, label) \ 62 - do { \ 63 - if (unlikely(*key)) \ 64 - goto label; \ 65 - 
} while (0) 34 + #include <asm/atomic.h> 66 35 67 - #define jump_label_enable(cond_var) \ 68 - do { \ 69 - *(cond_var) = 1; \ 70 - } while (0) 36 + #define JUMP_LABEL_INIT {ATOMIC_INIT(0)} 71 37 72 - #define jump_label_disable(cond_var) \ 73 - do { \ 74 - *(cond_var) = 0; \ 75 - } while (0) 38 + struct jump_label_key { 39 + atomic_t enabled; 40 + }; 76 41 77 - static inline int jump_label_apply_nops(struct module *mod) 42 + static __always_inline bool static_branch(struct jump_label_key *key) 78 43 { 79 - return 0; 44 + if (unlikely(atomic_read(&key->enabled))) 45 + return true; 46 + return false; 47 + } 48 + 49 + static inline void jump_label_inc(struct jump_label_key *key) 50 + { 51 + atomic_inc(&key->enabled); 52 + } 53 + 54 + static inline void jump_label_dec(struct jump_label_key *key) 55 + { 56 + atomic_dec(&key->enabled); 80 57 } 81 58 82 59 static inline int jump_label_text_reserved(void *start, void *end) ··· 87 64 static inline void jump_label_lock(void) {} 88 65 static inline void jump_label_unlock(void) {} 89 66 90 - #endif 67 + static inline bool jump_label_enabled(struct jump_label_key *key) 68 + { 69 + return !!atomic_read(&key->enabled); 70 + } 91 71 92 - #define COND_STMT(key, stmt) \ 93 - do { \ 94 - __label__ jl_enabled; \ 95 - JUMP_LABEL(key, jl_enabled); \ 96 - if (0) { \ 97 - jl_enabled: \ 98 - stmt; \ 99 - } \ 100 - } while (0) 72 + static inline int jump_label_apply_nops(struct module *mod) 73 + { 74 + return 0; 75 + } 76 + 77 + #endif 101 78 102 79 #endif
-44
include/linux/jump_label_ref.h
··· 1 - #ifndef _LINUX_JUMP_LABEL_REF_H 2 - #define _LINUX_JUMP_LABEL_REF_H 3 - 4 - #include <linux/jump_label.h> 5 - #include <asm/atomic.h> 6 - 7 - #ifdef HAVE_JUMP_LABEL 8 - 9 - static inline void jump_label_inc(atomic_t *key) 10 - { 11 - if (atomic_add_return(1, key) == 1) 12 - jump_label_enable(key); 13 - } 14 - 15 - static inline void jump_label_dec(atomic_t *key) 16 - { 17 - if (atomic_dec_and_test(key)) 18 - jump_label_disable(key); 19 - } 20 - 21 - #else /* !HAVE_JUMP_LABEL */ 22 - 23 - static inline void jump_label_inc(atomic_t *key) 24 - { 25 - atomic_inc(key); 26 - } 27 - 28 - static inline void jump_label_dec(atomic_t *key) 29 - { 30 - atomic_dec(key); 31 - } 32 - 33 - #undef JUMP_LABEL 34 - #define JUMP_LABEL(key, label) \ 35 - do { \ 36 - if (unlikely(__builtin_choose_expr( \ 37 - __builtin_types_compatible_p(typeof(key), atomic_t *), \ 38 - atomic_read((atomic_t *)(key)), *(key)))) \ 39 - goto label; \ 40 - } while (0) 41 - 42 - #endif /* HAVE_JUMP_LABEL */ 43 - 44 - #endif /* _LINUX_JUMP_LABEL_REF_H */
+13 -13
include/linux/perf_event.h
··· 505 505 #include <linux/ftrace.h> 506 506 #include <linux/cpu.h> 507 507 #include <linux/irq_work.h> 508 - #include <linux/jump_label_ref.h> 508 + #include <linux/jump_label.h> 509 509 #include <asm/atomic.h> 510 510 #include <asm/local.h> 511 511 ··· 1034 1034 return event->pmu->task_ctx_nr == perf_sw_context; 1035 1035 } 1036 1036 1037 - extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; 1037 + extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 1038 1038 1039 1039 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); 1040 1040 ··· 1063 1063 { 1064 1064 struct pt_regs hot_regs; 1065 1065 1066 - JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); 1067 - return; 1068 - 1069 - have_event: 1070 - if (!regs) { 1071 - perf_fetch_caller_regs(&hot_regs); 1072 - regs = &hot_regs; 1066 + if (static_branch(&perf_swevent_enabled[event_id])) { 1067 + if (!regs) { 1068 + perf_fetch_caller_regs(&hot_regs); 1069 + regs = &hot_regs; 1070 + } 1071 + __perf_sw_event(event_id, nr, nmi, regs, addr); 1073 1072 } 1074 - __perf_sw_event(event_id, nr, nmi, regs, addr); 1075 1073 } 1076 1074 1077 - extern atomic_t perf_sched_events; 1075 + extern struct jump_label_key perf_sched_events; 1078 1076 1079 1077 static inline void perf_event_task_sched_in(struct task_struct *task) 1080 1078 { 1081 - COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); 1079 + if (static_branch(&perf_sched_events)) 1080 + __perf_event_task_sched_in(task); 1082 1081 } 1083 1082 1084 1083 static inline ··· 1085 1086 { 1086 1087 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); 1087 1088 1088 - COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next)); 1089 + if (static_branch(&perf_sched_events)) 1090 + __perf_event_task_sched_out(task, next); 1089 1091 } 1090 1092 1091 1093 extern void perf_event_mmap(struct vm_area_struct *vma);
+10 -12
include/linux/tracepoint.h
··· 29 29 30 30 struct tracepoint { 31 31 const char *name; /* Tracepoint name */ 32 - int state; /* State. */ 32 + struct jump_label_key key; 33 33 void (*regfunc)(void); 34 34 void (*unregfunc)(void); 35 35 struct tracepoint_func __rcu *funcs; ··· 146 146 extern struct tracepoint __tracepoint_##name; \ 147 147 static inline void trace_##name(proto) \ 148 148 { \ 149 - JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ 150 - return; \ 151 - do_trace: \ 149 + if (static_branch(&__tracepoint_##name.key)) \ 152 150 __DO_TRACE(&__tracepoint_##name, \ 153 151 TP_PROTO(data_proto), \ 154 152 TP_ARGS(data_args), \ ··· 174 176 * structures, so we create an array of pointers that will be used for iteration 175 177 * on the tracepoints. 176 178 */ 177 - #define DEFINE_TRACE_FN(name, reg, unreg) \ 178 - static const char __tpstrtab_##name[] \ 179 - __attribute__((section("__tracepoints_strings"))) = #name; \ 180 - struct tracepoint __tracepoint_##name \ 181 - __attribute__((section("__tracepoints"))) = \ 182 - { __tpstrtab_##name, 0, reg, unreg, NULL }; \ 183 - static struct tracepoint * const __tracepoint_ptr_##name __used \ 184 - __attribute__((section("__tracepoints_ptrs"))) = \ 179 + #define DEFINE_TRACE_FN(name, reg, unreg) \ 180 + static const char __tpstrtab_##name[] \ 181 + __attribute__((section("__tracepoints_strings"))) = #name; \ 182 + struct tracepoint __tracepoint_##name \ 183 + __attribute__((section("__tracepoints"))) = \ 184 + { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ 185 + static struct tracepoint * const __tracepoint_ptr_##name __used \ 186 + __attribute__((section("__tracepoints_ptrs"))) = \ 185 187 &__tracepoint_##name; 186 188 187 189 #define DEFINE_TRACE(name) \
+280 -383
kernel/jump_label.c
··· 2 2 * jump label support 3 3 * 4 4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> 5 + * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> 5 6 * 6 7 */ 7 - #include <linux/jump_label.h> 8 8 #include <linux/memory.h> 9 9 #include <linux/uaccess.h> 10 10 #include <linux/module.h> 11 11 #include <linux/list.h> 12 - #include <linux/jhash.h> 13 12 #include <linux/slab.h> 14 13 #include <linux/sort.h> 15 14 #include <linux/err.h> 15 + #include <linux/jump_label.h> 16 16 17 17 #ifdef HAVE_JUMP_LABEL 18 18 19 - #define JUMP_LABEL_HASH_BITS 6 20 - #define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) 21 - static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; 22 - 23 19 /* mutex to protect coming/going of the the jump_label table */ 24 20 static DEFINE_MUTEX(jump_label_mutex); 25 - 26 - struct jump_label_entry { 27 - struct hlist_node hlist; 28 - struct jump_entry *table; 29 - int nr_entries; 30 - /* hang modules off here */ 31 - struct hlist_head modules; 32 - unsigned long key; 33 - }; 34 - 35 - struct jump_label_module_entry { 36 - struct hlist_node hlist; 37 - struct jump_entry *table; 38 - int nr_entries; 39 - struct module *mod; 40 - }; 41 21 42 22 void jump_label_lock(void) 43 23 { ··· 27 47 void jump_label_unlock(void) 28 48 { 29 49 mutex_unlock(&jump_label_mutex); 50 + } 51 + 52 + bool jump_label_enabled(struct jump_label_key *key) 53 + { 54 + return !!atomic_read(&key->enabled); 30 55 } 31 56 32 57 static int jump_label_cmp(const void *a, const void *b) ··· 49 64 } 50 65 51 66 static void 52 - sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) 67 + jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) 53 68 { 54 69 unsigned long size; 55 70 ··· 58 73 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); 59 74 } 60 75 61 - static struct jump_label_entry *get_jump_label_entry(jump_label_t key) 76 + static void jump_label_update(struct jump_label_key *key, int enable); 77 + 78 + 
void jump_label_inc(struct jump_label_key *key) 62 79 { 63 - struct hlist_head *head; 64 - struct hlist_node *node; 65 - struct jump_label_entry *e; 66 - u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); 67 - 68 - head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; 69 - hlist_for_each_entry(e, node, head, hlist) { 70 - if (key == e->key) 71 - return e; 72 - } 73 - return NULL; 74 - } 75 - 76 - static struct jump_label_entry * 77 - add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) 78 - { 79 - struct hlist_head *head; 80 - struct jump_label_entry *e; 81 - u32 hash; 82 - 83 - e = get_jump_label_entry(key); 84 - if (e) 85 - return ERR_PTR(-EEXIST); 86 - 87 - e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); 88 - if (!e) 89 - return ERR_PTR(-ENOMEM); 90 - 91 - hash = jhash((void *)&key, sizeof(jump_label_t), 0); 92 - head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; 93 - e->key = key; 94 - e->table = table; 95 - e->nr_entries = nr_entries; 96 - INIT_HLIST_HEAD(&(e->modules)); 97 - hlist_add_head(&e->hlist, head); 98 - return e; 99 - } 100 - 101 - static int 102 - build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) 103 - { 104 - struct jump_entry *iter, *iter_begin; 105 - struct jump_label_entry *entry; 106 - int count; 107 - 108 - sort_jump_label_entries(start, stop); 109 - iter = start; 110 - while (iter < stop) { 111 - entry = get_jump_label_entry(iter->key); 112 - if (!entry) { 113 - iter_begin = iter; 114 - count = 0; 115 - while ((iter < stop) && 116 - (iter->key == iter_begin->key)) { 117 - iter++; 118 - count++; 119 - } 120 - entry = add_jump_label_entry(iter_begin->key, 121 - count, iter_begin); 122 - if (IS_ERR(entry)) 123 - return PTR_ERR(entry); 124 - } else { 125 - WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); 126 - return -1; 127 - } 128 - } 129 - return 0; 130 - } 131 - 132 - /*** 133 - * jump_label_update - update jump label text 134 - * 
@key - key value associated with a a jump label 135 - * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE 136 - * 137 - * Will enable/disable the jump for jump label @key, depending on the 138 - * value of @type. 139 - * 140 - */ 141 - 142 - void jump_label_update(unsigned long key, enum jump_label_type type) 143 - { 144 - struct jump_entry *iter; 145 - struct jump_label_entry *entry; 146 - struct hlist_node *module_node; 147 - struct jump_label_module_entry *e_module; 148 - int count; 80 + if (atomic_inc_not_zero(&key->enabled)) 81 + return; 149 82 150 83 jump_label_lock(); 151 - entry = get_jump_label_entry((jump_label_t)key); 152 - if (entry) { 153 - count = entry->nr_entries; 154 - iter = entry->table; 155 - while (count--) { 156 - if (kernel_text_address(iter->code)) 157 - arch_jump_label_transform(iter, type); 158 - iter++; 159 - } 160 - /* eanble/disable jump labels in modules */ 161 - hlist_for_each_entry(e_module, module_node, &(entry->modules), 162 - hlist) { 163 - count = e_module->nr_entries; 164 - iter = e_module->table; 165 - while (count--) { 166 - if (iter->key && 167 - kernel_text_address(iter->code)) 168 - arch_jump_label_transform(iter, type); 169 - iter++; 170 - } 171 - } 172 - } 84 + if (atomic_add_return(1, &key->enabled) == 1) 85 + jump_label_update(key, JUMP_LABEL_ENABLE); 86 + jump_label_unlock(); 87 + } 88 + 89 + void jump_label_dec(struct jump_label_key *key) 90 + { 91 + if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) 92 + return; 93 + 94 + jump_label_update(key, JUMP_LABEL_DISABLE); 173 95 jump_label_unlock(); 174 96 } 175 97 ··· 89 197 return 0; 90 198 } 91 199 92 - #ifdef CONFIG_MODULES 93 - 94 - static int module_conflict(void *start, void *end) 200 + static int __jump_label_text_reserved(struct jump_entry *iter_start, 201 + struct jump_entry *iter_stop, void *start, void *end) 95 202 { 96 - struct hlist_head *head; 97 - struct hlist_node *node, *node_next, *module_node, *module_node_next; 98 - struct 
jump_label_entry *e; 99 - struct jump_label_module_entry *e_module; 100 203 struct jump_entry *iter; 101 - int i, count; 102 - int conflict = 0; 103 204 104 - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { 105 - head = &jump_label_table[i]; 106 - hlist_for_each_entry_safe(e, node, node_next, head, hlist) { 107 - hlist_for_each_entry_safe(e_module, module_node, 108 - module_node_next, 109 - &(e->modules), hlist) { 110 - count = e_module->nr_entries; 111 - iter = e_module->table; 112 - while (count--) { 113 - if (addr_conflict(iter, start, end)) { 114 - conflict = 1; 115 - goto out; 116 - } 117 - iter++; 118 - } 119 - } 120 - } 205 + iter = iter_start; 206 + while (iter < iter_stop) { 207 + if (addr_conflict(iter, start, end)) 208 + return 1; 209 + iter++; 121 210 } 122 - out: 123 - return conflict; 211 + 212 + return 0; 124 213 } 125 214 215 + static void __jump_label_update(struct jump_label_key *key, 216 + struct jump_entry *entry, int enable) 217 + { 218 + for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { 219 + /* 220 + * entry->code set to 0 invalidates module init text sections 221 + * kernel_text_address() verifies we are not in core kernel 222 + * init code, see jump_label_invalidate_module_init(). 223 + */ 224 + if (entry->code && kernel_text_address(entry->code)) 225 + arch_jump_label_transform(entry, enable); 226 + } 227 + } 228 + 229 + /* 230 + * Not all archs need this. 
231 + */ 232 + void __weak arch_jump_label_text_poke_early(jump_label_t addr) 233 + { 234 + } 235 + 236 + static __init int jump_label_init(void) 237 + { 238 + struct jump_entry *iter_start = __start___jump_table; 239 + struct jump_entry *iter_stop = __stop___jump_table; 240 + struct jump_label_key *key = NULL; 241 + struct jump_entry *iter; 242 + 243 + jump_label_lock(); 244 + jump_label_sort_entries(iter_start, iter_stop); 245 + 246 + for (iter = iter_start; iter < iter_stop; iter++) { 247 + arch_jump_label_text_poke_early(iter->code); 248 + if (iter->key == (jump_label_t)(unsigned long)key) 249 + continue; 250 + 251 + key = (struct jump_label_key *)(unsigned long)iter->key; 252 + atomic_set(&key->enabled, 0); 253 + key->entries = iter; 254 + #ifdef CONFIG_MODULES 255 + key->next = NULL; 126 256 #endif 257 + } 258 + jump_label_unlock(); 259 + 260 + return 0; 261 + } 262 + early_initcall(jump_label_init); 263 + 264 + #ifdef CONFIG_MODULES 265 + 266 + struct jump_label_mod { 267 + struct jump_label_mod *next; 268 + struct jump_entry *entries; 269 + struct module *mod; 270 + }; 271 + 272 + static int __jump_label_mod_text_reserved(void *start, void *end) 273 + { 274 + struct module *mod; 275 + 276 + mod = __module_text_address((unsigned long)start); 277 + if (!mod) 278 + return 0; 279 + 280 + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); 281 + 282 + return __jump_label_text_reserved(mod->jump_entries, 283 + mod->jump_entries + mod->num_jump_entries, 284 + start, end); 285 + } 286 + 287 + static void __jump_label_mod_update(struct jump_label_key *key, int enable) 288 + { 289 + struct jump_label_mod *mod = key->next; 290 + 291 + while (mod) { 292 + __jump_label_update(key, mod->entries, enable); 293 + mod = mod->next; 294 + } 295 + } 296 + 297 + /*** 298 + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() 299 + * @mod: module to patch 300 + * 301 + * Allow for run-time selection of the optimal nops. 
Before the module 302 + * loads patch these with arch_get_jump_label_nop(), which is specified by 303 + * the arch specific jump label code. 304 + */ 305 + void jump_label_apply_nops(struct module *mod) 306 + { 307 + struct jump_entry *iter_start = mod->jump_entries; 308 + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; 309 + struct jump_entry *iter; 310 + 311 + /* if the module doesn't have jump label entries, just return */ 312 + if (iter_start == iter_stop) 313 + return; 314 + 315 + for (iter = iter_start; iter < iter_stop; iter++) 316 + arch_jump_label_text_poke_early(iter->code); 317 + } 318 + 319 + static int jump_label_add_module(struct module *mod) 320 + { 321 + struct jump_entry *iter_start = mod->jump_entries; 322 + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; 323 + struct jump_entry *iter; 324 + struct jump_label_key *key = NULL; 325 + struct jump_label_mod *jlm; 326 + 327 + /* if the module doesn't have jump label entries, just return */ 328 + if (iter_start == iter_stop) 329 + return 0; 330 + 331 + jump_label_sort_entries(iter_start, iter_stop); 332 + 333 + for (iter = iter_start; iter < iter_stop; iter++) { 334 + if (iter->key == (jump_label_t)(unsigned long)key) 335 + continue; 336 + 337 + key = (struct jump_label_key *)(unsigned long)iter->key; 338 + 339 + if (__module_address(iter->key) == mod) { 340 + atomic_set(&key->enabled, 0); 341 + key->entries = iter; 342 + key->next = NULL; 343 + continue; 344 + } 345 + 346 + jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); 347 + if (!jlm) 348 + return -ENOMEM; 349 + 350 + jlm->mod = mod; 351 + jlm->entries = iter; 352 + jlm->next = key->next; 353 + key->next = jlm; 354 + 355 + if (jump_label_enabled(key)) 356 + __jump_label_update(key, iter, JUMP_LABEL_ENABLE); 357 + } 358 + 359 + return 0; 360 + } 361 + 362 + static void jump_label_del_module(struct module *mod) 363 + { 364 + struct jump_entry *iter_start = mod->jump_entries; 365 + struct jump_entry 
*iter_stop = iter_start + mod->num_jump_entries; 366 + struct jump_entry *iter; 367 + struct jump_label_key *key = NULL; 368 + struct jump_label_mod *jlm, **prev; 369 + 370 + for (iter = iter_start; iter < iter_stop; iter++) { 371 + if (iter->key == (jump_label_t)(unsigned long)key) 372 + continue; 373 + 374 + key = (struct jump_label_key *)(unsigned long)iter->key; 375 + 376 + if (__module_address(iter->key) == mod) 377 + continue; 378 + 379 + prev = &key->next; 380 + jlm = key->next; 381 + 382 + while (jlm && jlm->mod != mod) { 383 + prev = &jlm->next; 384 + jlm = jlm->next; 385 + } 386 + 387 + if (jlm) { 388 + *prev = jlm->next; 389 + kfree(jlm); 390 + } 391 + } 392 + } 393 + 394 + static void jump_label_invalidate_module_init(struct module *mod) 395 + { 396 + struct jump_entry *iter_start = mod->jump_entries; 397 + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; 398 + struct jump_entry *iter; 399 + 400 + for (iter = iter_start; iter < iter_stop; iter++) { 401 + if (within_module_init(iter->code, mod)) 402 + iter->code = 0; 403 + } 404 + } 405 + 406 + static int 407 + jump_label_module_notify(struct notifier_block *self, unsigned long val, 408 + void *data) 409 + { 410 + struct module *mod = data; 411 + int ret = 0; 412 + 413 + switch (val) { 414 + case MODULE_STATE_COMING: 415 + jump_label_lock(); 416 + ret = jump_label_add_module(mod); 417 + if (ret) 418 + jump_label_del_module(mod); 419 + jump_label_unlock(); 420 + break; 421 + case MODULE_STATE_GOING: 422 + jump_label_lock(); 423 + jump_label_del_module(mod); 424 + jump_label_unlock(); 425 + break; 426 + case MODULE_STATE_LIVE: 427 + jump_label_lock(); 428 + jump_label_invalidate_module_init(mod); 429 + jump_label_unlock(); 430 + break; 431 + } 432 + 433 + return notifier_from_errno(ret); 434 + } 435 + 436 + struct notifier_block jump_label_module_nb = { 437 + .notifier_call = jump_label_module_notify, 438 + .priority = 1, /* higher than tracepoints */ 439 + }; 440 + 441 + static __init 
int jump_label_init_module(void) 442 + { 443 + return register_module_notifier(&jump_label_module_nb); 444 + } 445 + early_initcall(jump_label_init_module); 446 + 447 + #endif /* CONFIG_MODULES */ 127 448 128 449 /*** 129 450 * jump_label_text_reserved - check if addr range is reserved ··· 353 248 */ 354 249 int jump_label_text_reserved(void *start, void *end) 355 250 { 356 - struct jump_entry *iter; 357 - struct jump_entry *iter_start = __start___jump_table; 358 - struct jump_entry *iter_stop = __start___jump_table; 359 - int conflict = 0; 251 + int ret = __jump_label_text_reserved(__start___jump_table, 252 + __stop___jump_table, start, end); 360 253 361 - iter = iter_start; 362 - while (iter < iter_stop) { 363 - if (addr_conflict(iter, start, end)) { 364 - conflict = 1; 365 - goto out; 366 - } 367 - iter++; 368 - } 254 + if (ret) 255 + return ret; 369 256 370 - /* now check modules */ 371 257 #ifdef CONFIG_MODULES 372 - conflict = module_conflict(start, end); 258 + ret = __jump_label_mod_text_reserved(start, end); 373 259 #endif 374 - out: 375 - return conflict; 376 - } 377 - 378 - /* 379 - * Not all archs need this. 
380 - */ 381 - void __weak arch_jump_label_text_poke_early(jump_label_t addr) 382 - { 383 - } 384 - 385 - static __init int init_jump_label(void) 386 - { 387 - int ret; 388 - struct jump_entry *iter_start = __start___jump_table; 389 - struct jump_entry *iter_stop = __stop___jump_table; 390 - struct jump_entry *iter; 391 - 392 - jump_label_lock(); 393 - ret = build_jump_label_hashtable(__start___jump_table, 394 - __stop___jump_table); 395 - iter = iter_start; 396 - while (iter < iter_stop) { 397 - arch_jump_label_text_poke_early(iter->code); 398 - iter++; 399 - } 400 - jump_label_unlock(); 401 260 return ret; 402 261 } 403 - early_initcall(init_jump_label); 262 + 263 + static void jump_label_update(struct jump_label_key *key, int enable) 264 + { 265 + struct jump_entry *entry = key->entries; 266 + 267 + /* if there are no users, entry can be NULL */ 268 + if (entry) 269 + __jump_label_update(key, entry, enable); 404 270 405 271 #ifdef CONFIG_MODULES 406 - 407 - static struct jump_label_module_entry * 408 - add_jump_label_module_entry(struct jump_label_entry *entry, 409 - struct jump_entry *iter_begin, 410 - int count, struct module *mod) 411 - { 412 - struct jump_label_module_entry *e; 413 - 414 - e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); 415 - if (!e) 416 - return ERR_PTR(-ENOMEM); 417 - e->mod = mod; 418 - e->nr_entries = count; 419 - e->table = iter_begin; 420 - hlist_add_head(&e->hlist, &entry->modules); 421 - return e; 272 + __jump_label_mod_update(key, enable); 273 + #endif 422 274 } 423 - 424 - static int add_jump_label_module(struct module *mod) 425 - { 426 - struct jump_entry *iter, *iter_begin; 427 - struct jump_label_entry *entry; 428 - struct jump_label_module_entry *module_entry; 429 - int count; 430 - 431 - /* if the module doesn't have jump label entries, just return */ 432 - if (!mod->num_jump_entries) 433 - return 0; 434 - 435 - sort_jump_label_entries(mod->jump_entries, 436 - mod->jump_entries + mod->num_jump_entries); 437 - 
iter = mod->jump_entries; 438 - while (iter < mod->jump_entries + mod->num_jump_entries) { 439 - entry = get_jump_label_entry(iter->key); 440 - iter_begin = iter; 441 - count = 0; 442 - while ((iter < mod->jump_entries + mod->num_jump_entries) && 443 - (iter->key == iter_begin->key)) { 444 - iter++; 445 - count++; 446 - } 447 - if (!entry) { 448 - entry = add_jump_label_entry(iter_begin->key, 0, NULL); 449 - if (IS_ERR(entry)) 450 - return PTR_ERR(entry); 451 - } 452 - module_entry = add_jump_label_module_entry(entry, iter_begin, 453 - count, mod); 454 - if (IS_ERR(module_entry)) 455 - return PTR_ERR(module_entry); 456 - } 457 - return 0; 458 - } 459 - 460 - static void remove_jump_label_module(struct module *mod) 461 - { 462 - struct hlist_head *head; 463 - struct hlist_node *node, *node_next, *module_node, *module_node_next; 464 - struct jump_label_entry *e; 465 - struct jump_label_module_entry *e_module; 466 - int i; 467 - 468 - /* if the module doesn't have jump label entries, just return */ 469 - if (!mod->num_jump_entries) 470 - return; 471 - 472 - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { 473 - head = &jump_label_table[i]; 474 - hlist_for_each_entry_safe(e, node, node_next, head, hlist) { 475 - hlist_for_each_entry_safe(e_module, module_node, 476 - module_node_next, 477 - &(e->modules), hlist) { 478 - if (e_module->mod == mod) { 479 - hlist_del(&e_module->hlist); 480 - kfree(e_module); 481 - } 482 - } 483 - if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { 484 - hlist_del(&e->hlist); 485 - kfree(e); 486 - } 487 - } 488 - } 489 - } 490 - 491 - static void remove_jump_label_module_init(struct module *mod) 492 - { 493 - struct hlist_head *head; 494 - struct hlist_node *node, *node_next, *module_node, *module_node_next; 495 - struct jump_label_entry *e; 496 - struct jump_label_module_entry *e_module; 497 - struct jump_entry *iter; 498 - int i, count; 499 - 500 - /* if the module doesn't have jump label entries, just return */ 501 - if 
(!mod->num_jump_entries) 502 - return; 503 - 504 - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { 505 - head = &jump_label_table[i]; 506 - hlist_for_each_entry_safe(e, node, node_next, head, hlist) { 507 - hlist_for_each_entry_safe(e_module, module_node, 508 - module_node_next, 509 - &(e->modules), hlist) { 510 - if (e_module->mod != mod) 511 - continue; 512 - count = e_module->nr_entries; 513 - iter = e_module->table; 514 - while (count--) { 515 - if (within_module_init(iter->code, mod)) 516 - iter->key = 0; 517 - iter++; 518 - } 519 - } 520 - } 521 - } 522 - } 523 - 524 - static int 525 - jump_label_module_notify(struct notifier_block *self, unsigned long val, 526 - void *data) 527 - { 528 - struct module *mod = data; 529 - int ret = 0; 530 - 531 - switch (val) { 532 - case MODULE_STATE_COMING: 533 - jump_label_lock(); 534 - ret = add_jump_label_module(mod); 535 - if (ret) 536 - remove_jump_label_module(mod); 537 - jump_label_unlock(); 538 - break; 539 - case MODULE_STATE_GOING: 540 - jump_label_lock(); 541 - remove_jump_label_module(mod); 542 - jump_label_unlock(); 543 - break; 544 - case MODULE_STATE_LIVE: 545 - jump_label_lock(); 546 - remove_jump_label_module_init(mod); 547 - jump_label_unlock(); 548 - break; 549 - } 550 - return ret; 551 - } 552 - 553 - /*** 554 - * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() 555 - * @mod: module to patch 556 - * 557 - * Allow for run-time selection of the optimal nops. Before the module 558 - * loads patch these with arch_get_jump_label_nop(), which is specified by 559 - * the arch specific jump label code. 
560 - */ 561 - void jump_label_apply_nops(struct module *mod) 562 - { 563 - struct jump_entry *iter; 564 - 565 - /* if the module doesn't have jump label entries, just return */ 566 - if (!mod->num_jump_entries) 567 - return; 568 - 569 - iter = mod->jump_entries; 570 - while (iter < mod->jump_entries + mod->num_jump_entries) { 571 - arch_jump_label_text_poke_early(iter->code); 572 - iter++; 573 - } 574 - } 575 - 576 - struct notifier_block jump_label_module_nb = { 577 - .notifier_call = jump_label_module_notify, 578 - .priority = 0, 579 - }; 580 - 581 - static __init int init_jump_label_module(void) 582 - { 583 - return register_module_notifier(&jump_label_module_nb); 584 - } 585 - early_initcall(init_jump_label_module); 586 - 587 - #endif /* CONFIG_MODULES */ 588 275 589 276 #endif
+2 -2
kernel/perf_event.c
··· 125 125 * perf_sched_events : >0 events exist 126 126 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 127 127 */ 128 - atomic_t perf_sched_events __read_mostly; 128 + struct jump_label_key perf_sched_events __read_mostly; 129 129 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 130 130 131 131 static atomic_t nr_mmap_events __read_mostly; ··· 5417 5417 return err; 5418 5418 } 5419 5419 5420 - atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; 5420 + struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 5421 5421 5422 5422 static void sw_perf_event_destroy(struct perf_event *event) 5423 5423 {
+9 -14
kernel/tracepoint.c
··· 251 251 { 252 252 WARN_ON(strcmp((*entry)->name, elem->name) != 0); 253 253 254 - if (elem->regfunc && !elem->state && active) 254 + if (elem->regfunc && !jump_label_enabled(&elem->key) && active) 255 255 elem->regfunc(); 256 - else if (elem->unregfunc && elem->state && !active) 256 + else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) 257 257 elem->unregfunc(); 258 258 259 259 /* ··· 264 264 * is used. 265 265 */ 266 266 rcu_assign_pointer(elem->funcs, (*entry)->funcs); 267 - if (!elem->state && active) { 268 - jump_label_enable(&elem->state); 269 - elem->state = active; 270 - } else if (elem->state && !active) { 271 - jump_label_disable(&elem->state); 272 - elem->state = active; 273 - } 267 + if (active && !jump_label_enabled(&elem->key)) 268 + jump_label_inc(&elem->key); 269 + else if (!active && jump_label_enabled(&elem->key)) 270 + jump_label_dec(&elem->key); 274 271 } 275 272 276 273 /* ··· 278 281 */ 279 282 static void disable_tracepoint(struct tracepoint *elem) 280 283 { 281 - if (elem->unregfunc && elem->state) 284 + if (elem->unregfunc && jump_label_enabled(&elem->key)) 282 285 elem->unregfunc(); 283 286 284 - if (elem->state) { 285 - jump_label_disable(&elem->state); 286 - elem->state = 0; 287 - } 287 + if (jump_label_enabled(&elem->key)) 288 + jump_label_dec(&elem->key); 288 289 rcu_assign_pointer(elem->funcs, NULL); 289 290 } 290 291