Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

jump_label: Reorder hotplug lock and jump_label_lock

The conversion of the hotplug locking to a percpu rwsem unearthed lock
ordering issues all over the place.

The jump_label code has two issues:

1) Nested get_online_cpus() invocations

2) Ordering problems vs. the cpus rwsem and the jump_label_mutex

To cure these, the following lock order has been established:

cpus_rwsem -> jump_label_lock -> text_mutex

Even if not all architectures need protection against CPU hotplug, taking
cpus_rwsem before jump_label_lock is now mandatory in code paths which
actually modify code and therefore need text_mutex protection.

Move the get_online_cpus() invocations into the core jump label code and
establish the proper lock order where required.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: "David S. Miller" <davem@davemloft.net>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de

+14 -14
-2
arch/mips/kernel/jump_label.c
··· 58 58 insn.word = 0; /* nop */ 59 59 } 60 60 61 - get_online_cpus(); 62 61 mutex_lock(&text_mutex); 63 62 if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { 64 63 insn_p->halfword[0] = insn.word >> 16; ··· 69 70 (unsigned long)insn_p + sizeof(*insn_p)); 70 71 71 72 mutex_unlock(&text_mutex); 72 - put_online_cpus(); 73 73 } 74 74 75 75 #endif /* HAVE_JUMP_LABEL */
-2
arch/sparc/kernel/jump_label.c
··· 41 41 val = 0x01000000; 42 42 } 43 43 44 - get_online_cpus(); 45 44 mutex_lock(&text_mutex); 46 45 *insn = val; 47 46 flushi(insn); 48 47 mutex_unlock(&text_mutex); 49 - put_online_cpus(); 50 48 } 51 49 52 50 #endif
-2
arch/tile/kernel/jump_label.c
··· 45 45 void arch_jump_label_transform(struct jump_entry *e, 46 46 enum jump_label_type type) 47 47 { 48 - get_online_cpus(); 49 48 mutex_lock(&text_mutex); 50 49 51 50 __jump_label_transform(e, type); 52 51 flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits)); 53 52 54 53 mutex_unlock(&text_mutex); 55 - put_online_cpus(); 56 54 } 57 55 58 56 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
-2
arch/x86/kernel/jump_label.c
··· 105 105 void arch_jump_label_transform(struct jump_entry *entry, 106 106 enum jump_label_type type) 107 107 { 108 - get_online_cpus(); 109 108 mutex_lock(&text_mutex); 110 109 __jump_label_transform(entry, type, NULL, 0); 111 110 mutex_unlock(&text_mutex); 112 - put_online_cpus(); 113 111 } 114 112 115 113 static enum {
+14 -6
kernel/jump_label.c
··· 15 15 #include <linux/static_key.h> 16 16 #include <linux/jump_label_ratelimit.h> 17 17 #include <linux/bug.h> 18 + #include <linux/cpu.h> 18 19 19 20 #ifdef HAVE_JUMP_LABEL 20 21 ··· 125 124 return; 126 125 } 127 126 127 + cpus_read_lock(); 128 128 jump_label_lock(); 129 129 if (atomic_read(&key->enabled) == 0) { 130 130 atomic_set(&key->enabled, -1); ··· 135 133 atomic_inc(&key->enabled); 136 134 } 137 135 jump_label_unlock(); 136 + cpus_read_unlock(); 138 137 } 139 138 EXPORT_SYMBOL_GPL(static_key_slow_inc); 140 139 141 140 static void __static_key_slow_dec(struct static_key *key, 142 141 unsigned long rate_limit, struct delayed_work *work) 143 142 { 143 + cpus_read_lock(); 144 144 /* 145 145 * The negative count check is valid even when a negative 146 146 * key->enabled is in use by static_key_slow_inc(); a ··· 153 149 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { 154 150 WARN(atomic_read(&key->enabled) < 0, 155 151 "jump label: negative count!\n"); 152 + cpus_read_unlock(); 156 153 return; 157 154 } 158 155 ··· 164 159 jump_label_update(key); 165 160 } 166 161 jump_label_unlock(); 162 + cpus_read_unlock(); 167 163 } 168 164 169 165 static void jump_label_update_timeout(struct work_struct *work) ··· 340 334 if (static_key_initialized) 341 335 return; 342 336 337 + cpus_read_lock(); 343 338 jump_label_lock(); 344 339 jump_label_sort_entries(iter_start, iter_stop); 345 340 ··· 360 353 } 361 354 static_key_initialized = true; 362 355 jump_label_unlock(); 356 + cpus_read_unlock(); 363 357 } 364 358 365 359 #ifdef CONFIG_MODULES ··· 598 590 struct module *mod = data; 599 591 int ret = 0; 600 592 593 + cpus_read_lock(); 594 + jump_label_lock(); 595 + 601 596 switch (val) { 602 597 case MODULE_STATE_COMING: 603 - jump_label_lock(); 604 598 ret = jump_label_add_module(mod); 605 599 if (ret) { 606 600 WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n"); 607 601 jump_label_del_module(mod); 608 602 } 609 - 
jump_label_unlock(); 610 603 break; 611 604 case MODULE_STATE_GOING: 612 - jump_label_lock(); 613 605 jump_label_del_module(mod); 614 - jump_label_unlock(); 615 606 break; 616 607 case MODULE_STATE_LIVE: 617 - jump_label_lock(); 618 608 jump_label_invalidate_module_init(mod); 619 - jump_label_unlock(); 620 609 break; 621 610 } 611 + 612 + jump_label_unlock(); 613 + cpus_read_unlock(); 622 614 623 615 return notifier_from_errno(ret); 624 616 }