jump_label: Fix static_key_slow_dec() yet again

While commit 83ab38ef0a0b ("jump_label: Fix concurrency issues in
static_key_slow_dec()") fixed one problem, it created yet another;
notably, the following interleaving is now possible:

  slow_dec
    if (try_dec) // dec_not_one-ish, false
    // enabled == 1

                                slow_inc
                                  if (inc_not_disabled) // inc_not_zero-ish
                                  // enabled == 2
                                  return

  guard(mutex)(&jump_label_mutex);
  if (atomic_cmpxchg(1,0)==1) // false, we're 2

                                slow_dec
                                  if (try_dec) // dec_not_one, true
                                  // enabled == 1
                                  return

  else
    try_dec() // dec_not_one, false
    WARN

Use dec_and_test instead of cmpxchg(), like it was prior to
83ab38ef0a0b. Add a few WARNs for the paranoid.
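
In other words, once jump_label_mutex is held the only job left is to
drop one reference and patch the text if it was the last one, which is
exactly what dec_and_test expresses; cmpxchg(1, 0) only covered the
enabled == 1 case and left the fallback free to race again. Condensed
sketch of the locked region after the change (see the diff below for
the exact patch):

	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);

	/* -1 (enable still in progress) is impossible with the mutex held. */
	if (WARN_ON_ONCE(val == -1))
		return;

	/* 0 means the count is about to underflow. */
	if (WARN_ON_ONCE(val == 0))
		return;

	/* Drop the reference; only the final 1 -> 0 patches the text. */
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);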

Fixes: 83ab38ef0a0b ("jump_label: Fix concurrency issues in static_key_slow_dec()")
Reported-by: "Darrick J. Wong" <djwong@kernel.org>
Tested-by: Klara Modin <klarasmodin@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

kernel/jump_label.c | +27 -7
···
 	jump_label_update(key);
 	/*
 	 * Ensure that when static_key_fast_inc_not_disabled() or
-	 * static_key_slow_try_dec() observe the positive value,
+	 * static_key_dec_not_one() observe the positive value,
 	 * they must also observe all the text changes.
 	 */
 	atomic_set_release(&key->enabled, 1);
···
 }
 EXPORT_SYMBOL_GPL(static_key_disable);

-static bool static_key_slow_try_dec(struct static_key *key)
+static bool static_key_dec_not_one(struct static_key *key)
 {
 	int v;

···
 		 * enabled. This suggests an ordering problem on the user side.
 		 */
 		WARN_ON_ONCE(v < 0);
+
+		/*
+		 * Warn about underflow, and lie about success in an attempt to
+		 * not make things worse.
+		 */
+		if (WARN_ON_ONCE(v == 0))
+			return true;
+
 		if (v <= 1)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
···
 static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	lockdep_assert_cpus_held();
+	int val;

-	if (static_key_slow_try_dec(key))
+	if (static_key_dec_not_one(key))
 		return;

 	guard(mutex)(&jump_label_mutex);
-	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+	val = atomic_read(&key->enabled);
+	/*
+	 * It should be impossible to observe -1 with jump_label_mutex held,
+	 * see static_key_slow_inc_cpuslocked().
+	 */
+	if (WARN_ON_ONCE(val == -1))
+		return;
+	/*
+	 * Cannot already be 0, something went sideways.
+	 */
+	if (WARN_ON_ONCE(val == 0))
+		return;
+
+	if (atomic_dec_and_test(&key->enabled))
 		jump_label_update(key);
-	else
-		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }

 static void __static_key_slow_dec(struct static_key *key)
···
 {
 	STATIC_KEY_CHECK_USE(key);

-	if (static_key_slow_try_dec(key))
+	if (static_key_dec_not_one(key))
 		return;

 	schedule_delayed_work(work, timeout);