Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ratelimit: Reduce ___ratelimit() false-positive rate limiting

Retain the locked design, but check rate-limiting even when the lock
could not be acquired.

Link: https://lore.kernel.org/all/Z_VRo63o2UsVoxLG@pathway.suse.cz/
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>

Authored by Petr Mladek and committed by Paul E. McKenney
cf8cfa8a e64a348d

+40 -15
+1 -1
include/linux/ratelimit.h
··· 44 44 raw_spin_lock_irqsave(&rs->lock, flags); 45 45 rs->interval = interval_init; 46 46 rs->flags &= ~RATELIMIT_INITIALIZED; 47 - rs->printed = 0; 47 + atomic_set(&rs->rs_n_left, rs->burst); 48 48 ratelimit_state_reset_miss(rs); 49 49 raw_spin_unlock_irqrestore(&rs->lock, flags); 50 50 }
+1 -1
include/linux/ratelimit_types.h
··· 18 18 19 19 int interval; 20 20 int burst; 21 - int printed; 21 + atomic_t rs_n_left; 22 22 atomic_t missed; 23 23 unsigned int flags; 24 24 unsigned long begin;
+38 -13
lib/ratelimit.c
··· 39 39 return 1; 40 40 41 41 /* 42 - * If we contend on this state's lock then almost 43 - * by definition we are too busy to print a message, 44 - * in addition to the one that will be printed by 45 - * the entity that is holding the lock already: 42 + * If we contend on this state's lock then just check if 43 + * the current burst is used or not. It might cause 44 + * false positive when we are past the interval and 45 + * the current lock owner is just about to reset it. 46 46 */ 47 47 if (!raw_spin_trylock_irqsave(&rs->lock, flags)) { 48 + unsigned int rs_flags = READ_ONCE(rs->flags); 49 + 50 + if (rs_flags & RATELIMIT_INITIALIZED && burst) { 51 + int n_left; 52 + 53 + n_left = atomic_dec_return(&rs->rs_n_left); 54 + if (n_left >= 0) 55 + return 1; 56 + } 57 + 48 58 ratelimit_state_inc_miss(rs); 49 59 return 0; 50 60 } ··· 62 52 if (!(rs->flags & RATELIMIT_INITIALIZED)) { 63 53 rs->begin = jiffies; 64 54 rs->flags |= RATELIMIT_INITIALIZED; 55 + atomic_set(&rs->rs_n_left, rs->burst); 65 56 } 66 57 67 58 if (time_is_before_jiffies(rs->begin + interval)) { 68 - int m = ratelimit_state_reset_miss(rs); 59 + int m; 69 60 61 + /* 62 + * Reset rs_n_left ASAP to reduce false positives 63 + * in parallel calls, see above. 64 + */ 65 + atomic_set(&rs->rs_n_left, rs->burst); 66 + rs->begin = jiffies; 67 + 68 + m = ratelimit_state_reset_miss(rs); 70 69 if (m) { 71 70 if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { 72 71 printk_deferred(KERN_WARNING 73 72 "%s: %d callbacks suppressed\n", func, m); 74 73 } 75 74 } 76 - rs->begin = jiffies; 77 - rs->printed = 0; 78 75 } 79 - if (burst && burst > rs->printed) { 80 - rs->printed++; 81 - ret = 1; 82 - } else { 83 - ratelimit_state_inc_miss(rs); 84 - ret = 0; 76 + if (burst) { 77 + int n_left; 78 + 79 + /* The burst might have been taken by a parallel call. 
*/ 80 + n_left = atomic_dec_return(&rs->rs_n_left); 81 + if (n_left >= 0) { 82 + ret = 1; 83 + goto unlock_ret; 84 + } 85 85 } 86 + 87 + ratelimit_state_inc_miss(rs); 88 + ret = 0; 89 + 90 + unlock_ret: 86 91 raw_spin_unlock_irqrestore(&rs->lock, flags); 87 92 88 93 return ret;