ratelimit: Reduce ___ratelimit() false-positive rate limiting

Retain the locked design, but check rate-limiting even when the lock
could not be acquired.

Link: https://lore.kernel.org/all/Z_VRo63o2UsVoxLG@pathway.suse.cz/
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
This commit is contained in:
Petr Mladek
2025-04-09 16:54:33 -07:00
committed by Paul E. McKenney
parent e64a348dc1
commit cf8cfa8a99
3 changed files with 40 additions and 15 deletions

View File

@@ -44,7 +44,7 @@ static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, in
 	raw_spin_lock_irqsave(&rs->lock, flags);
 	rs->interval = interval_init;
 	rs->flags &= ~RATELIMIT_INITIALIZED;
-	rs->printed = 0;
+	atomic_set(&rs->rs_n_left, rs->burst);
 	ratelimit_state_reset_miss(rs);
 	raw_spin_unlock_irqrestore(&rs->lock, flags);
 }

View File

@@ -18,7 +18,7 @@ struct ratelimit_state {
 
 	int		interval;
 	int		burst;
-	int		printed;
+	atomic_t	rs_n_left;
 	atomic_t	missed;
 	unsigned int	flags;
 	unsigned long	begin;
View File

@@ -39,12 +39,22 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		return 1;
 
 	/*
-	 * If we contend on this state's lock then almost
-	 * by definition we are too busy to print a message,
-	 * in addition to the one that will be printed by
-	 * the entity that is holding the lock already:
+	 * If we contend on this state's lock then just check if
+	 * the current burst is used or not. It might cause
+	 * false positive when we are past the interval and
+	 * the current lock owner is just about to reset it.
 	 */
 	if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
+		unsigned int rs_flags = READ_ONCE(rs->flags);
+
+		if (rs_flags & RATELIMIT_INITIALIZED && burst) {
+			int n_left;
+
+			n_left = atomic_dec_return(&rs->rs_n_left);
+			if (n_left >= 0)
+				return 1;
+		}
+
 		ratelimit_state_inc_miss(rs);
 		return 0;
 	}
@@ -52,27 +62,42 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 	if (!(rs->flags & RATELIMIT_INITIALIZED)) {
 		rs->begin = jiffies;
 		rs->flags |= RATELIMIT_INITIALIZED;
+		atomic_set(&rs->rs_n_left, rs->burst);
 	}
 
 	if (time_is_before_jiffies(rs->begin + interval)) {
-		int m = ratelimit_state_reset_miss(rs);
+		int m;
+
+		/*
+		 * Reset rs_n_left ASAP to reduce false positives
+		 * in parallel calls, see above.
+		 */
+		atomic_set(&rs->rs_n_left, rs->burst);
+		rs->begin = jiffies;
+		m = ratelimit_state_reset_miss(rs);
 
 		if (m) {
 			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
 				printk_deferred(KERN_WARNING
 						"%s: %d callbacks suppressed\n", func, m);
 			}
 		}
-		rs->begin   = jiffies;
-		rs->printed = 0;
 	}
-	if (burst && burst > rs->printed) {
-		rs->printed++;
-		ret = 1;
-	} else {
-		ratelimit_state_inc_miss(rs);
-		ret = 0;
+
+	if (burst) {
+		int n_left;
+
+		/* The burst might have been taken by a parallel call. */
+		n_left = atomic_dec_return(&rs->rs_n_left);
+		if (n_left >= 0) {
+			ret = 1;
+			goto unlock_ret;
+		}
 	}
+
+	ratelimit_state_inc_miss(rs);
+	ret = 0;
+
+unlock_ret:
 	raw_spin_unlock_irqrestore(&rs->lock, flags);
 
 	return ret;