net/sched: sch_sfb: annotate data-races in sfb_dump_stats()
sfb_dump_stats() runs with only RTNL held, while the fields it reads
can be changed concurrently from the qdisc fast path.
Add READ_ONCE()/WRITE_ONCE() annotations.
An alternative would be to acquire the qdisc spinlock, but our long-term
goal is to make qdisc dump operations as lockless as possible.
tc_sfb_xstats fields apparently do not need to be sampled as one atomic
snapshot, otherwise this bug would have been caught earlier.
Fixes: edb09eb17e ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260421141655.3953721-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1ada03fdef (parent a8f5192809), committed by Jakub Kicinski
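To illustrate the pattern the patch applies throughout the file, here is a
minimal userspace sketch (not kernel code): the writer side mirrors the fast
path, the reader side mirrors the lockless dump, and READ_ONCE()/WRITE_ONCE()
are modeled with volatile casts, which is roughly how the kernel macros behave
for word-sized scalars. Struct and function names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct sfb_stats { uint32_t queuedrop; };

static struct sfb_stats stats;

/* Fast path: serialized by the qdisc lock, but racing with dumpers. */
static void on_queue_drop(void)
{
	/* Plain read is fine (writers are serialized); annotate the store. */
	WRITE_ONCE(stats.queuedrop, stats.queuedrop + 1);
}

/* Dump path: runs with only RTNL held, no qdisc lock. */
static uint32_t dump_queuedrop(void)
{
	return READ_ONCE(stats.queuedrop);
}

int main(void)
{
	on_queue_drop();
	printf("queuedrop=%u\n", (unsigned int)dump_queuedrop());
	return 0;
}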
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -130,7 +130,7 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
 
 		sfbhash >>= SFB_BUCKET_SHIFT;
 		if (b[hash].qlen < 0xFFFF)
-			b[hash].qlen++;
+			WRITE_ONCE(b[hash].qlen, b[hash].qlen + 1);
 		b += SFB_NUMBUCKETS; /* next level */
 	}
 }
@@ -159,7 +159,7 @@ static void decrement_one_qlen(u32 sfbhash, u32 slot,
 
 		sfbhash >>= SFB_BUCKET_SHIFT;
 		if (b[hash].qlen > 0)
-			b[hash].qlen--;
+			WRITE_ONCE(b[hash].qlen, b[hash].qlen - 1);
 		b += SFB_NUMBUCKETS; /* next level */
 	}
 }
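The two hunks above follow the same recipe: the bounds test reads the current
value plainly (safe, since writers are serialized by the qdisc lock) and only
the store is annotated so a lockless reader never observes a torn value. A
self-contained sketch of the clamped update, with the kernel macro modeled by
a volatile cast (the 0xFFFF clamp mirrors the patch, the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct bucket { uint16_t qlen; };

static void inc_qlen(struct bucket *b)	/* writer: qdisc lock held */
{
	if (b->qlen < 0xFFFF)
		WRITE_ONCE(b->qlen, b->qlen + 1);
}

static void dec_qlen(struct bucket *b)	/* writer: qdisc lock held */
{
	if (b->qlen > 0)
		WRITE_ONCE(b->qlen, b->qlen - 1);
}

int main(void)
{
	struct bucket b = { .qlen = 0 };

	inc_qlen(&b);
	inc_qlen(&b);
	dec_qlen(&b);
	printf("qlen=%u\n", (unsigned int)b.qlen);	/* prints 1 */
	return 0;
}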
@@ -179,12 +179,12 @@ static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
 
 static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
 {
-	b->p_mark = prob_minus(b->p_mark, q->decrement);
+	WRITE_ONCE(b->p_mark, prob_minus(b->p_mark, q->decrement));
 }
 
 static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
 {
-	b->p_mark = prob_plus(b->p_mark, q->increment);
+	WRITE_ONCE(b->p_mark, prob_plus(b->p_mark, q->increment));
 }
 
 static void sfb_zero_all_buckets(struct sfb_sched_data *q)
@@ -202,11 +202,14 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
 	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
 
 	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
-		if (qlen < b->qlen)
-			qlen = b->qlen;
-		totalpm += b->p_mark;
-		if (prob < b->p_mark)
-			prob = b->p_mark;
+		u32 b_qlen = READ_ONCE(b->qlen);
+		u32 b_mark = READ_ONCE(b->p_mark);
+
+		if (qlen < b_qlen)
+			qlen = b_qlen;
+		totalpm += b_mark;
+		if (prob < b_mark)
+			prob = b_mark;
 		b++;
 	}
 	*prob_r = prob;
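The sfb_compute_qlen() hunk does more than annotate: it reads each shared
field exactly once into a local, so the comparison and the assignment cannot
observe two different values if the fast path updates the bucket in between.
A minimal sketch of that idiom (bucket layout and names are illustrative,
READ_ONCE again modeled with a volatile cast):

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct bucket { uint16_t qlen; uint32_t p_mark; };

static uint32_t max_qlen(const struct bucket *b, int n)
{
	uint32_t qlen = 0;
	int i;

	for (i = 0; i < n; i++, b++) {
		/* One read, one value: no reload between test and use. */
		uint32_t b_qlen = READ_ONCE(b->qlen);

		if (qlen < b_qlen)
			qlen = b_qlen;
	}
	return qlen;
}

int main(void)
{
	struct bucket bins[2] = { { .qlen = 3 }, { .qlen = 7 } };

	printf("max qlen = %u\n", (unsigned int)max_qlen(bins, 2));
	return 0;
}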
@@ -295,7 +298,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	if (unlikely(sch->q.qlen >= q->limit)) {
 		qdisc_qstats_overlimit(sch);
-		q->stats.queuedrop++;
+		WRITE_ONCE(q->stats.queuedrop,
+			   q->stats.queuedrop + 1);
 		goto drop;
 	}
 
@@ -348,7 +352,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	if (unlikely(minqlen >= q->max)) {
 		qdisc_qstats_overlimit(sch);
-		q->stats.bucketdrop++;
+		WRITE_ONCE(q->stats.bucketdrop,
+			   q->stats.bucketdrop + 1);
 		goto drop;
 	}
 
@@ -374,7 +379,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		}
 		if (sfb_rate_limit(skb, q)) {
 			qdisc_qstats_overlimit(sch);
-			q->stats.penaltydrop++;
+			WRITE_ONCE(q->stats.penaltydrop,
+				   q->stats.penaltydrop + 1);
 			goto drop;
 		}
 		goto enqueue;
@@ -390,14 +396,17 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		 * In either case, we want to start dropping packets.
 		 */
 		if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
-			q->stats.earlydrop++;
+			WRITE_ONCE(q->stats.earlydrop,
+				   q->stats.earlydrop + 1);
 			goto drop;
 		}
 	}
 	if (INET_ECN_set_ce(skb)) {
-		q->stats.marked++;
+		WRITE_ONCE(q->stats.marked,
+			   q->stats.marked + 1);
 	} else {
-		q->stats.earlydrop++;
+		WRITE_ONCE(q->stats.earlydrop,
+			   q->stats.earlydrop + 1);
 		goto drop;
 	}
 }
@@ -410,7 +419,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		sch->q.qlen++;
 		increment_qlen(&cb, q);
 	} else if (net_xmit_drop_count(ret)) {
-		q->stats.childdrop++;
+		WRITE_ONCE(q->stats.childdrop,
+			   q->stats.childdrop + 1);
 		qdisc_qstats_drop(sch);
 	}
 	return ret;
@@ -599,12 +609,12 @@ static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	struct tc_sfb_xstats st = {
-		.earlydrop = q->stats.earlydrop,
-		.penaltydrop = q->stats.penaltydrop,
-		.bucketdrop = q->stats.bucketdrop,
-		.queuedrop = q->stats.queuedrop,
-		.childdrop = q->stats.childdrop,
-		.marked = q->stats.marked,
+		.earlydrop = READ_ONCE(q->stats.earlydrop),
+		.penaltydrop = READ_ONCE(q->stats.penaltydrop),
+		.bucketdrop = READ_ONCE(q->stats.bucketdrop),
+		.queuedrop = READ_ONCE(q->stats.queuedrop),
+		.childdrop = READ_ONCE(q->stats.childdrop),
+		.marked = READ_ONCE(q->stats.marked),
 	};
 
 	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
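These are the counters that `tc -s qdisc show` reports for an SFB qdisc. Note
that, per the commit message, each field is now read without compiler tearing,
but the six fields are still sampled independently, so the snapshot as a whole
is not guaranteed to be mutually consistent.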