mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 10:01:39 -05:00
inet: frags: flush pending skbs in fqdir_pre_exit()
We have been seeing occasional deadlocks on pernet_ops_rwsem since
September in NIPA. The stuck task was usually modprobe (often loading
a driver like ipvlan), trying to take the lock as a Writer.
lockdep does not track readers for rwsems, so the reader holding the
lock wasn't obvious from the reports.
On closer inspection the Reader holding the lock was conntrack looping
forever in nf_conntrack_cleanup_net_list(). Based on past experience
with occasional NIPA crashes, I looked through the tests which run before
the crash and noticed that the crash follows ip_defrag.sh. An immediate
red flag. Scouring through the (de)fragmentation queues reveals skbs sitting
around, holding conntrack references.
The problem is that conntrack depends on nf_defrag_ipv6, so
nf_defrag_ipv6 loads first — and because it loads first, its
netns exit hooks run _after_ conntrack's netns exit hook.
Flush all fragment queue SKBs during fqdir_pre_exit() to release
conntrack references before conntrack cleanup runs. Also flush
the queues in timer expiry handlers when they discover fqdir->dead
is set, in case a packet sneaks in while we're running the pre_exit
flush.
The commit under Fixes is not exactly the culprit, but I think
previously the timer firing would eventually unblock the spinning
conntrack.
Fixes: d5dd88794a ("inet: fix various use-after-free in defrags units")
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251207010942.1672972-4-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
@@ -123,18 +123,7 @@ void inet_frags_fini(struct inet_frags *);
|
||||
|
||||
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
|
||||
|
||||
static inline void fqdir_pre_exit(struct fqdir *fqdir)
|
||||
{
|
||||
/* Prevent creation of new frags.
|
||||
* Pairs with READ_ONCE() in inet_frag_find().
|
||||
*/
|
||||
WRITE_ONCE(fqdir->high_thresh, 0);
|
||||
|
||||
/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
|
||||
* and ip6frag_expire_frag_queue().
|
||||
*/
|
||||
WRITE_ONCE(fqdir->dead, true);
|
||||
}
|
||||
void fqdir_pre_exit(struct fqdir *fqdir);
|
||||
void fqdir_exit(struct fqdir *fqdir);
|
||||
|
||||
void inet_frag_kill(struct inet_frag_queue *q, int *refs);
|
||||
|
||||
@@ -69,9 +69,6 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
|
||||
int refs = 1;
|
||||
|
||||
rcu_read_lock();
|
||||
/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
|
||||
if (READ_ONCE(fq->q.fqdir->dead))
|
||||
goto out_rcu_unlock;
|
||||
spin_lock(&fq->q.lock);
|
||||
|
||||
if (fq->q.flags & INET_FRAG_COMPLETE)
|
||||
@@ -80,6 +77,12 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
|
||||
fq->q.flags |= INET_FRAG_DROP;
|
||||
inet_frag_kill(&fq->q, &refs);
|
||||
|
||||
/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
|
||||
if (READ_ONCE(fq->q.fqdir->dead)) {
|
||||
inet_frag_queue_flush(&fq->q, 0);
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev = dev_get_by_index_rcu(net, fq->iif);
|
||||
if (!dev)
|
||||
goto out;
|
||||
|
||||
@@ -218,6 +218,41 @@ static int __init inet_frag_wq_init(void)
|
||||
|
||||
pure_initcall(inet_frag_wq_init);
|
||||
|
||||
void fqdir_pre_exit(struct fqdir *fqdir)
|
||||
{
|
||||
struct inet_frag_queue *fq;
|
||||
struct rhashtable_iter hti;
|
||||
|
||||
/* Prevent creation of new frags.
|
||||
* Pairs with READ_ONCE() in inet_frag_find().
|
||||
*/
|
||||
WRITE_ONCE(fqdir->high_thresh, 0);
|
||||
|
||||
/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
|
||||
* and ip6frag_expire_frag_queue().
|
||||
*/
|
||||
WRITE_ONCE(fqdir->dead, true);
|
||||
|
||||
rhashtable_walk_enter(&fqdir->rhashtable, &hti);
|
||||
rhashtable_walk_start(&hti);
|
||||
|
||||
while ((fq = rhashtable_walk_next(&hti))) {
|
||||
if (IS_ERR(fq)) {
|
||||
if (PTR_ERR(fq) != -EAGAIN)
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
spin_lock_bh(&fq->lock);
|
||||
if (!(fq->flags & INET_FRAG_COMPLETE))
|
||||
inet_frag_queue_flush(fq, 0);
|
||||
spin_unlock_bh(&fq->lock);
|
||||
}
|
||||
|
||||
rhashtable_walk_stop(&hti);
|
||||
rhashtable_walk_exit(&hti);
|
||||
}
|
||||
EXPORT_SYMBOL(fqdir_pre_exit);
|
||||
|
||||
void fqdir_exit(struct fqdir *fqdir)
|
||||
{
|
||||
INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
|
||||
@@ -290,6 +325,7 @@ void inet_frag_queue_flush(struct inet_frag_queue *q,
|
||||
{
|
||||
unsigned int sum;
|
||||
|
||||
reason = reason ?: SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
|
||||
sum = inet_frag_rbtree_purge(&q->rb_fragments, reason);
|
||||
sub_frag_mem_limit(q->fqdir, sum);
|
||||
}
|
||||
|
||||
@@ -134,11 +134,6 @@ static void ip_expire(struct timer_list *t)
|
||||
net = qp->q.fqdir->net;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
/* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
|
||||
if (READ_ONCE(qp->q.fqdir->dead))
|
||||
goto out_rcu_unlock;
|
||||
|
||||
spin_lock(&qp->q.lock);
|
||||
|
||||
if (qp->q.flags & INET_FRAG_COMPLETE)
|
||||
@@ -146,6 +141,13 @@ static void ip_expire(struct timer_list *t)
|
||||
|
||||
qp->q.flags |= INET_FRAG_DROP;
|
||||
inet_frag_kill(&qp->q, &refs);
|
||||
|
||||
/* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
|
||||
if (READ_ONCE(qp->q.fqdir->dead)) {
|
||||
inet_frag_queue_flush(&qp->q, 0);
|
||||
goto out;
|
||||
}
|
||||
|
||||
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
|
||||
__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user