ipv4: start using dst_dev_rcu()
Change icmpv4_xrlim_allow() and ip_defrag() to prevent a possible UAF.
Change ipmr_prepare_xmit(), ipmr_queue_fwd_xmit(), ip_mr_output() and
ipv4_neigh_lookup() to use the lockdep-enabled dst_dev_rcu().
Fixes: 4a6ce2b6f2 ("net: introduce a new function dst_dev_put()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://patch.msgid.link/20250828195823.3958522-9-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
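For context, the conversion pattern throughout the hunks below is to read the dst's device only inside an RCU read-side critical section. A minimal sketch of that pattern, using a hypothetical helper dst_is_loopback() that is not part of this patch; only dst_dev_rcu(), rcu_read_lock()/rcu_read_unlock() and the IFF_LOOPBACK test come from the diff itself:

/* Hypothetical helper, not part of this patch: illustrates the
 * dst_dev_rcu() usage the hunks below convert callers to.
 */
static bool dst_is_loopback(const struct dst_entry *dst)
{
	struct net_device *dev;
	bool ret = false;

	rcu_read_lock();
	/* Lockdep-checked RCU dereference of the dst's device, so a
	 * concurrent dst_dev_put() cannot leave us with a stale pointer.
	 */
	dev = dst_dev_rcu(dst);
	if (dev && (dev->flags & IFF_LOOPBACK))
		ret = true;
	rcu_read_unlock();
	/* dev must not be dereferenced after rcu_read_unlock(). */
	return ret;
}

Reading rt->dst.dev (or plain dst_dev()) outside of RCU leaves a window in which dst_dev_put() can release the device while it is still being dereferenced, which is the possible UAF this commit closes.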
commit 6ad8de3cef
parent b62a59c18b
committed by Jakub Kicinski
@@ -319,17 +319,17 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 		return true;
 
 	/* No rate limit on loopback */
-	dev = dst_dev(dst);
+	rcu_read_lock();
+	dev = dst_dev_rcu(dst);
 	if (dev && (dev->flags & IFF_LOOPBACK))
 		goto out;
 
-	rcu_read_lock();
 	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
 			       l3mdev_master_ifindex_rcu(dev));
 	rc = inet_peer_xrlim_allow(peer,
 				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
-	rcu_read_unlock();
 out:
+	rcu_read_unlock();
 	if (!rc)
 		__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
 	else
@@ -476,14 +476,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 /* Process an incoming IP datagram fragment. */
 int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
-	struct net_device *dev = skb->dev ? : skb_dst_dev(skb);
-	int vif = l3mdev_master_ifindex_rcu(dev);
+	struct net_device *dev;
 	struct ipq *qp;
+	int vif;
 
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
 
 	/* Lookup (or create) queue header */
+	rcu_read_lock();
+	dev = skb->dev ? : skb_dst_dev_rcu(skb);
+	vif = l3mdev_master_ifindex_rcu(dev);
 	qp = ip_find(net, ip_hdr(skb), user, vif);
 	if (qp) {
 		int ret, refs = 0;
 
@@ -1905,7 +1905,7 @@ static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
 		return -1;
 	}
 
-	encap += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+	encap += LL_RESERVED_SPACE(dst_dev_rcu(&rt->dst)) + rt->dst.header_len;
 
 	if (skb_cow(skb, encap)) {
 		ip_rt_put(rt);
@@ -1958,7 +1958,7 @@ static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
 	 * result in receiving multiple packets.
 	 */
 	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
-		net, NULL, skb, skb->dev, rt->dst.dev,
+		net, NULL, skb, skb->dev, dst_dev_rcu(&rt->dst),
 		ipmr_forward_finish);
 	return;
 
@@ -2302,7 +2302,7 @@ int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	guard(rcu)();
 
-	dev = rt->dst.dev;
+	dev = dst_dev_rcu(&rt->dst);
 
 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
 		goto mc_output;
@@ -414,11 +414,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 					   const void *daddr)
 {
 	const struct rtable *rt = container_of(dst, struct rtable, dst);
-	struct net_device *dev = dst_dev(dst);
+	struct net_device *dev;
 	struct neighbour *n;
 
 	rcu_read_lock();
 
+	dev = dst_dev_rcu(dst);
 	if (likely(rt->rt_gw_family == AF_INET)) {
 		n = ip_neigh_gw4(dev, rt->rt_gw4);
 	} else if (rt->rt_gw_family == AF_INET6) {