mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 00:51:51 -04:00
net: Slightly simplify net_mp_{open,close}_rxq
net_mp_open_rxq is currently not used in the tree as all callers are
using __net_mp_open_rxq directly, and net_mp_close_rxq is only used
once while all other locations use __net_mp_close_rxq.
Consolidate into a single API, netif_mp_{open,close}_rxq, using the
netif_ prefix to indicate that the caller is responsible for locking.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Co-developed-by: David Wei <dw@davidwei.uk>
Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Link: https://patch.msgid.link/20260402231031.447597-6-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit (1e91c98bc9, parent 22fdf28f7c) was committed by Jakub Kicinski.
@@ -23,14 +23,10 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
|
||||
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
|
||||
void net_mp_niov_clear_page_pool(struct net_iov *niov);
|
||||
|
||||
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
|
||||
struct pp_memory_provider_params *p);
|
||||
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
const struct pp_memory_provider_params *p,
|
||||
struct netlink_ext_ack *extack);
|
||||
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
|
||||
struct pp_memory_provider_params *old_p);
|
||||
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
void netif_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
const struct pp_memory_provider_params *old_p);
|
||||
|
||||
/**
|
||||
|
||||
@@ -552,8 +552,11 @@ static void io_close_queue(struct io_zcrx_ifq *ifq)
|
||||
}
|
||||
|
||||
if (netdev) {
|
||||
if (ifq->if_rxq != -1)
|
||||
net_mp_close_rxq(netdev, ifq->if_rxq, &p);
|
||||
if (ifq->if_rxq != -1) {
|
||||
netdev_lock(netdev);
|
||||
netif_mp_close_rxq(netdev, ifq->if_rxq, &p);
|
||||
netdev_unlock(netdev);
|
||||
}
|
||||
netdev_put(netdev, &netdev_tracker);
|
||||
}
|
||||
ifq->if_rxq = -1;
|
||||
@@ -841,7 +844,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
|
||||
mp_param.rx_page_size = 1U << ifq->niov_shift;
|
||||
mp_param.mp_ops = &io_uring_pp_zc_ops;
|
||||
mp_param.mp_priv = ifq;
|
||||
ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
|
||||
ret = netif_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
|
||||
if (ret)
|
||||
goto netdev_put_unlock;
|
||||
netdev_unlock(ifq->netdev);
|
||||
|
||||
@@ -145,7 +145,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
|
||||
|
||||
rxq_idx = get_netdev_rx_queue_index(rxq);
|
||||
|
||||
__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
|
||||
netif_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
|
||||
}
|
||||
|
||||
percpu_ref_kill(&binding->ref);
|
||||
@@ -163,7 +163,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
|
||||
u32 xa_idx;
|
||||
int err;
|
||||
|
||||
err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
|
||||
err = netif_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -176,7 +176,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
|
||||
return 0;
|
||||
|
||||
err_close_rxq:
|
||||
__net_mp_close_rxq(dev, rxq_idx, &mp_params);
|
||||
netif_mp_close_rxq(dev, rxq_idx, &mp_params);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -200,7 +200,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
|
||||
|
||||
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
const struct pp_memory_provider_params *p,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
@@ -264,18 +264,7 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
|
||||
struct pp_memory_provider_params *p)
|
||||
{
|
||||
int ret;
|
||||
|
||||
netdev_lock(dev);
|
||||
ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
|
||||
netdev_unlock(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
|
||||
void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
|
||||
const struct pp_memory_provider_params *old_p)
|
||||
{
|
||||
struct netdev_queue_config qcfg[2];
|
||||
@@ -305,11 +294,3 @@ void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
|
||||
err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
|
||||
WARN_ON(err && err != -ENETDOWN);
|
||||
}
|
||||
|
||||
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
|
||||
struct pp_memory_provider_params *old_p)
|
||||
{
|
||||
netdev_lock(dev);
|
||||
__net_mp_close_rxq(dev, ifq_idx, old_p);
|
||||
netdev_unlock(dev);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user