xsk: Proxy pool management for leased queues

Similarly to the net_mp_{open,close}_rxq handling for leased queues, proxy
the xsk_{reg,clear}_pool_at_qid via netif_get_rx_queue_lease_locked such
that in case a virtual netdev picked a leased rxq, the request gets through
to the real rxq in the physical netdev. The proxying is only relevant for
queue_id < dev->real_num_rx_queues since leasing is currently only supported
for rxqs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Co-developed-by: David Wei <dw@davidwei.uk>
Signed-off-by: David Wei <dw@davidwei.uk>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Link: https://patch.msgid.link/20260115082603.219152-9-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Daniel Borkmann
2026-01-15 09:25:55 +01:00
committed by Paolo Abeni
parent 1ecea95dd3
commit 0073d2fd67

View File

@@ -23,6 +23,8 @@
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_lock.h>
@@ -117,10 +119,18 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
/* Clear any XSK buffer pool registered at @queue_id on @dev, proxying
 * through a queue lease so a leased virtual-netdev rxq resolves to the
 * backing physical rxq (see commit message above).
 * NOTE(review): this is a rendered diff with +/- markers stripped; the
 * next four lines are the pre-patch body being removed, bounded by
 * num_rx_queues/num_tx_queues and operating on @dev directly. */
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
if (queue_id < dev->num_rx_queues)
dev->_rx[queue_id].pool = NULL;
if (queue_id < dev->num_tx_queues)
dev->_tx[queue_id].pool = NULL;
/* Post-patch body: remember the netdev the caller passed in so the
 * lease taken below can be released against it at the end. */
struct net_device *orig_dev = dev;
unsigned int id = queue_id;
/* Only rxqs can be leased, hence the proxying is attempted only for
 * id < real_num_rx_queues; on success dev/id may be redirected to the
 * physical netdev's real rxq. A failure here is unexpected at clear
 * time, so it is flagged rather than propagated. */
if (id < dev->real_num_rx_queues)
WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
/* dev/id now refer to the (possibly redirected) target queues;
 * bounds use real_num_* rather than the pre-patch num_* limits. */
if (id < dev->real_num_rx_queues)
dev->_rx[id].pool = NULL;
if (id < dev->real_num_tx_queues)
dev->_tx[id].pool = NULL;
/* Drop the lease reference; a no-op pairing when dev == orig_dev
 * (no redirection happened) — TODO confirm against the helper. */
netif_put_rx_queue_lease_locked(orig_dev, dev);
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@@ -130,17 +140,29 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
/* Register @pool at @queue_id on @dev, proxying through a queue lease so a
 * leased virtual-netdev rxq resolves to the backing physical rxq.
 * Returns 0 on success, -EINVAL for an out-of-range queue_id, -EBUSY if the
 * lease cannot be taken or a pool is already registered at that qid.
 * NOTE(review): rendered diff with +/- markers stripped; the max_t()
 * bounds check below is the pre-patch form being replaced. */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id)
{
/* Pre-patch bounds check (removed): max_t() with an explicit cast. */
if (queue_id >= max_t(unsigned int,
dev->real_num_rx_queues,
dev->real_num_tx_queues))
/* Post-patch body: keep the caller's netdev so the lease can be
 * released against it on every exit path below. */
struct net_device *orig_dev = dev;
unsigned int id = queue_id;
int ret = 0;
/* Replacement bounds check: plain max() suffices now that id is
 * already unsigned int. queue_id must fall inside at least one of
 * the rx/tx ranges. */
if (id >= max(dev->real_num_rx_queues,
dev->real_num_tx_queues))
return -EINVAL;
/* Only rxqs can be leased; on success dev/id may be redirected to
 * the physical netdev's real rxq, and the duplicate-registration
 * check must then run against that redirected queue. */
if (id < dev->real_num_rx_queues) {
if (!netif_get_rx_queue_lease_locked(&dev, &id))
return -EBUSY;
if (xsk_get_pool_from_qid(dev, id)) {
ret = -EBUSY;
goto out;
}
}
/* Pre-patch pool stores (removed): indexed by the raw queue_id. */
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].pool = pool;
if (queue_id < dev->real_num_tx_queues)
dev->_tx[queue_id].pool = pool;
return 0;
/* Post-patch pool stores: indexed by the possibly-redirected id. */
if (id < dev->real_num_rx_queues)
dev->_rx[id].pool = pool;
if (id < dev->real_num_tx_queues)
dev->_tx[id].pool = pool;
/* Single exit: release the lease taken above (no-op pairing when no
 * redirection happened — TODO confirm) and propagate ret. */
out:
netif_put_rx_queue_lease_locked(orig_dev, dev);
return ret;
}
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,