Files
linux/include/net/netdev_rx_queue.h
David Wei 0caa9a8dde net: Proxy net_mp_{open,close}_rxq for leased queues
When a process in a container wants to set up a memory provider, it will
use the virtual netdev and a leased rxq, and call net_mp_{open,close}_rxq
to try to restart the queue. At this point, proxy the queue restart on
the real rxq in the physical netdev.

For memory providers (io_uring zero-copy rx and devmem), this causes the
real rxq in the physical netdev to be filled from a memory provider that
has DMA-mapped memory from a process within a container.

Signed-off-by: David Wei <dw@davidwei.uk>
Co-developed-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20260115082603.219152-6-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2026-01-20 11:58:49 +01:00

81 lines
2.3 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NETDEV_RX_QUEUE_H
#define _LINUX_NETDEV_RX_QUEUE_H
#include <linux/kobject.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <net/xdp.h>
#include <net/page_pool/types.h>
/**
 * struct netdev_rx_queue - an instance of a single RX queue of a net_device
 * @xdp_rxq: XDP RX-queue info registered for this queue
 * @rps_map: RPS CPU map (RCU-protected), only with CONFIG_RPS
 * @rps_flow_table: RPS flow steering table (RCU-protected), only with CONFIG_RPS
 * @kobj: kobject backing the per-queue sysfs directory
 * @groups: sysfs attribute groups attached to @kobj
 * @dev: backpointer to the owning net_device
 * @dev_tracker: reference tracker for the @dev hold
 * @pool: XSK buffer pool bound to this queue, only with CONFIG_XDP_SOCKETS
 * @napi: NAPI instance servicing this queue, if any
 * @mp_params: page_pool memory provider parameters for this queue
 * @lease: peer queue for a queue lease; per the lease API below this links
 *         a virtual netdev queue with the real queue in the physical netdev
 *         (NOTE(review): exact direction not visible in this header — see
 *         the netdev_rx_queue_lease() implementation)
 * @lease_tracker: reference tracker for the lease-related device hold
 */
struct netdev_rx_queue {
	struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_RPS
	struct rps_map __rcu *rps_map;
	struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
	struct kobject kobj;
	const struct attribute_group **groups;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	/* All fields below are "ops protected",
	 * see comment about net_device::lock
	 */
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool *pool;
#endif
	struct napi_struct *napi;
	struct pp_memory_provider_params mp_params;
	struct netdev_rx_queue *lease;
	netdevice_tracker lease_tracker;
} ____cacheline_aligned_in_smp;
/*
 * RX queue sysfs structures and functions.
 */

/**
 * struct rx_queue_attribute - sysfs attribute of a netdev_rx_queue
 * @attr: base sysfs attribute (name, mode)
 * @show: read handler; formats the value into @buf, returns bytes written
 *        or a negative errno
 * @store: write handler; parses @len bytes from @buf, returns bytes
 *         consumed or a negative errno
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};
/* Look up RX queue @rxq of @dev. No bounds checking; the caller must
 * guarantee @rxq is a valid queue index for @dev.
 */
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
	return &dev->_rx[rxq];
}
/* Return the index of @queue within its device's _rx array.
 * BUGs if @queue does not point into the array (index out of range).
 */
static inline unsigned int
get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int idx = queue - dev->_rx;

	BUG_ON(idx >= dev->num_rx_queues);

	return idx;
}
/* Stop, reconfigure and restart RX queue @rxq of @dev. */
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);

/* Queue leasing: link @rxq_dst (on a virtual netdev) to @rxq_src (the real
 * queue on the physical netdev) so that operations such as memory-provider
 * setup and queue restarts can be proxied onto the real queue.
 * NOTE(review): dst/src roles inferred from the lease API naming and the
 * commit description — confirm against the implementation.
 */
void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
			   struct netdev_rx_queue *rxq_src);
/* Undo a previous netdev_rx_queue_lease() pairing. */
void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
			     struct netdev_rx_queue *rxq_src);
/* Resolve the owner of a leased queue; *dev/*rxq are updated in place.
 * Returns true if the queue is leased and the owner was resolved.
 * NOTE(review): return semantics inferred from the signature — verify.
 */
bool netif_rx_queue_lease_get_owner(struct net_device **dev, unsigned int *rxq);

/* Direction in which to resolve a queue lease. */
enum netif_lease_dir {
	NETIF_VIRT_TO_PHYS,	/* virtual netdev queue -> backing physical queue */
	NETIF_PHYS_TO_VIRT,	/* physical queue -> virtual lessee queue */
};

/* Resolve a lease in direction @dir, rewriting *dev/*rxq to the peer and
 * returning the peer queue (or the original queue if not leased —
 * NOTE(review): not-leased behavior is not visible here; verify).
 */
struct netdev_rx_queue *
__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
			   enum netif_lease_dir dir);
/* As above, but takes/expects the appropriate instance lock and acquires a
 * reference that must be released with netif_put_rx_queue_lease_locked().
 */
struct netdev_rx_queue *
netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq);
/* Release the reference taken by netif_get_rx_queue_lease_locked().
 * @orig_dev is the device originally passed in; @dev the resolved peer.
 */
void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
				     struct net_device *dev);
#endif /* _LINUX_NETDEV_RX_QUEUE_H */