Merge branch 'gve-support-larger-ring-sizes-in-dqo-qpl-mode'

Max Yuan says:

====================
gve: Support larger ring sizes in DQO-QPL mode

This patch series updates the gve driver to improve Queue Page List
(QPL) management and enable support for larger ring sizes when using the
DQO-QPL queue format.

Previously, the driver used hardcoded multipliers to determine the
number of pages to register for QPLs (e.g., 2x ring size for RX). This
rigid approach made it difficult to support larger ring sizes without
potentially exceeding the "max_registered_pages" limit reported by the
device.

The first patch introduces a unified and flexible logic for calculating
QPL page requirements. It balances TX and RX page allocations based on
the configured ring sizes and scales the total count down proportionally
if it would otherwise exceed the device's global registration limit (a
worked sketch of this calculation follows the cover letter).

The second patch leverages this new flexibility to stop ignoring the
maximum ring size supported by the device in DQO-QPL mode. Users can now
configure ring sizes up to the device-reported maximum, as the driver
will automatically adjust the QPL size to stay within allowed memory
bounds (see the ethtool example after the cover letter).
====================
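
A worked sketch of the patch 1 calculation (a minimal userspace C
program, not driver code; the queue counts, ring sizes, and page cap
below are invented example numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical configuration; real values come from the device. */
	uint64_t tx_queues = 16, rx_queues = 16;
	uint64_t tx_ring = 1024, rx_ring = 4096;
	uint64_t max_registered_pages = 65536; /* device-reported cap */

	/* Ideal budget: half a page per TX descriptor, two pages per RX
	 * descriptor (DQO keeps extra RX pages around for out-of-order
	 * completions).
	 */
	uint64_t ideal_tx = tx_ring * tx_queues / 2;  /* 8192 */
	uint64_t ideal_rx = rx_ring * rx_queues * 2;  /* 131072 */
	uint64_t total = ideal_tx + ideal_rx;         /* 139264 */

	/* Shrink proportionally so we never register more than the cap. */
	uint64_t budget = total < max_registered_pages ?
			  total : max_registered_pages;
	uint64_t tx_pages = budget * ideal_tx / total;

	printf("pages per TX QPL: %llu\n",
	       (unsigned long long)(tx_pages / tx_queues));          /* 240 */
	printf("pages per RX QPL: %llu\n",
	       (unsigned long long)((budget - tx_pages) / rx_queues)); /* 3855 */
	return 0;
}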
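
With patch 2 applied, larger rings are requested the usual way, e.g.
(hypothetical interface name and sizes; the accepted maximum is
whatever the device advertises):

  ethtool -G eth0 tx 1024 rx 4096

Rather than capping DQO-QPL rings below the device's advertised
maximum, the driver now shrinks the per-queue QPL page counts so the
total stays under max_registered_pages.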

Link: https://patch.msgid.link/20260225182342.1049816-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

drivers/net/ethernet/google/gve/gve.h

@@ -79,8 +79,6 @@
 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

-#define DQO_QPL_DEFAULT_TX_PAGES 512
-
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
@@ -711,6 +709,7 @@ struct gve_ptype_lut {
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
 	struct gve_tx_queue_config *qcfg;
+	u16 pages_per_qpl;
 	u16 num_xdp_rings;
@@ -726,6 +725,7 @@ struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
 	struct gve_rx_queue_config *qcfg_rx;
 	struct gve_tx_queue_config *qcfg_tx;
+	u16 pages_per_qpl;
 	u16 ring_size;
 	u16 packet_buffer_size;
@@ -816,7 +816,8 @@ struct gve_priv {
 	u16 min_rx_desc_cnt;
 	bool modify_ring_size_enabled;
 	bool default_min_ring_size;
-	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
+	u16 tx_pages_per_qpl;
+	u16 rx_pages_per_qpl;
 	u64 max_registered_pages;
 	u64 num_registered_pages; /* num pages registered with NIC */
 	struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -1150,14 +1151,6 @@ static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }

-static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
-{
-	/* For DQO, page count should be more than ring size for
-	 * out-of-order completions. Set it to two times of ring size.
-	 */
-	return 2 * rx_desc_cnt;
-}
-
 /* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
 						      int id)
@@ -1308,6 +1301,9 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown);
 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+void gve_update_num_qpl_pages(struct gve_priv *priv,
+			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+			      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg);
 int gve_adjust_config(struct gve_priv *priv,
 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);

drivers/net/ethernet/google/gve/gve_adminq.c

@@ -970,14 +970,6 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
 	}

-	/* Override pages for qpl for DQO-QPL */
-	if (dev_op_dqo_qpl) {
-		priv->tx_pages_per_qpl =
-			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
-		if (priv->tx_pages_per_qpl == 0)
-			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
-	}
-
 	if (dev_op_buffer_sizes &&
 	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
 		priv->max_rx_buffer_size =
@@ -997,12 +989,10 @@
 	if (dev_op_modify_ring &&
 	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
 		priv->modify_ring_size_enabled = true;
-
-		/* max ring size for DQO QPL should not be overwritten because of device limit */
-		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
-			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
-			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
-		}
+		priv->max_rx_desc_cnt =
+			be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+		priv->max_tx_desc_cnt =
+			be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
 		if (priv->default_min_ring_size) {
 			/* If device hasn't provided minimums, use default minimums */
 			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;

drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c

@@ -133,7 +133,7 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
 	u32 idx;

 	idx = rx->dqo.next_qpl_page_idx;
-	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+	if (idx >= priv->rx_pages_per_qpl) {
 		net_err_ratelimited("%s: Out of QPL pages\n",
 				    priv->dev->name);
 		return -ENOMEM;

drivers/net/ethernet/google/gve/gve_main.c

@@ -11,6 +11,7 @@
 #include <linux/filter.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/sched.h>
@@ -966,6 +967,7 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
 	cfg->qcfg = &priv->tx_cfg;
 	cfg->raw_addressing = !gve_is_qpl(priv);
 	cfg->ring_size = priv->tx_desc_cnt;
+	cfg->pages_per_qpl = priv->tx_pages_per_qpl;
 	cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
 	cfg->tx = priv->tx;
 }
@@ -997,12 +999,48 @@ static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
 	}
 }

+void gve_update_num_qpl_pages(struct gve_priv *priv,
+			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+			      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg)
+{
+	u64 ideal_tx_pages, ideal_rx_pages;
+	u16 tx_num_queues, rx_num_queues;
+	u64 max_pages, tx_pages;
+
+	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+		rx_alloc_cfg->pages_per_qpl = rx_alloc_cfg->ring_size;
+	} else if (priv->queue_format == GVE_DQO_QPL_FORMAT) {
+		/*
+		 * We want 2 pages per RX descriptor and half a page per TX
+		 * descriptor, which means the fraction ideal_tx_pages /
+		 * (ideal_tx_pages + ideal_rx_pages) of the pages we allocate
+		 * should be for TX. Shrink proportionally as necessary to avoid
+		 * allocating more than max_registered_pages total pages.
+		 */
+		tx_num_queues = tx_alloc_cfg->qcfg->num_queues;
+		rx_num_queues = rx_alloc_cfg->qcfg_rx->num_queues;
+		ideal_tx_pages = tx_alloc_cfg->ring_size * tx_num_queues / 2;
+		ideal_rx_pages = rx_alloc_cfg->ring_size * rx_num_queues * 2;
+		max_pages = min(priv->max_registered_pages,
+				ideal_tx_pages + ideal_rx_pages);
+		tx_pages = div64_u64(max_pages * ideal_tx_pages,
+				     ideal_tx_pages + ideal_rx_pages);
+		tx_alloc_cfg->pages_per_qpl = div_u64(tx_pages, tx_num_queues);
+		rx_alloc_cfg->pages_per_qpl = div_u64(max_pages - tx_pages,
+						      rx_num_queues);
+	}
+}
+
 static int gve_queues_mem_alloc(struct gve_priv *priv,
 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
 	int err;

+	gve_update_num_qpl_pages(priv, rx_alloc_cfg, tx_alloc_cfg);
+
 	if (gve_is_gqi(priv))
 		err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
 	else
@@ -1293,6 +1331,7 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
 	cfg->raw_addressing = !gve_is_qpl(priv);
 	cfg->enable_header_split = priv->header_split_enabled;
 	cfg->ring_size = priv->rx_desc_cnt;
+	cfg->pages_per_qpl = priv->rx_pages_per_qpl;
 	cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
 	cfg->rx = priv->rx;
 	cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
@@ -1372,6 +1411,8 @@ static int gve_queues_start(struct gve_priv *priv,
 	priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
 	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
 	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+	priv->tx_pages_per_qpl = tx_alloc_cfg->pages_per_qpl;
+	priv->rx_pages_per_qpl = rx_alloc_cfg->pages_per_qpl;

 	gve_tx_start_rings(priv, gve_num_tx_queues(priv));
 	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);

drivers/net/ethernet/google/gve/gve_rx.c

@@ -278,7 +278,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 	struct device *hdev = &priv->pdev->dev;
 	u32 slots = cfg->ring_size;
 	int filled_pages;
-	int qpl_page_cnt;
 	u32 qpl_id = 0;
 	size_t bytes;
 	int err;
@@ -314,10 +313,8 @@
 	if (!rx->data.raw_addressing) {
 		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
-		qpl_page_cnt = cfg->ring_size;
-
 		rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-							 qpl_page_cnt);
+							 cfg->pages_per_qpl);
 		if (!rx->data.qpl) {
 			err = -ENOMEM;
 			goto abort_with_copy_pool;

drivers/net/ethernet/google/gve/gve_rx_dqo.c

@@ -218,7 +218,6 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 {
 	struct device *hdev = &priv->pdev->dev;
 	struct page_pool *pool;
-	int qpl_page_cnt;
 	size_t size;
 	u32 qpl_id;
@@ -246,7 +245,7 @@
 	XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);

 	rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
-		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+		cfg->pages_per_qpl;
 	rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
 					   sizeof(rx->dqo.buf_states[0]),
 					   GFP_KERNEL, priv->numa_node);
@@ -281,10 +280,9 @@
 		rx->dqo.page_pool = pool;
 	} else {
 		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
-		qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 		rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-							qpl_page_cnt);
+							cfg->pages_per_qpl);
 		if (!rx->dqo.qpl)
 			goto err;
 		rx->dqo.next_qpl_page_idx = 0;

drivers/net/ethernet/google/gve/gve_tx.c

@@ -264,7 +264,6 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
 			      int idx)
 {
 	struct device *hdev = &priv->pdev->dev;
-	int qpl_page_cnt;
 	u32 qpl_id = 0;
 	size_t bytes;
@@ -291,10 +290,8 @@
 	tx->dev = hdev;
 	if (!tx->raw_addressing) {
 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
-		qpl_page_cnt = priv->tx_pages_per_qpl;
-
 		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-							    qpl_page_cnt);
+							    cfg->pages_per_qpl);
 		if (!tx->tx_fifo.qpl)
 			goto abort_with_desc;

drivers/net/ethernet/google/gve/gve_tx_dqo.c

@@ -311,7 +311,6 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 {
 	struct device *hdev = &priv->pdev->dev;
 	int num_pending_packets;
-	int qpl_page_cnt;
 	size_t bytes;
 	u32 qpl_id;
 	int i;
@@ -392,10 +391,9 @@
 	if (!cfg->raw_addressing) {
 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
-		qpl_page_cnt = priv->tx_pages_per_qpl;
 		tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-							qpl_page_cnt);
+							cfg->pages_per_qpl);
 		if (!tx->dqo.qpl)
 			goto err;