gve: Allow ethtool to configure rx_buf_len

Add support for getting and setting the RX buffer length via the
ethtool ring parameters (`ethtool -g`/`-G`). The driver accepts only
2048 (SZ_2K) and 4096 (SZ_4K); selecting 4096 additionally requires a
DQO queue format and a device option advertising support for 4K
buffers.
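For illustration, the buffer length is then queried and changed through
the standard ring-parameter interface (`eth0` below is a placeholder
device name):

    # show ring parameters, including the current RX buffer length
    ethtool -g eth0
    # switch to 4K buffers (DQO queue formats with device support only)
    ethtool -G eth0 rx-buf-len 4096
    # return to the 2K default, required before attaching XDP
    ethtool -G eth0 rx-buf-len 2048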

As XDP is only supported when `rx_buf_len` is 2048, the driver now
enforces this in two places (condensed in the sketch after this list):
1.  In `gve_xdp_set`, rejecting XDP programs if the current buffer
    length is not 2048.
2.  In `gve_set_rx_buf_len_config`, rejecting buffer length changes if XDP
    is loaded and the new length is not 2048.
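A minimal sketch of the two checks, simplified from the
gve_verify_xdp_configuration and gve_set_rx_buf_len_config hunks below
(extack error messages elided):

	/* 1. XDP attach path (gve_verify_xdp_configuration): the
	 *    configured buffer length must already be 2K.
	 */
	if (priv->rx_cfg.packet_buffer_size != SZ_2K)
		return -EOPNOTSUPP;	/* reject the XDP program */

	/* 2. ethtool -G path (gve_set_rx_buf_len_config): while an XDP
	 *    program is loaded, only the 2K length may be kept.
	 */
	if (priv->xdp_prog && rx_buf_len != SZ_2K)
		return -EINVAL;		/* reject the new rx_buf_len */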

Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Link: https://patch.msgid.link/20251106192746.243525-4-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

drivers/net/ethernet/google/gve/gve.h

@@ -1167,6 +1167,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 		priv->queue_format == GVE_GQI_QPL_FORMAT;
 }
 
+static inline bool gve_is_dqo(struct gve_priv *priv)
+{
+	return priv->queue_format == GVE_DQO_RDA_FORMAT ||
+		priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
 	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
@@ -1248,6 +1254,9 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+			      struct netlink_ext_ack *extack,
+			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
 			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 /* rx buffer handling */

drivers/net/ethernet/google/gve/gve_ethtool.c

@@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev,
 	cmd->rx_pending = priv->rx_desc_cnt;
 	cmd->tx_pending = priv->tx_desc_cnt;
 
+	kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
+
 	if (!gve_header_split_supported(priv))
 		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
 	else if (priv->header_split_enabled)
@@ -589,6 +591,12 @@ static int gve_set_ringparam(struct net_device *netdev,
 	int err;
 
 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+	err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
+					&rx_alloc_cfg);
+	if (err)
+		return err;
+
 	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
 				    &rx_alloc_cfg);
 	if (err)
@@ -605,6 +613,8 @@ static int gve_set_ringparam(struct net_device *netdev,
 		return err;
 	} else {
 		/* Set ring params for the next up */
+		priv->rx_cfg.packet_buffer_size =
+			rx_alloc_cfg.packet_buffer_size;
 		priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
 		priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
 		priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
@@ -944,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev,
 
 const struct ethtool_ops gve_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
-	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
+	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+				 ETHTOOL_RING_USE_RX_BUF_LEN,
 	.get_drvinfo = gve_get_drvinfo,
 	.get_strings = gve_get_strings,
 	.get_sset_count = gve_get_sset_count,

drivers/net/ethernet/google/gve/gve_main.c

@@ -1725,6 +1725,13 @@ static int gve_verify_xdp_configuration(struct net_device *dev,
 		return -EOPNOTSUPP;
 	}
 
+	if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
+		NL_SET_ERR_MSG_FMT_MOD(extack,
+				       "XDP is not supported for Rx buf len %d, only %d supported.",
+				       priv->rx_cfg.packet_buffer_size, SZ_2K);
+		return -EOPNOTSUPP;
+	}
+
 	max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
 	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
 		max_xdp_mtu -= GVE_RX_PAD;
@@ -2056,6 +2063,38 @@ bool gve_header_split_supported(const struct gve_priv *priv)
 	       priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
 }
 
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+			      struct netlink_ext_ack *extack,
+			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
+
+	if (rx_buf_len == old_rx_buf_len)
+		return 0;
+
+	/* device options may not always contain support for 4K buffers */
+	if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Modifying Rx buf len is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (priv->xdp_prog && rx_buf_len != SZ_2K) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Rx buf len can only be 2048 when XDP is on");
+		return -EINVAL;
+	}
+
+	if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Rx buf len can only be 2048 or 4096");
+		return -EINVAL;
+	}
+
+	rx_alloc_cfg->packet_buffer_size = rx_buf_len;
+	return 0;
+}
+
 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
 			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {