gve: support unreadable netmem

Declare PP_FLAG_ALLOW_UNREADABLE_NETMEM to turn on unreadable netmem
support in GVE.
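
The change in gve_rx_create_page_pool() (first hunk below) boils down to setting
the flag and the queue index only on header-split queues; roughly:

    if (priv->header_split_enabled) {
            /* Advertise unreadable netmem support only when header split
             * is on, and tell the page pool which rx queue it serves.
             */
            pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
            pp.queue_idx = rx->q_num;
    }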

We also drop any net_iov packets when header split is not enabled, since
we're unable to process packets whose header landed in unreadable
netmem.
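
In gve_rx_dqo() this becomes an early drop when header split did not happen
and the packet buffer is a net_iov (see the third hunk below); roughly:

    } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
               netmem_is_net_iov(buf_state->page_info.netmem)) {
            /* Header split is off, so the header sits in the packet
             * buffer; a net_iov buffer can't be mapped into kernel
             * space to read it, so the packet is dropped.
             */
            goto error;
    }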

Use page_pool_dma_sync_netmem_for_cpu in lieu of
dma_sync_single_range_for_cpu to correctly handle unreadable netmem
that should not be dma-sync'd.
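
The sync is wrapped in a small gve_dma_sync() helper that picks the right
primitive for the buffer type (full version in the second hunk below); roughly:

    if (rx->dqo.page_pool)
            /* Netmem-aware: skips the sync for unreadable netmem. */
            page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
                                              page_info->netmem,
                                              page_info->page_offset,
                                              buf_len);
    else
            dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
                                          page_info->page_offset +
                                          page_info->pad,
                                          buf_len, DMA_FROM_DEVICE);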

Disable the rx_copybreak optimization if the payload is unreadable netmem,
as rx_copybreak needs access to the payload.
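
The copybreak fast path is therefore guarded (last hunk below); a condensed
view, with the error handling omitted:

    if (eop && buf_len <= priv->rx_copybreak &&
        !(rx->dqo.page_pool &&
          netmem_is_net_iov(buf_state->page_info.netmem))) {
            /* gve_rx_copy() reads the payload, which can't be done for
             * an unreadable net_iov buffer, so copybreak is skipped.
             */
            rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
                                           &buf_state->page_info, buf_len);
    }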

Signed-off-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20250818210507.3781705-1-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Author:    Mina Almasry
Date:      2025-08-18 21:05:07 +00:00
Committer: Jakub Kicinski
Commit:    62d7f40503
Parent:    c3439666d1

2 changed files with 35 additions and 5 deletions


@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
+	if (priv->header_split_enabled) {
+		pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+		pp.queue_idx = rx->q_num;
+	}
+
 	return page_pool_create(&pp);
 }
 


@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	return 0;
 }
 
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+			 struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+	struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+	if (rx->dqo.page_pool) {
+		page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+						  page_info->netmem,
+						  page_info->page_offset,
+						  buf_len);
+	} else {
+		dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+					      page_info->page_offset +
+					      page_info->pad,
+					      buf_len, DMA_FROM_DEVICE);
+	}
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		rx->rx_hsplit_unsplit_pkt += unsplit;
 		rx->rx_hsplit_bytes += hdr_len;
 		u64_stats_update_end(&rx->statss);
+	} else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+		   netmem_is_net_iov(buf_state->page_info.netmem)) {
+		/* when header split is disabled, the header went to the packet
+		 * buffer. If the packet buffer is a net_iov, those can't be
+		 * easily mapped into the kernel space to access the header
+		 * required to process the packet.
+		 */
+		goto error;
 	}
 
 	/* Sync the portion of dma buffer for CPU to read. */
-	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-				      buf_state->page_info.page_offset +
-				      buf_state->page_info.pad,
-				      buf_len, DMA_FROM_DEVICE);
+	gve_dma_sync(priv, rx, buf_state, buf_len);
 
 	/* Append to current skb if one exists. */
 	if (rx->ctx.skb_head) {
@@ -837,7 +860,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		u64_stats_update_end(&rx->statss);
 	}
 
-	if (eop && buf_len <= priv->rx_copybreak) {
+	if (eop && buf_len <= priv->rx_copybreak &&
+	    !(rx->dqo.page_pool &&
+	      netmem_is_net_iov(buf_state->page_info.netmem))) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
 					       &buf_state->page_info, buf_len);
 		if (unlikely(!rx->ctx.skb_head))