Merge branch 'fix-generating-skb-from-non-linear-xdp_buff-for-mlx5'

Tariq Toukan says:

====================
Fix generating skb from non-linear xdp_buff for mlx5

Link: https://lore.kernel.org/20250915225857.3024997-1-ameryhung@gmail.com
====================

Link: https://patch.msgid.link/1760644540-899148-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: 2025-10-20 17:39:15 -07:00

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -1794,14 +1794,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	}
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			struct mlx5e_wqe_frag_info *pwi;
+	if (prog) {
+		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+						 rq->flags)) {
+				struct mlx5e_wqe_frag_info *pwi;
+
+				wi -= old_nr_frags - sinfo->nr_frags;
 
-			for (pwi = head_wi; pwi < wi; pwi++)
-				pwi->frag_page->frags++;
+				for (pwi = head_wi; pwi < wi; pwi++)
+					pwi->frag_page->frags++;
+			}
+			return NULL; /* page/packet was consumed by XDP */
 		}
-		return NULL; /* page/packet was consumed by XDP */
+
+		nr_frags_free = old_nr_frags - sinfo->nr_frags;
+		if (unlikely(nr_frags_free)) {
+			wi -= nr_frags_free;
+			truesize -= nr_frags_free * frag_info->frag_stride;
+		}
 	}
 
 	skb = mlx5e_build_linear_skb(
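For context (not part of the commit): an XDP program may shrink a multi-fragment packet, e.g. via bpf_xdp_adjust_tail(), which releases trailing fragments and lowers sinfo->nr_frags while mlx5e_xdp_handle() runs. The hunk above therefore snapshots old_nr_frags first, then rewinds the wi cursor and truesize by the difference. Below is a minimal user-space C model of that bookkeeping; all names and sizes are illustrative, not driver code:

#include <assert.h>
#include <stdio.h>

#define FRAG_STRIDE 2048u		/* stand-in for frag_info->frag_stride */

struct frag { int refs; };

int main(void)
{
	struct frag frags[4] = { {0}, {0}, {0}, {0} };
	struct frag *head_wi = frags;
	struct frag *wi = frags + 4;	/* one past the last frag in use */
	unsigned int truesize = 4 * FRAG_STRIDE;
	unsigned int old_nr_frags = 4;	/* snapshot taken before XDP runs */
	unsigned int nr_frags = 2;	/* XDP trimmed two trailing frags */
	unsigned int nr_frags_free = old_nr_frags - nr_frags;

	/* Rewind past the frags the XDP program already released ... */
	wi -= nr_frags_free;
	truesize -= nr_frags_free * FRAG_STRIDE;

	/* ... so only the surviving frags keep an extra page reference. */
	for (struct frag *pwi = head_wi; pwi < wi; pwi++)
		pwi->refs++;

	assert(truesize == nr_frags * FRAG_STRIDE);
	printf("kept %u frags, truesize now %u\n", nr_frags, truesize);
	return 0;
}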
@@ -2027,6 +2040,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	u32 byte_cnt = cqe_bcnt;
 	struct skb_shared_info *sinfo;
 	unsigned int truesize = 0;
+	u32 pg_consumed_bytes;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	u32 linear_frame_sz;
@@ -2080,7 +2094,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	while (byte_cnt) {
 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
-		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+		pg_consumed_bytes =
+			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
 
 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
 			truesize += pg_consumed_bytes;
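Side note (not stated in the diff itself): pg_consumed_bytes is hoisted out of the loop body to function scope so that, once the loop finishes, it still holds the byte count consumed from the packet's last page; the truesize rollback two hunks below depends on that leftover value.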
@@ -2096,10 +2111,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	}
 
 	if (prog) {
+		u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+		u32 len;
+
 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 				struct mlx5e_frag_page *pfp;
 
+				frag_page -= old_nr_frags - sinfo->nr_frags;
+
 				for (pfp = head_page; pfp < frag_page; pfp++)
 					pfp->frags++;
 
@@ -2110,9 +2130,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			return NULL; /* page/packet was consumed by XDP */
 		}
 
+		nr_frags_free = old_nr_frags - sinfo->nr_frags;
+		if (unlikely(nr_frags_free)) {
+			frag_page -= nr_frags_free;
+			truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+				ALIGN(pg_consumed_bytes,
+				      BIT(rq->mpwqe.log_stride_sz));
+		}
+
+		len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
 		skb = mlx5e_build_linear_skb(
 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
-			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
 
 		if (unlikely(!skb)) {
 			mlx5e_page_release_fragmented(rq->page_pool,
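A worked example of the truesize rollback above, with assumed numbers (4 KiB pages, 2 KiB strides; none of these values come from the commit): every freed fragment was a full page except the last one, which was accounted stride-aligned when truesize was accumulated in the copy loop, so the rollback mirrors that split.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of two */

int main(void)
{
	unsigned int log_stride_sz = 11;	/* assumed: 2^11 = 2048-byte strides */
	unsigned int nr_frags_free = 3;		/* assumed: XDP freed three frags */
	unsigned int pg_consumed_bytes = 1000;	/* assumed: bytes in the last page */

	/* All freed frags but the last contributed PAGE_SIZE to truesize;
	 * the last contributed its stride-aligned consumption.
	 */
	unsigned int rollback = (nr_frags_free - 1) * PAGE_SIZE +
				ALIGN(pg_consumed_bytes, 1u << log_stride_sz);

	printf("truesize shrinks by %u bytes\n", rollback);	/* 2*4096 + 2048 = 10240 */
	return 0;
}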
@@ -2137,8 +2167,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			pagep = head_page;
 			do
 				pagep->frags++;
 			while (++pagep < frag_page);
+
+			headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+					skb->data_len);
+			__pskb_pull_tail(skb, headlen);
 		}
-		__pskb_pull_tail(skb, headlen);
 	} else {
 		dma_addr_t addr;
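Finally, again just context: the old code computed headlen from the pre-XDP length and pulled it unconditionally. Since the XDP program may have moved data_end, the linear part now already holds len = data_end - data bytes, so the pull is recomputed against the remaining headroom and performed only inside the frags branch. A tiny model with assumed sizes (256 for MLX5E_RX_MAX_HEAD is an assumption; the real value is driver-defined):

#include <stdio.h>

#define MLX5E_RX_MAX_HEAD 256u	/* assumed cap on the skb linear head */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int len = 192;		/* bytes already linear after XDP */
	unsigned int data_len = 3000;	/* bytes still sitting in the frags */

	/* Pull at most the leftover head budget, capped by what frags hold. */
	unsigned int headlen = min_u(MLX5E_RX_MAX_HEAD - len, data_len);

	printf("pull %u bytes from frags into the linear area\n", headlen);
	return 0;
}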