Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-05 11:01:06 -04:00).
net/mlx5e: SHAMPO, Specialize mlx5e_fill_skb_data()
mlx5e_fill_skb_data() used to have multiple callers. But after the XDP
multibuf refactoring from commit 2cb0e27d43 ("net/mlx5e: RX, Prepare
non-linear striding RQ for XDP multi-buffer support") the SHAMPO code
path is the only caller.
Take advantage of this and specialize the function:
- Drop the redundant check.
- Assume that data_bcnt is > 0. This is needed in a downstream patch.
Rename the function as well to make things clear.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Suggested-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240603212219.1037656-8-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in the branches listed above.
Committed by: Jakub Kicinski
Parent commit: e839ac9a89
This commit: d34d7d1973
@@ -1948,21 +1948,16 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
|
||||
#endif
|
||||
|
||||
static void
|
||||
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
|
||||
struct mlx5e_frag_page *frag_page,
|
||||
u32 data_bcnt, u32 data_offset)
|
||||
mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
|
||||
struct mlx5e_frag_page *frag_page,
|
||||
u32 data_bcnt, u32 data_offset)
|
||||
{
|
||||
net_prefetchw(skb->data);
|
||||
|
||||
while (data_bcnt) {
|
||||
do {
|
||||
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
|
||||
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
|
||||
unsigned int truesize;
|
||||
|
||||
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
|
||||
truesize = pg_consumed_bytes;
|
||||
else
|
||||
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
|
||||
unsigned int truesize = pg_consumed_bytes;
|
||||
|
||||
frag_page->frags++;
|
||||
mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
|
||||
@@ -1971,7 +1966,7 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
|
||||
data_bcnt -= pg_consumed_bytes;
|
||||
data_offset = 0;
|
||||
frag_page++;
|
||||
}
|
||||
} while (data_bcnt);
|
||||
}
|
||||
|
||||
static struct sk_buff *
|
||||
@@ -2330,10 +2325,12 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
|
||||
}
|
||||
|
||||
if (likely(head_size)) {
|
||||
struct mlx5e_frag_page *frag_page;
|
||||
if (data_bcnt) {
|
||||
struct mlx5e_frag_page *frag_page;
|
||||
|
||||
frag_page = &wi->alloc_units.frag_pages[page_idx];
|
||||
mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
|
||||
frag_page = &wi->alloc_units.frag_pages[page_idx];
|
||||
mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
|
||||
}
|
||||
}
|
||||
|
||||
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
|
||||
|
||||
Reference in New Issue
Block a user