mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 15:13:44 -04:00
net/mlx5e: SHAMPO, Cleanup reservation size formula
The reservation size formula can be reduced to a simple evaluation of MLX5E_SHAMPO_WQ_RESRV_SIZE. This leaves mlx5e_shampo_get_log_rsrv_size() with one single use, which can be replaced with a macro for simplicity. Also, function mlx5e_shampo_get_log_rsrv_size() is used only throughout params.c, make it static. Signed-off-by: Lama Kayal <lkayal@nvidia.com> Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com> Signed-off-by: Tariq Toukan <tariqt@nvidia.com> Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com> Link: https://patch.msgid.link/1753081999-326247-2-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
ad892e912b
commit
bc2d44b83f
@@ -85,8 +85,9 @@ struct page_pool;
 #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
 #define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
 #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
-#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
-#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
+#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE BIT(MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE)
 
 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -420,19 +420,10 @@ u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
 	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
 }
 
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
-				  struct mlx5e_params *params)
-{
-	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
-}
-
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
-				     struct mlx5e_params *params)
+static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
 {
-	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
-			 MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
-
-	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+	return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
+					 params->sw_mtu));
 }
 
 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
@@ -834,13 +825,12 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
 					struct mlx5e_params *params,
 					struct mlx5e_xsk_param *xsk)
 {
-	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
-			MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
-	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
 	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
 	int wqe_size = BIT(log_stride_sz) * num_strides;
+	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
 
 	/* +1 is for the case that the pkt_per_rsrv dont consume the reservation
 	 * so we get a filler cqe for the rest of the reservation.
@@ -932,10 +922,11 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 
 		MLX5_SET(wq, wq, shampo_enable, true);
 		MLX5_SET(wq, wq, log_reservation_size,
-			 mlx5e_shampo_get_log_rsrv_size(mdev, params));
+			 MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+			 MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
 		MLX5_SET(wq, wq,
 			 log_max_num_of_packets_per_reservation,
-			 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+			 mlx5e_shampo_get_log_pkt_per_rsrv(params));
 		MLX5_SET(wq, wq, log_headers_entry_size,
 			 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
 		lro_timeout =
@@ -1048,18 +1039,17 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
 			    struct mlx5e_rq_param *rq_param)
 {
-	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
-		MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
-	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
 	int wqe_size = BIT(log_stride_sz) * num_strides;
+	int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
 	u32 hd_per_wqe;
 
 	/* Assumption: hd_per_wqe % 8 == 0. */
-	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
-	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
-		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+	hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
+	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
+		      __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
 	return hd_per_wqe;
 }
@@ -97,10 +97,6 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
 			       struct mlx5e_xsk_param *xsk);
 u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
 				      struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
-				  struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
-				     struct mlx5e_params *params);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
 			    struct mlx5e_rq_param *rq_param);
Reference in New Issue
Block a user