net/mlx5: fs, add API for sharing HWS action by refcount

Counter HWS actions are shared by refcount: the action is created on
demand by a flow steering rule and destroyed only when no rules are
using it. The method is extensible to other HWS action types, such as
flow meter and sampler actions, in downstream patches.

Add an API to facilitate the reuse of get/put logic for HWS actions
shared by refcount.

Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://patch.msgid.link/1741543663-22123-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Moshe Shemesh
2025-03-09 20:07:41 +02:00
committed by Paolo Abeni
parent 2c99b2e163
commit cc2cc56fc6
4 changed files with 81 additions and 42 deletions

View File

@@ -347,16 +347,10 @@ struct mlx5_fc {
u64 lastbytes;
};
/* NOTE(review): pre-refactor per-bulk state. This commit replaces it with
 * the generic struct mlx5_fs_hws_data (same three members, declared in the
 * fs_hws header hunk below) so the get/put refcount logic can be shared
 * across HWS action types.
 */
struct mlx5_fc_bulk_hws_data {
	struct mlx5hws_action *hws_action;
	struct mutex lock; /* protects hws_action */
	refcount_t hws_action_refcount;
};
/* A bulk of flow counters allocated as one range starting at base_id.
 * hws_data holds the single HWS counter action shared by all rules that
 * use counters from this bulk.
 *
 * Fix: the stripped diff left two members both named hws_data (the old
 * struct mlx5_fc_bulk_hws_data line and the new struct mlx5_fs_hws_data
 * line), which is invalid C; keep only the post-patch member.
 */
struct mlx5_fc_bulk {
	struct mlx5_fs_bulk fs_bulk;
	u32 base_id;
	struct mlx5_fs_hws_data hws_data;
	struct mlx5_fc fcs[]; /* flexible array of counters in the bulk */
};

View File

@@ -519,6 +519,63 @@ mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
return mlx5hws_action_create_last(ctx, flags);
}
/* Create a shared FDB HWS action described by @create_ctx.
 *
 * Only counter actions (MLX5HWS_ACTION_TYP_CTR) are supported so far;
 * any other action type yields NULL.
 */
static struct mlx5hws_action *
mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
{
	u32 action_flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5hws_action *action = NULL;

	switch (create_ctx->actions_type) {
	case MLX5HWS_ACTION_TYP_CTR:
		action = mlx5hws_action_create_counter(create_ctx->hws_ctx,
						       create_ctx->id,
						       action_flags);
		break;
	default:
		break;
	}

	return action;
}
/* Get a reference to the shared HWS action, creating it on first use.
 *
 * Fast path: if the action already exists (refcount > 0) take a
 * reference without touching the mutex. Slow path: take the lock,
 * re-check the refcount (another thread may have created the action
 * between the lockless check and mutex_lock), and otherwise create the
 * action and publish it with refcount 1.
 *
 * Returns the shared action, or NULL if creation failed.
 */
struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
		       struct mlx5_fs_hws_create_action_ctx *create_ctx)
{
	/* try avoid locking if not necessary */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
		return fs_hws_data->hws_action;
	mutex_lock(&fs_hws_data->lock);
	/* re-check under the lock: a concurrent caller may have created
	 * the action while we were waiting for the mutex
	 */
	if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return fs_hws_data->hws_action;
	}
	fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
	if (!fs_hws_data->hws_action) {
		mutex_unlock(&fs_hws_data->lock);
		return NULL;
	}
	/* first user: publish with an initial reference */
	refcount_set(&fs_hws_data->hws_action_refcount, 1);
	mutex_unlock(&fs_hws_data->lock);
	return fs_hws_data->hws_action;
}
/* Drop a reference to the shared HWS action; when the last reference
 * is released, destroy the action under the lock and clear the pointer
 * so a later get re-creates it.
 */
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
{
	if (!fs_hws_data)
		return;
	/* try avoid locking if not necessary */
	if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
		return;
	mutex_lock(&fs_hws_data->lock);
	/* refcount was 1 at the lockless check; re-check under the lock
	 * in case another thread took a reference meanwhile
	 */
	if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
		mutex_unlock(&fs_hws_data->lock);
		return;
	}
	mlx5hws_action_destroy(fs_hws_data->hws_action);
	fs_hws_data->hws_action = NULL;
	mutex_unlock(&fs_hws_data->lock);
}
static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
{
switch (mlx5hws_action_get_type(fs_action->action)) {

View File

@@ -58,6 +58,23 @@ struct mlx5_fs_hws_rule {
int num_fs_actions;
};
/* State for an HWS action shared by refcount: the cached action pointer,
 * the mutex serializing create/destroy, and the reference count.
 */
struct mlx5_fs_hws_data {
	struct mlx5hws_action *hws_action;
	struct mutex lock; /* protects hws_action */
	refcount_t hws_action_refcount;
};

/* Parameters needed to create an HWS action on demand. */
struct mlx5_fs_hws_create_action_ctx {
	enum mlx5hws_action_type actions_type;
	struct mlx5hws_context *hws_ctx;
	u32 id; /* object id; the counter bulk base id for MLX5HWS_ACTION_TYP_CTR */
};

/* Take a reference on the shared action, creating it on first use via
 * create_ctx; returns NULL if creation fails.
 */
struct mlx5hws_action *
mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
		       struct mlx5_fs_hws_create_action_ctx *create_ctx);
/* Drop a reference; destroys the action when the last user is gone. */
void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
#ifdef CONFIG_MLX5_HW_STEERING
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);

View File

@@ -405,46 +405,17 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
/* Get (and take a reference on) the HWS counter action shared by the
 * bulk that @counter belongs to, creating it on first use.
 *
 * Fix: the stripped diff left the whole pre-refactor open-coded
 * refcount/mutex body in place, followed by an unreachable call to the
 * new shared helper; resolve to the post-patch implementation, which
 * delegates the get logic to mlx5_fs_get_hws_action().
 */
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
					      struct mlx5_fc *counter)
{
	struct mlx5_fs_hws_create_action_ctx create_ctx;
	struct mlx5_fc_bulk *fc_bulk = counter->bulk;

	create_ctx.hws_ctx = ctx;
	create_ctx.id = fc_bulk->base_id; /* counter action covers the whole bulk */
	create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;

	return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
}
/* Release a reference on the bulk's shared HWS counter action; the
 * action is destroyed when the last counter rule stops using it.
 *
 * Fix: the stripped diff left the old open-coded refcount/mutex body
 * before the new one-line delegation, making the tail unreachable;
 * resolve to the post-patch implementation that delegates to
 * mlx5_fs_put_hws_action().
 */
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
	mlx5_fs_put_hws_action(&counter->bulk->hws_data);
}