Merge branch 'net-mlx5-support-disabling-host-pfs'

Tariq Toukan says:

====================
net/mlx5: Support disabling host PFs

This small series by Daniel adds support for disabling host PFs.
If the device is capable and configured accordingly, the driver won't
access the vports of disabled host functions; a minimal sketch of the
resulting pattern follows the commit metadata below.
====================

Link: https://patch.msgid.link/1755112796-467444-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2025-08-15 12:29:09 -07:00
3 changed files with 90 additions and 37 deletions
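
For orientation, here is a minimal, self-contained C sketch of the pattern the
series introduces: a single predicate, mlx5_esw_host_functions_enabled(),
checked at every host-PF touchpoint. The struct layouts are reduced stand-ins
and setup_host_pf() is a hypothetical caller; only the predicate's logic is
taken verbatim from the diff below.

	#include <stdbool.h>
	#include <stdio.h>

	/* Reduced stand-ins for the kernel structures in the diff below. */
	struct mlx5_esw_functions { bool host_funcs_disabled; };
	struct mlx5_eswitch { struct mlx5_esw_functions esw_funcs; };
	struct mlx5_core_dev { struct { struct mlx5_eswitch *eswitch; } priv; };

	/* Same logic as the new helper in eswitch.c: before the eswitch is
	 * initialized, conservatively report host functions as enabled. */
	static bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
	{
		if (!dev->priv.eswitch)
			return true;
		return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
	}

	/* Hypothetical caller, mirroring how the diff gates PF vport setup. */
	static int setup_host_pf(struct mlx5_core_dev *dev)
	{
		if (!mlx5_esw_host_functions_enabled(dev))
			return 0; /* host PFs don't exist: skip their vports */
		/* ... load the PF vport, enable the host PF HCA, etc. ... */
		return 0;
	}

	int main(void)
	{
		struct mlx5_eswitch esw = { .esw_funcs = { .host_funcs_disabled = true } };
		struct mlx5_core_dev dev = { .priv = { .eswitch = &esw } };

		printf("host PFs enabled: %d\n", mlx5_esw_host_functions_enabled(&dev));
		return setup_host_pf(&dev);
	}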

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -1038,6 +1038,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 	return ERR_PTR(err);
 }
 
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+	const u32 *query_host_out;
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return 0;
+
+	query_host_out = mlx5_esw_query_functions(esw->dev);
+	if (IS_ERR(query_host_out))
+		return PTR_ERR(query_host_out);
+
+	esw->esw_funcs.host_funcs_disabled =
+		MLX5_GET(query_esw_functions_out, query_host_out,
+			 host_params_context.host_pf_not_exist);
+
+	kvfree(query_host_out);
+	return 0;
+}
+
 static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
 {
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1278,17 +1297,19 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 		    esw->mode == MLX5_ESWITCH_LEGACY;
 
 	/* Enable PF vport */
-	if (pf_needed) {
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
 		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
 						    enabled_events);
 		if (ret)
 			return ret;
 	}
 
-	/* Enable external host PF HCA */
-	ret = host_pf_enable_hca(esw->dev);
-	if (ret)
-		goto pf_hca_err;
+	if (mlx5_esw_host_functions_enabled(esw->dev)) {
+		/* Enable external host PF HCA */
+		ret = host_pf_enable_hca(esw->dev);
+		if (ret)
+			goto pf_hca_err;
+	}
 
 	/* Enable ECPF vport */
 	if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -1320,9 +1341,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 	if (mlx5_ecpf_vport_exists(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 ecpf_err:
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 pf_hca_err:
-	if (pf_needed)
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 	return ret;
 }
@@ -1342,10 +1364,12 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 	}
 
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
-	    esw->mode == MLX5_ESWITCH_LEGACY)
+	if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+	     esw->mode == MLX5_ESWITCH_LEGACY) &&
+	    mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }
@@ -1674,7 +1698,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
 	void *hca_caps;
 	int err;
 
-	if (!mlx5_core_is_ecpf(dev)) {
+	if (!mlx5_core_is_ecpf(dev) ||
+	    !mlx5_esw_host_functions_enabled(dev)) {
 		*max_sfs = 0;
 		return 0;
 	}
@@ -1750,21 +1775,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
 	xa_init(&esw->vports);
 
-	err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
-	if (err)
-		goto err;
-	if (esw->first_host_vport == MLX5_VPORT_PF)
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
-	idx++;
-
-	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
-		err = mlx5_esw_vport_alloc(esw, idx, idx);
-		if (err)
-			goto err;
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
-		idx++;
-	}
+	if (mlx5_esw_host_functions_enabled(dev)) {
+		err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
+		if (err)
+			goto err;
+		if (esw->first_host_vport == MLX5_VPORT_PF)
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+		idx++;
+
+		for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+			err = mlx5_esw_vport_alloc(esw, idx, idx);
+			if (err)
+				goto err;
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+			idx++;
+		}
+	}
 
 	base_sf_num = mlx5_sf_start_function_id(dev);
 	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
 		err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1864,6 +1891,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto free_esw;
 
 	esw->dev = dev;
+	dev->priv.eswitch = esw;
 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
 	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
@@ -1874,11 +1902,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
+	err = mlx5_esw_host_functions_enabled_query(esw);
+	if (err)
+		goto abort;
+
 	err = mlx5_esw_vports_init(esw);
 	if (err)
 		goto abort;
 
-	dev->priv.eswitch = esw;
 	err = esw_offloads_init(esw);
 	if (err)
 		goto reps_err;
@@ -2410,3 +2441,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
 	dev->num_ipsec_offloads--;
 	mutex_unlock(&esw->state_lock);
 }
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+	if (!dev->priv.eswitch)
+		return true;
+
+	return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -323,6 +323,7 @@ struct mlx5_host_work {
 struct mlx5_esw_functions {
 	struct mlx5_nb		nb;
+	bool			host_funcs_disabled;
 	u16			num_vfs;
 	u16			num_ec_vfs;
 };
@@ -898,6 +899,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -965,6 +967,12 @@ static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
 }
 
 static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
+
+static inline bool
+mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+	return true;
+}
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif /* __MLX5_ESWITCH_H__ */

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -1213,7 +1213,8 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 			    misc_parameters);
 
-	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+	if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+	    mlx5_esw_host_functions_enabled(peer_dev)) {
 		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
 		esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
 						   MLX5_VPORT_PF);
@@ -1239,19 +1240,21 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 		flows[peer_vport->index] = flow;
 	}
 
-	mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
-				   mlx5_core_max_vfs(peer_dev)) {
-		esw_set_peer_miss_rule_source_port(esw,
-						   peer_esw,
-						   spec, peer_vport->vport);
-		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
-					   spec, &flow_act, &dest, 1);
-		if (IS_ERR(flow)) {
-			err = PTR_ERR(flow);
-			goto add_vf_flow_err;
-		}
-		flows[peer_vport->index] = flow;
-	}
+	if (mlx5_esw_host_functions_enabled(esw->dev)) {
+		mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+					   mlx5_core_max_vfs(peer_dev)) {
+			esw_set_peer_miss_rule_source_port(esw, peer_esw,
+							   spec,
+							   peer_vport->vport);
+			flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+						   spec, &flow_act, &dest, 1);
+			if (IS_ERR(flow)) {
+				err = PTR_ERR(flow);
+				goto add_vf_flow_err;
+			}
+			flows[peer_vport->index] = flow;
+		}
+	}
 
 	if (mlx5_core_ec_sriov_enabled(peer_dev)) {
@@ -1301,7 +1304,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
 add_ecpf_flow_err:
-	if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+	if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+	    mlx5_esw_host_functions_enabled(peer_dev)) {
 		peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
 		mlx5_del_flow_rules(flows[peer_vport->index]);
 	}
@@ -4059,7 +4064,8 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
 {
 	/* Currently, only ECPF based device has representor for host PF. */
 	if (vport_num == MLX5_VPORT_PF &&
-	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
+	    (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+	     !mlx5_esw_host_functions_enabled(esw->dev)))
 		return false;
 
 	if (vport_num == MLX5_VPORT_ECPF &&