Merge branch 'e-switch-vport-sharing-delegation'
Saeed Mahameed says:

====================
E-Switch vport sharing & delegation

An mlx5 E-Switch FDB table can manage vports belonging to other sibling
physical functions, such as the ECPF (ARM embedded cores) and the host PF
(x86). This provides a single source of truth for SDN software to manage
network pipelines from one host.

While such functionality already exists in mlx5, it has been limited by
static vport allocation: the number of vports shared between multi-host
functions had to be known before boot. This patchset enables
delegated/external vports to be discovered dynamically when switchdev mode
is enabled, leveraging new firmware capabilities for dynamic vport creation.

Adjacent functions that delegate their SR-IOV VFs to sibling PFs can now be
discovered dynamically when the sibling PF enters switchdev mode, after
SR-IOV has been enabled on the originating PF. This allows more flexible and
scalable management in multi-host and ECPF-to-host scenarios.

The patchset consists of the following changes:

- Refactoring of ACL root namespace handling: the storage of vport ACL root
  namespaces is converted from a linear array to an xarray, allowing dynamic
  creation of ACLs per individual vport (see the sketch below).
- Improvements to the vhca_id to vport mapping.
- Dynamic querying and creation of delegated functions/vports.
====================

Link: https://patch.msgid.link/20250829223722.900629-1-saeed@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
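For orientation before the diffs: a minimal kernel-style sketch (not taken
from the patchset; names suffixed _sketch are hypothetical) of the xarray
pattern the ACL refactor adopts. Root namespaces are keyed sparsely by vport
index and added or removed one vport at a time, instead of living in an
array sized up front; xa_load/xa_insert/xa_erase are the stock
<linux/xarray.h> API, used the same way in the fs_core.c hunks below.

#include <linux/xarray.h>
#include <linux/slab.h>

struct acl_ns_sketch {
        u16 vport_idx;
        void *root_ns;  /* stands in for struct mlx5_flow_root_namespace */
};

static DEFINE_XARRAY(acl_ns_xa_sketch);

static int acl_ns_add_sketch(u16 vport_idx, void *root_ns)
{
        struct acl_ns_sketch *entry;
        int err;

        /* one entry per vport; mirrors the patch's -EEXIST guard */
        if (xa_load(&acl_ns_xa_sketch, vport_idx))
                return -EEXIST;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->vport_idx = vport_idx;
        entry->root_ns = root_ns;
        err = xa_insert(&acl_ns_xa_sketch, vport_idx, entry, GFP_KERNEL);
        if (err)
                kfree(entry);
        return err;
}

static void *acl_ns_lookup_sketch(u16 vport_idx)
{
        struct acl_ns_sketch *entry = xa_load(&acl_ns_xa_sketch, vport_idx);

        return entry ? entry->root_ns : NULL;  /* NULL: no ACL ns created */
}

static void acl_ns_remove_sketch(u16 vport_idx)
{
        /* xa_erase() returns the removed entry (or NULL), so free follows */
        kfree(xa_erase(&acl_ns_xa_sketch, vport_idx));
}

The explicit xa_load() pre-check keeps the -EEXIST error code distinct;
xa_insert() alone would report an existing entry as -EBUSY.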
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
 # Core extra
 #
 mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-                                      ecpf.o rdma.o esw/legacy.o \
+                                      ecpf.o rdma.o esw/legacy.o esw/adj_vport.o \
                                       esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o

 mlx5_core-$(CONFIG_MLX5_ESWITCH)   += esw/acl/helper.o \
drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c (new file, 209 lines)
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+
+enum {
+        MLX5_ADJ_VPORT_DISCONNECT = 0x0,
+        MLX5_ADJ_VPORT_CONNECT = 0x1,
+};
+
+static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev,
+                                     u16 vport, bool connect)
+{
+        u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+
+        MLX5_SET(modify_vport_state_in, in, opcode,
+                 MLX5_CMD_OP_MODIFY_VPORT_STATE);
+        MLX5_SET(modify_vport_state_in, in, op_mod,
+                 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
+        MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+        MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+        MLX5_SET(modify_vport_state_in, in, ingress_connect_valid, 1);
+        MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1);
+        MLX5_SET(modify_vport_state_in, in, ingress_connect, connect);
+        MLX5_SET(modify_vport_state_in, in, egress_connect, connect);
+
+        return mlx5_cmd_exec_in(dev, modify_vport_state, in);
+}
+
+static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
+{
+        u32 in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};
+
+        MLX5_SET(destroy_esw_vport_in, in, opcode,
+                 MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
+        MLX5_SET(destroy_esw_vport_in, in, vport_num, vport);
+
+        mlx5_cmd_exec_in(dev, destroy_esw_vport, in);
+}
+
+static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
+                                     u16 *vport_num)
+{
+        u32 out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
+        int err;
+
+        MLX5_SET(create_esw_vport_in, in, opcode,
+                 MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
+        MLX5_SET(create_esw_vport_in, in, managed_vhca_id, vhca_id);
+
+        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        if (!err)
+                *vport_num = MLX5_GET(create_esw_vport_out, out, vport_num);
+
+        return err;
+}
+
+static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
+                                     const void *rid_info_reg)
+{
+        struct mlx5_vport *vport;
+        u16 vport_num;
+        int err;
+
+        err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
+        if (err) {
+                esw_warn(esw->dev,
+                         "Failed to create adjacent vport for vhca_id %d, err %d\n",
+                         vhca_id, err);
+                return err;
+        }
+
+        esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
+                  esw->last_vport_idx, vport_num, vhca_id);
+
+        err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx++, vport_num);
+        if (err)
+                goto destroy_esw_vport;
+
+        xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
+        vport = mlx5_eswitch_get_vport(esw, vport_num);
+        vport->adjacent = true;
+        vport->vhca_id = vhca_id;
+
+        vport->adj_info.parent_pci_devfn =
+                MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+                         parent_pci_device_function);
+        vport->adj_info.function_id =
+                MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);
+
+        mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
+        mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
+        err = mlx5_esw_offloads_rep_add(esw, vport);
+        if (err)
+                goto acl_ns_remove;
+
+        mlx5_esw_adj_vport_modify(esw->dev, vport_num, MLX5_ADJ_VPORT_CONNECT);
+        return 0;
+
+acl_ns_remove:
+        mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+                                            vport->index);
+        mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+                                           vport->index);
+        mlx5_esw_vport_free(esw, vport);
+destroy_esw_vport:
+        mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+        return err;
+}
+
+static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
+                                       struct mlx5_vport *vport)
+{
+        u16 vport_num = vport->vport;
+
+        esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
+                  vport_num, vport->vhca_id);
+        mlx5_esw_adj_vport_modify(esw->dev, vport_num,
+                                  MLX5_ADJ_VPORT_DISCONNECT);
+        mlx5_esw_offloads_rep_remove(esw, vport);
+        mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+                                           vport->index);
+        mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+                                            vport->index);
+        mlx5_esw_vport_free(esw, vport);
+        /* Reset the vport index back so new adj vports can use this index.
+         * When vport count can incrementally change, this needs to be modified.
+         */
+        esw->last_vport_idx--;
+        mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+}
+
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
+{
+        struct mlx5_vport *vport;
+        unsigned long i;
+
+        if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
+                return;
+
+        mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+                if (!vport->adjacent)
+                        continue;
+                mlx5_esw_adj_vport_destroy(esw, vport);
+        }
+}
+
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
+{
+        u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
+        u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
+        int outlen, err, i = 0;
+        u8 *out;
+        u32 count;
+
+        if (!delegated_vhca_max)
+                return;
+
+        outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
+                 delegated_vhca_max *
+                 MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
+
+        esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
+
+        out = kvzalloc(outlen, GFP_KERNEL);
+        if (!out)
+                return;
+
+        MLX5_SET(query_delegated_vhca_in, in, opcode,
+                 MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
+
+        err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+        if (err) {
+                kvfree(out);
+                esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
+                         err);
+                return;
+        }
+
+        count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
+        esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
+
+        for (i = 0; i < count; i++) {
+                const void *rid_info, *rid_info_reg;
+                u16 vhca_id;
+
+                rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
+                                        delegated_function_vhca_rid_info[i]);
+
+                rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
+                                            rid_info, function_vhca_rid_info);
+
+                vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+                                   vhca_id);
+                esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
+
+                err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
+                if (err) {
+                        esw_warn(esw->dev,
+                                 "Failed to init adjacent vhca 0x%x, err %d\n",
+                                 vhca_id, err);
+                        break;
+                }
+        }
+
+        kvfree(out);
+}
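Taken together, the new file turns switchdev enable/disable into a discovery
step plus its mirror image. A hypothetical sketch of the call ordering (the
wrapper names are invented; the real call sites are the
esw_offloads_enable()/esw_offloads_disable() hunks further down):

static int switchdev_enable_sketch(struct mlx5_eswitch *esw)
{
        /* QUERY_DELEGATED_VHCA, then per entry: CREATE_ESW_VPORT, ACL
         * namespaces, representor, ingress/egress connect.
         */
        mlx5_esw_adjacent_vhcas_setup(esw);

        /* ... rest of offloads setup; on failure the error path calls
         * mlx5_esw_adjacent_vhcas_cleanup(esw) to undo discovery.
         */
        return 0;
}

static void switchdev_disable_sketch(struct mlx5_eswitch *esw)
{
        /* per adjacent vport: disconnect, remove representor and ACL
         * namespaces, then DESTROY_ESW_VPORT
         */
        mlx5_esw_adjacent_vhcas_cleanup(esw);
}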
@@ -27,6 +27,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
 {
         struct mlx5_core_dev *dev = esw->dev;
         struct netdev_phys_item_id ppid = {};
+        struct mlx5_vport *vport;
         u32 controller_num = 0;
         bool external;
         u16 pfnum;
@@ -42,10 +43,18 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
                 dl_port->attrs.switch_id.id_len = ppid.id_len;
                 devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
         } else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+                u16 func_id = vport_num - 1;
+
+                vport = mlx5_eswitch_get_vport(esw, vport_num);
                 memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
                 dl_port->attrs.switch_id.id_len = ppid.id_len;
+                if (vport->adjacent) {
+                        func_id = vport->adj_info.function_id;
+                        pfnum = vport->adj_info.parent_pci_devfn;
+                }
+
                 devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
-                                              vport_num - 1, external);
+                                              func_id, external);
         } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
                 u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
@@ -1217,7 +1217,8 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
         unsigned long i;

         mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
-                if (!vport->enabled)
+                /* Adjacent VFs are unloaded separately */
+                if (!vport->enabled || vport->adjacent)
                         continue;
                 mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
         }
@@ -1236,6 +1237,42 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
         }
 }

+static void mlx5_eswitch_unload_adj_vf_vports(struct mlx5_eswitch *esw)
+{
+        struct mlx5_vport *vport;
+        unsigned long i;
+
+        mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+                if (!vport->enabled || !vport->adjacent)
+                        continue;
+                mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+        }
+}
+
+static int
+mlx5_eswitch_load_adj_vf_vports(struct mlx5_eswitch *esw,
+                                enum mlx5_eswitch_vport_event enabled_events)
+{
+        struct mlx5_vport *vport;
+        unsigned long i;
+        int err;
+
+        mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+                if (!vport->adjacent)
+                        continue;
+                err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport,
+                                                    enabled_events);
+                if (err)
+                        goto unload_adj_vf_vport;
+        }
+
+        return 0;
+
+unload_adj_vf_vport:
+        mlx5_eswitch_unload_adj_vf_vports(esw);
+        return err;
+}
+
 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
                                 enum mlx5_eswitch_vport_event enabled_events)
 {
@@ -1345,8 +1382,16 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                               enabled_events);
         if (ret)
                 goto vf_err;

+        /* Enable adjacent VF vports */
+        ret = mlx5_eswitch_load_adj_vf_vports(esw, enabled_events);
+        if (ret)
+                goto unload_vf_vports;
+
         return 0;

+unload_vf_vports:
+        mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
 vf_err:
         if (mlx5_core_ec_sriov_enabled(esw->dev))
                 mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
@@ -1367,6 +1412,8 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
  */
 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 {
+        mlx5_eswitch_unload_adj_vf_vports(esw);
+
         mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

         if (mlx5_core_ec_sriov_enabled(esw->dev))
@@ -1439,19 +1486,76 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
         blocking_notifier_call_chain(&esw->n_head, 0, &info);
 }

+static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+        int total_vports = mlx5_eswitch_get_total_vports(dev);
+        int err;
+        int i;
+
+        for (i = 0; i < total_vports; i++) {
+                err = mlx5_fs_vport_egress_acl_ns_add(steering, i);
+                if (err)
+                        goto acl_ns_remove;
+        }
+        return 0;
+
+acl_ns_remove:
+        while (i--)
+                mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+        return err;
+}
+
+static void mlx5_esw_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+        int total_vports = mlx5_eswitch_get_total_vports(dev);
+        int i;
+
+        for (i = total_vports - 1; i >= 0; i--)
+                mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+}
+
+static int mlx5_esw_ingress_acls_init(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+        int total_vports = mlx5_eswitch_get_total_vports(dev);
+        int err;
+        int i;
+
+        for (i = 0; i < total_vports; i++) {
+                err = mlx5_fs_vport_ingress_acl_ns_add(steering, i);
+                if (err)
+                        goto acl_ns_remove;
+        }
+        return 0;
+
+acl_ns_remove:
+        while (i--)
+                mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+        return err;
+}
+
+static void mlx5_esw_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+        int total_vports = mlx5_eswitch_get_total_vports(dev);
+        int i;
+
+        for (i = total_vports - 1; i >= 0; i--)
+                mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+}
+
 static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
 {
         struct mlx5_core_dev *dev = esw->dev;
-        int total_vports;
         int err;

         if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
                 return 0;

-        total_vports = mlx5_eswitch_get_total_vports(dev);
-
         if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-                err = mlx5_fs_egress_acls_init(dev, total_vports);
+                err = mlx5_esw_egress_acls_init(dev);
                 if (err)
                         return err;
         } else {
@@ -1459,7 +1563,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
         }

         if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-                err = mlx5_fs_ingress_acls_init(dev, total_vports);
+                err = mlx5_esw_ingress_acls_init(dev);
                 if (err)
                         goto err;
         } else {
@@ -1470,7 +1574,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)

 err:
         if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
-                mlx5_fs_egress_acls_cleanup(dev);
+                mlx5_esw_egress_acls_cleanup(dev);
         return err;
 }
@@ -1480,9 +1584,9 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)

         esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
         if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-                mlx5_fs_ingress_acls_cleanup(dev);
+                mlx5_esw_ingress_acls_cleanup(dev);
         if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
-                mlx5_fs_egress_acls_cleanup(dev);
+                mlx5_esw_egress_acls_cleanup(dev);
 }

 /**
@@ -1734,8 +1838,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
         return err;
 }

-static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
-                                int index, u16 vport_num)
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num)
 {
         struct mlx5_vport *vport;
         int err;
@@ -1762,8 +1865,9 @@ static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
         return err;
 }

-static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
+        esw->total_vports--;
         xa_erase(&esw->vports, vport->vport);
         kfree(vport);
 }
@@ -1847,6 +1951,9 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
         err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
         if (err)
                 goto err;

+        /* Adjacent vports or other dynamically created vports will use this */
+        esw->last_vport_idx = ++idx;
+
         return 0;

 err:
@@ -216,6 +216,12 @@ struct mlx5_vport {
         u32 metadata;
         int vhca_id;

+        bool adjacent; /* delegated vhca from adjacent function */
+        struct {
+                u16 parent_pci_devfn; /* Adjacent parent PCI device function */
+                u16 function_id; /* Function ID of the delegated VPort */
+        } adj_info;
+
         struct mlx5_vport_info info;

         /* Protected with the E-Switch qos domain lock. The Vport QoS can
@@ -384,6 +390,7 @@ struct mlx5_eswitch {

         struct mlx5_esw_bridge_offloads *br_offloads;
         struct mlx5_esw_offload offloads;
+        u32 last_vport_idx;
         int mode;
         u16 manager_vport;
         u16 first_host_vport;
@@ -417,6 +424,8 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

 #define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
@@ -622,6 +631,9 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,

 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
+
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)

 #define esw_info(__dev, format, ...) \
@@ -831,6 +843,11 @@ void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
 bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);

+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+                                  const struct mlx5_vport *vport);
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+                              const struct mlx5_vport *vport);
+
 /**
  * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
  *
@@ -2378,7 +2378,20 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
         return 0;
 }

-static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+                                  const struct mlx5_vport *vport)
+{
+        struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
+                                               vport->vport);
+
+        if (!rep)
+                return;
+        xa_erase(&esw->offloads.vport_reps, vport->vport);
+        kfree(rep);
+}
+
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+                              const struct mlx5_vport *vport)
 {
         struct mlx5_eswitch_rep *rep;
         int rep_type;
@@ -2390,9 +2403,19 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)

         rep->vport = vport->vport;
         rep->vport_index = vport->index;
-        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-                atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

+        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+                if (!esw->offloads.rep_ops[rep_type]) {
+                        atomic_set(&rep->rep_data[rep_type].state,
+                                   REP_UNREGISTERED);
+                        continue;
+                }
+                /* Dynamic/delegated vports add their representors after
+                 * mlx5_eswitch_register_vport_reps, so mark them as registered
+                 * for them to be loaded later with the others.
+                 */
+                rep->esw = esw;
+                atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+        }
         err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
         if (err)
                 goto insert_err;
@@ -2430,7 +2453,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
         xa_init(&esw->offloads.vport_reps);

         mlx5_esw_for_each_vport(esw, i, vport) {
-                err = mlx5_esw_offloads_rep_init(esw, vport);
+                err = mlx5_esw_offloads_rep_add(esw, vport);
                 if (err)
                         goto err;
         }
@@ -3538,6 +3561,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
         int err;

         mutex_init(&esw->offloads.termtbl_mutex);
+        mlx5_esw_adjacent_vhcas_setup(esw);
+
         err = mlx5_rdma_enable_roce(esw->dev);
         if (err)
                 goto err_roce;
@@ -3602,6 +3627,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 err_metadata:
         mlx5_rdma_disable_roce(esw->dev);
 err_roce:
+        mlx5_esw_adjacent_vhcas_cleanup(esw);
         mutex_destroy(&esw->offloads.termtbl_mutex);
         return err;
 }
@@ -3635,6 +3661,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
         mapping_destroy(esw->offloads.reg_c0_obj_pool);
         esw_offloads_metadata_uninit(esw);
         mlx5_rdma_disable_roce(esw->dev);
+        mlx5_esw_adjacent_vhcas_cleanup(esw);
         mutex_destroy(&esw->offloads.termtbl_mutex);
 }
@@ -2793,30 +2793,32 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);

+struct mlx5_vport_acl_root_ns {
+        u16 vport_idx;
+        struct mlx5_flow_root_namespace *root_ns;
+};
+
 struct mlx5_flow_namespace *
 mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
                               enum mlx5_flow_namespace_type type, int vport_idx)
 {
         struct mlx5_flow_steering *steering = dev->priv.steering;
+        struct mlx5_vport_acl_root_ns *vport_ns;

         if (!steering)
                 return NULL;

         switch (type) {
         case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-                if (vport_idx >= steering->esw_egress_acl_vports)
-                        return NULL;
-                if (steering->esw_egress_root_ns &&
-                    steering->esw_egress_root_ns[vport_idx])
-                        return &steering->esw_egress_root_ns[vport_idx]->ns;
+                vport_ns = xa_load(&steering->esw_egress_root_ns, vport_idx);
+                if (vport_ns)
+                        return &vport_ns->root_ns->ns;
                 else
                         return NULL;
         case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-                if (vport_idx >= steering->esw_ingress_acl_vports)
-                        return NULL;
-                if (steering->esw_ingress_root_ns &&
-                    steering->esw_ingress_root_ns[vport_idx])
-                        return &steering->esw_ingress_root_ns[vport_idx]->ns;
+                vport_ns = xa_load(&steering->esw_ingress_root_ns, vport_idx);
+                if (vport_ns)
+                        return &vport_ns->root_ns->ns;
                 else
                         return NULL;
         case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
@@ -3575,118 +3577,102 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
         return err;
 }

-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
-{
-        struct fs_prio *prio;
-
-        steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-        if (!steering->esw_egress_root_ns[vport])
-                return -ENOMEM;
-
-        /* create 1 prio*/
-        prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
-        return PTR_ERR_OR_ZERO(prio);
-}
-
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
-{
-        struct fs_prio *prio;
-
-        steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-        if (!steering->esw_ingress_root_ns[vport])
-                return -ENOMEM;
-
-        /* create 1 prio*/
-        prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
-        return PTR_ERR_OR_ZERO(prio);
-}
-
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
-        struct mlx5_flow_steering *steering = dev->priv.steering;
-        int err;
-        int i;
-
-        steering->esw_egress_root_ns =
-                        kcalloc(total_vports,
-                                sizeof(*steering->esw_egress_root_ns),
-                                GFP_KERNEL);
-        if (!steering->esw_egress_root_ns)
-                return -ENOMEM;
-
-        for (i = 0; i < total_vports; i++) {
-                err = init_egress_acl_root_ns(steering, i);
-                if (err)
-                        goto cleanup_root_ns;
-        }
-        steering->esw_egress_acl_vports = total_vports;
-        return 0;
-
-cleanup_root_ns:
-        for (i--; i >= 0; i--)
-                cleanup_root_ns(steering->esw_egress_root_ns[i]);
-        kfree(steering->esw_egress_root_ns);
-        steering->esw_egress_root_ns = NULL;
-        return err;
-}
-
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
-{
-        struct mlx5_flow_steering *steering = dev->priv.steering;
-        int i;
-
-        if (!steering->esw_egress_root_ns)
-                return;
-
-        for (i = 0; i < steering->esw_egress_acl_vports; i++)
-                cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
-        kfree(steering->esw_egress_root_ns);
-        steering->esw_egress_root_ns = NULL;
-}
-
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
-        struct mlx5_flow_steering *steering = dev->priv.steering;
-        int err;
-        int i;
-
-        steering->esw_ingress_root_ns =
-                        kcalloc(total_vports,
-                                sizeof(*steering->esw_ingress_root_ns),
-                                GFP_KERNEL);
-        if (!steering->esw_ingress_root_ns)
-                return -ENOMEM;
-
-        for (i = 0; i < total_vports; i++) {
-                err = init_ingress_acl_root_ns(steering, i);
-                if (err)
-                        goto cleanup_root_ns;
-        }
-        steering->esw_ingress_acl_vports = total_vports;
-        return 0;
-
-cleanup_root_ns:
-        for (i--; i >= 0; i--)
-                cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-        kfree(steering->esw_ingress_root_ns);
-        steering->esw_ingress_root_ns = NULL;
-        return err;
-}
-
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
-{
-        struct mlx5_flow_steering *steering = dev->priv.steering;
-        int i;
-
-        if (!steering->esw_ingress_root_ns)
-                return;
-
-        for (i = 0; i < steering->esw_ingress_acl_vports; i++)
-                cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-
-        kfree(steering->esw_ingress_root_ns);
-        steering->esw_ingress_root_ns = NULL;
-}
+static void
+mlx5_fs_remove_vport_acl_root_ns(struct xarray *esw_acl_root_ns, u16 vport_idx)
+{
+        struct mlx5_vport_acl_root_ns *vport_ns;
+
+        vport_ns = xa_erase(esw_acl_root_ns, vport_idx);
+        if (vport_ns) {
+                cleanup_root_ns(vport_ns->root_ns);
+                kfree(vport_ns);
+        }
+}
+
+static int
+mlx5_fs_add_vport_acl_root_ns(struct mlx5_flow_steering *steering,
+                              struct xarray *esw_acl_root_ns,
+                              enum fs_flow_table_type table_type,
+                              u16 vport_idx)
+{
+        struct mlx5_vport_acl_root_ns *vport_ns;
+        struct fs_prio *prio;
+        int err;
+
+        /* sanity check, intended xarrays are used */
+        if (WARN_ON(esw_acl_root_ns != &steering->esw_egress_root_ns &&
+                    esw_acl_root_ns != &steering->esw_ingress_root_ns))
+                return -EINVAL;
+
+        if (table_type != FS_FT_ESW_EGRESS_ACL &&
+            table_type != FS_FT_ESW_INGRESS_ACL) {
+                mlx5_core_err(steering->dev,
+                              "Invalid table type %d for egress/ingress ACLs\n",
+                              table_type);
+                return -EINVAL;
+        }
+
+        if (xa_load(esw_acl_root_ns, vport_idx))
+                return -EEXIST;
+
+        vport_ns = kzalloc(sizeof(*vport_ns), GFP_KERNEL);
+        if (!vport_ns)
+                return -ENOMEM;
+
+        vport_ns->root_ns = create_root_ns(steering, table_type);
+        if (!vport_ns->root_ns) {
+                err = -ENOMEM;
+                goto kfree_vport_ns;
+        }
+
+        /* create 1 prio*/
+        prio = fs_create_prio(&vport_ns->root_ns->ns, 0, 1);
+        if (IS_ERR(prio)) {
+                err = PTR_ERR(prio);
+                goto cleanup_root_ns;
+        }
+
+        vport_ns->vport_idx = vport_idx;
+        err = xa_insert(esw_acl_root_ns, vport_idx, vport_ns, GFP_KERNEL);
+        if (err)
+                goto cleanup_root_ns;
+        return 0;
+
+cleanup_root_ns:
+        cleanup_root_ns(vport_ns->root_ns);
+kfree_vport_ns:
+        kfree(vport_ns);
+        return err;
+}
+
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+                                    u16 vport_idx)
+{
+        return mlx5_fs_add_vport_acl_root_ns(steering,
+                                             &steering->esw_egress_root_ns,
+                                             FS_FT_ESW_EGRESS_ACL, vport_idx);
+}
+
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+                                     u16 vport_idx)
+{
+        return mlx5_fs_add_vport_acl_root_ns(steering,
+                                             &steering->esw_ingress_root_ns,
+                                             FS_FT_ESW_INGRESS_ACL, vport_idx);
+}
+
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+                                        int vport_idx)
+{
+        mlx5_fs_remove_vport_acl_root_ns(&steering->esw_egress_root_ns,
+                                         vport_idx);
+}
+
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+                                         int vport_idx)
+{
+        mlx5_fs_remove_vport_acl_root_ns(&steering->esw_ingress_root_ns,
+                                         vport_idx);
+}

 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
@@ -3818,6 +3804,11 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
 {
         struct mlx5_flow_steering *steering = dev->priv.steering;

+        WARN_ON(!xa_empty(&steering->esw_egress_root_ns));
+        WARN_ON(!xa_empty(&steering->esw_ingress_root_ns));
+        xa_destroy(&steering->esw_egress_root_ns);
+        xa_destroy(&steering->esw_ingress_root_ns);
+
         cleanup_root_ns(steering->root_ns);
         cleanup_fdb_root_ns(steering);
         cleanup_root_ns(steering->port_sel_root_ns);
@@ -3908,6 +3899,8 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
                 goto err;
         }

+        xa_init(&steering->esw_egress_root_ns);
+        xa_init(&steering->esw_ingress_root_ns);
         return 0;

 err:
@@ -151,16 +151,14 @@ struct mlx5_flow_steering {
         struct mlx5_flow_root_namespace *root_ns;
         struct mlx5_flow_root_namespace *fdb_root_ns;
         struct mlx5_flow_namespace      **fdb_sub_ns;
-        struct mlx5_flow_root_namespace **esw_egress_root_ns;
-        struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+        struct xarray                   esw_egress_root_ns;
+        struct xarray                   esw_ingress_root_ns;
         struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
         struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
         struct mlx5_flow_root_namespace *rdma_rx_root_ns;
         struct mlx5_flow_root_namespace *rdma_tx_root_ns;
         struct mlx5_flow_root_namespace *egress_root_ns;
         struct mlx5_flow_root_namespace *port_sel_root_ns;
-        int esw_egress_acl_vports;
-        int esw_ingress_acl_vports;
         struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
         struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
         int rdma_transport_rx_vports;
@@ -378,10 +376,14 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev);
 int mlx5_fs_core_init(struct mlx5_core_dev *dev);
 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);

-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+                                    u16 vport_idx);
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+                                     u16 vport_idx);
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+                                        int vport_idx);
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+                                         int vport_idx);

 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
@@ -1200,40 +1200,20 @@ int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
 int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
                            u16 vport_number, u16 *gvmi)
 {
-        u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
-        int out_size;
-        void *out;
         int err;

-        if (other_function) {
-                err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
-                if (!err)
-                        return 0;
+        if (!other_function) {
+                /* self vhca_id */
+                *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+                return 0;
         }

-        /* get vhca_id for `this` function */
-        out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-        out = kzalloc(out_size, GFP_KERNEL);
-        if (!out)
-                return -ENOMEM;
-
-        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-        MLX5_SET(query_hca_cap_in, in, op_mod,
-                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
-
-        err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
-        if (err) {
-                kfree(out);
+        err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
+        if (err) {
+                mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+                              vport_number);
                 return err;
         }

-        *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
-        kfree(out);
-
         return 0;
 }
@@ -35,41 +35,21 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
                           u16 vport_number, u16 *gvmi)
 {
-        u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
-        int out_size;
-        void *out;
         int err;

-        if (other_vport) {
-                err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
-                if (!err)
-                        return 0;
+        if (!other_vport) {
+                /* self vhca_id */
+                *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+                return 0;
         }

-        /* get vhca_id for `this` function */
-        out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-        out = kzalloc(out_size, GFP_KERNEL);
-        if (!out)
-                return -ENOMEM;
-
-        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-        MLX5_SET(query_hca_cap_in, in, op_mod,
-                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
-                 HCA_CAP_OPMOD_GET_CUR);
-
-        err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
-        if (err) {
-                kfree(out);
+        err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
+        if (err) {
+                mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+                              vport_number);
                 return err;
         }

-        *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
-        kfree(out);
         return 0;
 }