mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 15:13:44 -04:00
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Tariq Toukan says: ==================== mlx5-next updates 2025-07-08 The following pull-request contains common mlx5 updates for your *net-next* tree. v2: https://lore.kernel.org/1751574385-24672-1-git-send-email-tariqt@nvidia.com * 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux: net/mlx5: Check device memory pointer before usage net/mlx5: fs, fix RDMA TRANSPORT init cleanup flow net/mlx5: Add IFC bits for PCIe Congestion Event object net/mlx5: Small refactor for general object capabilities net/mlx5: fs, add multiple prios to RDMA TRANSPORT steering domain ==================== Link: https://patch.msgid.link/1752002102-11316-1-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
@@ -282,7 +282,7 @@ static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
|
||||
int err;
|
||||
u64 address;
|
||||
|
||||
if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
|
||||
if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
|
||||
|
||||
@@ -3245,34 +3245,62 @@ static int
|
||||
init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
|
||||
int vport_idx)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root_ns;
|
||||
struct fs_prio *prio;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
steering->rdma_transport_rx_root_ns[vport_idx] =
|
||||
create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
|
||||
if (!steering->rdma_transport_rx_root_ns[vport_idx])
|
||||
return -ENOMEM;
|
||||
|
||||
/* create 1 prio*/
|
||||
prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
|
||||
MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
|
||||
return PTR_ERR_OR_ZERO(prio);
|
||||
root_ns = steering->rdma_transport_rx_root_ns[vport_idx];
|
||||
|
||||
for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
|
||||
prio = fs_create_prio(&root_ns->ns, i, 1);
|
||||
if (IS_ERR(prio)) {
|
||||
ret = PTR_ERR(prio);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
set_prio_attrs(root_ns);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
cleanup_root_ns(root_ns);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
|
||||
int vport_idx)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root_ns;
|
||||
struct fs_prio *prio;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
steering->rdma_transport_tx_root_ns[vport_idx] =
|
||||
create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
|
||||
if (!steering->rdma_transport_tx_root_ns[vport_idx])
|
||||
return -ENOMEM;
|
||||
|
||||
/* create 1 prio*/
|
||||
prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
|
||||
MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
|
||||
return PTR_ERR_OR_ZERO(prio);
|
||||
root_ns = steering->rdma_transport_tx_root_ns[vport_idx];
|
||||
|
||||
for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
|
||||
prio = fs_create_prio(&root_ns->ns, i, 1);
|
||||
if (IS_ERR(prio)) {
|
||||
ret = PTR_ERR(prio);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
set_prio_attrs(root_ns);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
cleanup_root_ns(root_ns);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
|
||||
|
||||
@@ -30,7 +30,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
|
||||
|
||||
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
|
||||
if (!dm)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return NULL;
|
||||
|
||||
spin_lock_init(&dm->lock);
|
||||
|
||||
@@ -96,7 +96,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
|
||||
err_steering:
|
||||
kfree(dm);
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
|
||||
|
||||
@@ -1102,9 +1102,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
|
||||
}
|
||||
|
||||
dev->dm = mlx5_dm_create(dev);
|
||||
if (IS_ERR(dev->dm))
|
||||
mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
|
||||
|
||||
dev->tracer = mlx5_fw_tracer_create(dev);
|
||||
dev->hv_vhca = mlx5_hv_vhca_create(dev);
|
||||
dev->rsc_dump = mlx5_rsc_dump_create(dev);
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
|
||||
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
|
||||
|
||||
/* Number of bypass priority levels exposed by each RDMA TRANSPORT
 * steering domain (one fs_prio is created per level).
 */
#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16
|
||||
#define MLX5_FS_MAX_POOL_SIZE BIT(30)
|
||||
|
||||
enum mlx5_flow_destination_type {
|
||||
|
||||
@@ -12501,17 +12501,6 @@ struct mlx5_ifc_affiliated_event_header_bits {
|
||||
u8 obj_id[0x20];
|
||||
};
|
||||
|
||||
/* NOTE(review): the legacy open-coded general-object capability bit
 * enums (MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_* = BIT_ULL(<literal>) and
 * MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = BIT_ULL(0x13))
 * previously defined here duplicated the enumerators now derived from
 * MLX5_GENERAL_OBJECT_TYPES_* further down in this file, which is a
 * redefinition error; the derived definitions carry identical values.
 */
|
||||
|
||||
enum {
|
||||
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
|
||||
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
|
||||
@@ -12520,9 +12509,28 @@ enum {
|
||||
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
|
||||
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
|
||||
MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
|
||||
MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58,
|
||||
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
|
||||
};
|
||||
|
||||
/* HCA_CAP.general_obj_types capability bits, derived directly from the
 * corresponding MLX5_GENERAL_OBJECT_TYPES_* object type values so the
 * capability bit and the object type cannot drift apart.
 */
enum {
	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY),
	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC),
	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER),
	MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO),
};
|
||||
|
||||
/* HCA_CAP_2.general_obj_types capability bits. These cover the second
 * 64-bit capability word, so the object type value is offset by 0x40
 * before taking the bit position (e.g. RDMA_CTRL 0x53 -> bit 0x13).
 */
enum {
	MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40),
	MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT =
		BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40),
};
|
||||
|
||||
enum {
|
||||
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
|
||||
};
|
||||
@@ -13279,4 +13287,41 @@ struct mlx5_ifc_mrtcq_reg_bits {
|
||||
u8 reserved_at_80[0x180];
|
||||
};
|
||||
|
||||
/* PCIe Congestion Event object layout (object type
 * MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT). Per the mlx5_ifc
 * convention, the array extent of each field is its width in bits and
 * reserved_at_* names carry the field's bit offset.
 */
struct mlx5_ifc_pcie_cong_event_obj_bits {
	/* presumably a bitmask selecting which fields a MODIFY updates
	 * (see enum mlx5e_pcie_cong_event_mod_field) — confirm with PRM
	 */
	u8 modify_select_field[0x40];

	u8 inbound_event_en[0x1];
	u8 outbound_event_en[0x1];
	u8 reserved_at_42[0x1e];

	u8 reserved_at_60[0x1];
	u8 inbound_cong_state[0x3];
	u8 reserved_at_64[0x1];
	u8 outbound_cong_state[0x3];
	u8 reserved_at_68[0x18];

	/* low/high congestion threshold pair, per direction */
	u8 inbound_cong_low_threshold[0x10];
	u8 inbound_cong_high_threshold[0x10];

	u8 outbound_cong_low_threshold[0x10];
	u8 outbound_cong_high_threshold[0x10];

	u8 reserved_at_e0[0x340];
};
|
||||
|
||||
/* General-object command input targeting a PCIe Congestion Event
 * object: generic object command header followed by the object payload.
 */
struct mlx5_ifc_pcie_cong_event_cmd_in_bits {
	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
	struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
};
|
||||
|
||||
/* General-object command output for a PCIe Congestion Event object:
 * generic object response header followed by the object payload.
 */
struct mlx5_ifc_pcie_cong_event_cmd_out_bits {
	struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
	struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
};
|
||||
|
||||
/* Bits for modify_select_field of the PCIe Congestion Event object
 * (see struct mlx5_ifc_pcie_cong_event_obj_bits), choosing which
 * attributes a MODIFY command updates.
 */
enum mlx5e_pcie_cong_event_mod_field {
	MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0),
	MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
};
|
||||
|
||||
#endif /* MLX5_IFC_H */
|
||||
|
||||
Reference in New Issue
Block a user