net/mlx5: DR, Warn and ignore SW steering rule insertion on QP err

In the event of the SW steering QP entering an error state, SW steering
can no longer insert rules. Instead of failing each insertion, it issues
a one-time warning and then quietly ignores subsequent insertion
requests (returning success without posting to the QP).

Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in:
Yevgeny Kliteynik
2021-07-04 11:57:38 +03:00
committed by Saeed Mahameed
parent f35715a657
commit d5a84e968f
2 changed files with 15 additions and 2 deletions

View File

@@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
 	do {
 		ne = dr_poll_cq(send_ring->cq, 1);
-		if (ne < 0)
+		if (unlikely(ne < 0)) {
+			mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
+					    send_ring->qp->qpn);
+			send_ring->err_state = true;
 			return ne;
-		else if (ne == 1)
+		} else if (ne == 1) {
 			send_ring->pending_wqe -= send_ring->signal_th;
+		}
 	} while (is_drain && send_ring->pending_wqe);

 	return 0;
@@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
 	u32 buff_offset;
 	int ret;

+	if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+		     send_ring->err_state)) {
+		mlx5_core_dbg_once(dmn->mdev,
+				   "Skipping post send: QP err state: %d, device state: %d\n",
+				   send_ring->err_state, dmn->mdev->state);
+		return 0;
+	}
+
 	spin_lock(&send_ring->lock);

 	ret = dr_handle_pending_wc(dmn, send_ring);

View File

@@ -1285,6 +1285,7 @@ struct mlx5dr_send_ring {
 	u8 sync_buff[MIN_READ_SYNC];
 	struct mlx5dr_mr *sync_mr;
 	spinlock_t lock; /* Protect the data path of the send ring */
+	bool err_state; /* send_ring is not usable in err state */
 };

 int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);