mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 02:01:18 -04:00
RDMA/mana_ib: cleanup the usage of mana_gd_send_request()
Do not check the status of the response header returned by
mana_gd_send_request(), as the returned error code already indicates the
request status. mana_gd_send_request() may return no error code while the
response status is GDMA_STATUS_MORE_ENTRIES, which is a successful
completion; that status is used for checking the correctness of
multi-request operations, such as creation of a dma region with
mana_ib_gd_create_dma_region().

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/20260318173939.1417856-1-kotaranov@linux.microsoft.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
committed by Leon Romanovsky
parent 2bb02691df
commit 5aa437c93d
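The contract this cleanup relies on can be sketched in a few lines of C. This is a simplified illustration, not driver code: the struct, the status value, and the stub below are assumptions standing in for the real gdma definitions.

#include <errno.h>
#include <stdio.h>

/* Assumed value, for illustration only. */
#define GDMA_STATUS_MORE_ENTRIES 0x00000105

struct gdma_resp_hdr {
        unsigned int status;
};

/*
 * Stand-in for mana_gd_send_request(): a bad response status is already
 * folded into the return code, except GDMA_STATUS_MORE_ENTRIES, which is
 * a successful completion of one message in a multi-message series.
 */
static int send_request_sketch(const struct gdma_resp_hdr *resp)
{
        if (resp->status && resp->status != GDMA_STATUS_MORE_ENTRIES)
                return -EPROTO;
        return 0;
}

int main(void)
{
        struct gdma_resp_hdr ok = { .status = 0 };
        struct gdma_resp_hdr more = { .status = GDMA_STATUS_MORE_ENTRIES };
        struct gdma_resp_hdr bad = { .status = 0xdead };

        printf("ok:   %d\n", send_request_sketch(&ok));   /* 0 */
        printf("more: %d\n", send_request_sketch(&more)); /* 0: success */
        printf("bad:  %d\n", send_request_sketch(&bad));  /* -EPROTO */
        return 0;
}

Given this contract, a caller that only needs success/failure can reduce its check to "if (err) return err;", which is exactly the shape of the changes below.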
@@ -87,18 +87,9 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 		flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;
 
 	req.flags = flags;
-	err = mana_gd_send_request(gc, sizeof(req), &req,
-				   sizeof(resp), &resp);
-
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev,
-			  "Failed to get pd_id err %d status %u\n", err,
-			  resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err)
 		return err;
-	}
 
 	pd->pd_handle = resp.pd_handle;
 	pd->pdn = resp.pd_id;
@@ -118,7 +109,6 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct gdma_destroy_pd_req req = {};
 	struct mana_ib_dev *dev;
 	struct gdma_context *gc;
-	int err;
 
 	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 	gc = mdev_to_gc(dev);
@@ -127,18 +117,8 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 			     sizeof(resp));
 
 	req.pd_handle = pd->pd_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req,
-				   sizeof(resp), &resp);
-
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev,
-			  "Failed to destroy pd_handle 0x%llx err %d status %u",
-			  pd->pd_handle, err, resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-	}
-
-	return err;
+
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
@@ -146,7 +126,6 @@ static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
 {
 	struct gdma_destroy_resource_range_req req = {};
 	struct gdma_resp_hdr resp = {};
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
 			     sizeof(req), sizeof(resp));
@@ -155,15 +134,7 @@ static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
 	req.num_resources = 1;
 	req.allocated_resources = doorbell_page;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.status) {
-		dev_err(gc->dev,
-			"Failed to destroy doorbell page: ret %d, 0x%x\n",
-			err, resp.status);
-		return err ?: -EPROTO;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
@@ -184,12 +155,8 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
 	req.allocated_resources = 0;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		dev_err(gc->dev,
-			"Failed to allocate doorbell page: ret %d, 0x%x\n",
-			err, resp.hdr.status);
-		return err ?: -EPROTO;
-	}
+	if (err)
+		return err;
 
 	*doorbell_page = resp.allocated_resources;
 
@@ -682,14 +649,10 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 	req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
 	req.hdr.dev_id = dev->gdma_dev->dev_id;
 
-	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
-				   &req, sizeof(resp), &resp);
-
-	if (err) {
-		ibdev_err(&dev->ib_dev,
-			  "Failed to query adapter caps err %d", err);
+	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req,
+				   sizeof(resp), &resp);
+	if (err)
 		return err;
-	}
 
 	caps->max_sq_id = resp.max_sq_id;
 	caps->max_rq_id = resp.max_rq_id;
@@ -727,12 +690,10 @@ int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
 	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 			     sizeof(req), sizeof(resp));
 
-	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&dev->ib_dev,
-			  "Failed to query adapter caps err %d", err);
+	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req,
+				   sizeof(resp), &resp);
+	if (err)
 		return err;
-	}
 
 	caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
 	caps->max_cq_count = resp.max_cq;
@@ -847,10 +808,8 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
 	req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+	if (err)
 		return err;
-	}
 	mdev->adapter_handle = resp.adapter;
 
 	return 0;
@@ -861,20 +820,13 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
 	struct mana_rnic_destroy_adapter_resp resp = {};
 	struct mana_rnic_destroy_adapter_req req = {};
 	struct gdma_context *gc;
-	int err;
 
 	gc = mdev_to_gc(mdev);
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
@@ -884,7 +836,6 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
 	struct mana_rnic_config_addr_resp resp = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_config_addr_req req = {};
-	int err;
 
 	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
 		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
@@ -898,13 +849,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
 	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
 	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
@@ -914,7 +859,6 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
 	struct mana_rnic_config_addr_resp resp = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_config_addr_req req = {};
-	int err;
 
 	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
 		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
@@ -928,13 +872,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
 	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
 	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
@@ -942,7 +880,6 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
 	struct mana_rnic_config_mac_addr_resp resp = {};
 	struct mana_rnic_config_mac_addr_req req = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
@@ -950,13 +887,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
 	req.op = op;
 	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
@@ -996,7 +927,6 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_destroy_cq_resp resp = {};
 	struct mana_rnic_destroy_cq_req req = {};
-	int err;
 
 	if (cq->cq_handle == INVALID_MANA_HANDLE)
 		return 0;
@@ -1006,14 +936,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	req.adapter = mdev->adapter_handle;
 	req.cq_handle = cq->cq_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
@@ -1043,10 +966,9 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
 	req.flags = flags;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
+	if (err)
 		return err;
-	}
+
 	qp->qp_handle = resp.rc_qp_handle;
 	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
 		qp->rc_qp.queues[i].id = resp.queue_ids[i];
@@ -1061,18 +983,13 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
 	struct mana_rnic_destroy_rc_qp_resp resp = {0};
 	struct mana_rnic_destroy_rc_qp_req req = {0};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 	req.rc_qp_handle = qp->qp_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
-		return err;
-	}
-	return 0;
+
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
@@ -1101,10 +1018,9 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
 	req.max_recv_sge = attr->cap.max_recv_sge;
 	req.qp_type = type;
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
+	if (err)
 		return err;
-	}
+
 	qp->qp_handle = resp.qp_handle;
 	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
 		qp->ud_qp.queues[i].id = resp.queue_ids[i];
@@ -1119,16 +1035,11 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
 	struct mana_rnic_destroy_udqp_resp resp = {0};
 	struct mana_rnic_destroy_udqp_req req = {0};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 	req.qp_handle = qp->qp_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
-		return err;
-	}
-	return 0;
+
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
@@ -70,15 +70,8 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
 	}
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
-			  resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-
+	if (err)
 		return err;
-	}
 
 	mr->ibmr.lkey = resp.lkey;
 	mr->ibmr.rkey = resp.rkey;
@@ -92,23 +85,13 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
 	struct gdma_destroy_mr_response resp = {};
 	struct gdma_destroy_mr_request req = {};
 	struct gdma_context *gc = mdev_to_gc(dev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
 			     sizeof(resp));
 
 	req.mr_handle = mr_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
-			resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
@@ -339,12 +322,8 @@ static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
 	req.flags = attr->flags;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		if (!err)
-			err = -EPROTO;
-
+	if (err)
 		return err;
-	}
 
 	dm->dm_handle = resp.dm_handle;
 
@@ -380,20 +359,11 @@ static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct gdma_destroy_dm_resp resp = {};
 	struct gdma_destroy_dm_req req = {};
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
 	req.dm_handle = dm->dm_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		if (!err)
-			err = -EPROTO;
-
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
@@ -68,22 +68,6 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 					 req->vport, default_rxobj);
 
 	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
-	if (err) {
-		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
-		goto out;
-	}
-
-	if (resp.hdr.status) {
-		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
-			   resp.hdr.status);
-		err = -EPROTO;
-		goto out;
-	}
-
-	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
-		    mpc->port_handle, log_ind_tbl_size);
-
-out:
 	kfree(req);
 	return err;
 }
@@ -731,7 +715,6 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_port_context *mpc;
 	struct net_device *ndev;
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
 
@@ -784,13 +767,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
 	}
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
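The one caller named in the commit message as still needing the response header is the multi-request path. A hedged sketch of that expected-status check follows, with illustrative names rather than the literal mana_ib_gd_create_dma_region() code.

#include <errno.h>
#include <stdbool.h>

/* Assumed value, for illustration only. */
#define GDMA_STATUS_MORE_ENTRIES 0x00000105

struct gdma_resp_hdr {
        unsigned int status;
};

/*
 * When a DMA-region page list spans several messages, the first response
 * is expected to carry GDMA_STATUS_MORE_ENTRIES rather than plain zero,
 * so this caller keeps inspecting the response header.
 */
static int check_first_resp(int err, const struct gdma_resp_hdr *hdr,
                            bool more_messages_follow)
{
        unsigned int expected = more_messages_follow ?
                                GDMA_STATUS_MORE_ENTRIES : 0;

        if (err)
                return err;             /* transport-level failure */
        if (hdr->status != expected)
                return -EPROTO;         /* device rejected the request */
        return 0;
}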