RDMA/mana_ib: Support memory windows
Implement .alloc_mw() and .dealloc_mw() for the mana device. This is just the
basic infrastructure; MWs are not practically usable until additional kernel
support for allowing user space to submit MW work requests is completed.

Link: https://patch.msgid.link/r/20260331090851.2276205-1-kotaranov@linux.microsoft.com
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit b21058880c
parent 7244491dab
committed by Jason Gunthorpe
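
For orientation, the new callbacks sit behind the standard user-space verbs.
Below is a minimal libibverbs sketch of the alloc/dealloc path — an
illustration only, not part of the patch; error handling is trimmed and it
assumes the first RDMA device returned is the mana device:

/* Sketch: user-space MW allocation via libibverbs. The ibv_alloc_mw()
 * call reaches the driver's .alloc_mw() (mana_ib_alloc_mw below), and
 * ibv_dealloc_mw() reaches .dealloc_mw(). Binding the window is the
 * part that still needs the pending work-request support.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx = ibv_open_device(list[0]);
	struct ibv_pd *pd = ibv_alloc_pd(ctx);

	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
	if (!mw) {
		perror("ibv_alloc_mw");
		return 1;
	}
	printf("MW rkey: 0x%x\n", mw->rkey);

	ibv_dealloc_mw(mw);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}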
drivers/infiniband/hw/mana/device.c

@@ -17,6 +17,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
 	.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
 
 	.add_gid = mana_ib_gd_add_gid,
+	.alloc_mw = mana_ib_alloc_mw,
 	.alloc_pd = mana_ib_alloc_pd,
 	.alloc_ucontext = mana_ib_alloc_ucontext,
 	.create_ah = mana_ib_create_ah,
@@ -24,6 +25,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
 	.create_qp = mana_ib_create_qp,
 	.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
 	.create_wq = mana_ib_create_wq,
+	.dealloc_mw = mana_ib_dealloc_mw,
 	.dealloc_pd = mana_ib_dealloc_pd,
 	.dealloc_ucontext = mana_ib_dealloc_ucontext,
 	.del_gid = mana_ib_gd_del_gid,
@@ -53,6 +55,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
 
 	INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_mw, mana_ib_mw, ibmw),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
 	INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
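
The INIT_RDMA_OBJ_SIZE(ib_mw, ...) entry is what lets the RDMA core allocate
the driver wrapper: .alloc_mw() receives an ib_mw already embedded in a
mana_ib_mw. An illustrative helper (hypothetical, not in the patch — the
driver below open-codes the same container_of() recovery):

/* Hypothetical helper, for illustration only: the core allocates
 * sizeof(struct mana_ib_mw) per MW and hands the embedded ib_mw to the
 * driver, which recovers its wrapper with container_of().
 */
static inline struct mana_ib_mw *to_mana_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mana_ib_mw, ibmw);
}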
drivers/infiniband/hw/mana/mana_ib.h

@@ -125,6 +125,11 @@ struct mana_ib_ah {
 	dma_addr_t dma_handle;
 };
 
+struct mana_ib_mw {
+	struct ib_mw ibmw;
+	mana_handle_t mw_handle;
+};
+
 struct mana_ib_mr {
 	struct ib_mr ibmr;
 	struct ib_umem *umem;
@@ -736,6 +741,9 @@ void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
 int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 
+int mana_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
+int mana_ib_dealloc_mw(struct ib_mw *mw);
+
 struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					  u64 iova, int fd, int mr_access_flags,
					  struct ib_dmah *dmah,
drivers/infiniband/hw/mana/mr.c

@@ -6,7 +6,7 @@
 #include "mana_ib.h"
 
 #define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
-			IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
+			IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND | IB_ZERO_BASED)
 
 #define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
 
@@ -27,6 +27,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
 		flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
 
+	if (access_flags & IB_ACCESS_MW_BIND)
+		flags |= GDMA_ACCESS_FLAG_BIND_MW;
+
 	return flags;
 }
 
@@ -287,6 +290,55 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
 	return ERR_PTR(err);
 }
 
+static int mana_ib_gd_create_mw(struct mana_ib_dev *dev, struct mana_ib_pd *pd, struct ib_mw *ibmw)
+{
+	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
+	struct gdma_context *gc = mdev_to_gc(dev);
+	struct gdma_create_mr_response resp = {};
+	struct gdma_create_mr_request req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req), sizeof(resp));
+	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+	req.pd_handle = pd->pd_handle;
+
+	switch (mw->ibmw.type) {
+	case IB_MW_TYPE_1:
+		req.mr_type = GDMA_MR_TYPE_MW1;
+		break;
+	case IB_MW_TYPE_2:
+		req.mr_type = GDMA_MR_TYPE_MW2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err)
+		return err;
+
+	mw->ibmw.rkey = resp.rkey;
+	mw->mw_handle = resp.mr_handle;
+
+	return 0;
+}
+
+int mana_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+	struct mana_ib_dev *mdev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
+	struct mana_ib_pd *pd = container_of(ibmw->pd, struct mana_ib_pd, ibpd);
+
+	return mana_ib_gd_create_mw(mdev, pd, ibmw);
+}
+
+int mana_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+	struct mana_ib_dev *dev = container_of(ibmw->device, struct mana_ib_dev, ib_dev);
+	struct mana_ib_mw *mw = container_of(ibmw, struct mana_ib_mw, ibmw);
+
+	return mana_ib_gd_destroy_mr(dev, mw->mw_handle);
+}
+
 int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
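
The functions above only create and destroy the window handle; making the
window actually guard memory requires a bind work request, which the commit
message notes user space cannot yet submit on mana. For reference, a type-2
bind through stock libibverbs would look roughly as follows — a sketch under
those assumptions; qp and mr are presumed to be an RC QP and an MR registered
with IBV_ACCESS_MW_BIND, and mana cannot execute this WR yet:

/* Sketch: posting a type-2 MW bind as a send work request. All calls
 * and fields are standard libibverbs; nothing here is mana-specific.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int bind_mw2(struct ibv_qp *qp, struct ibv_mw *mw, struct ibv_mr *mr)
{
	struct ibv_send_wr wr = {}, *bad_wr;

	wr.opcode = IBV_WR_BIND_MW;
	wr.send_flags = IBV_SEND_SIGNALED;
	wr.bind_mw.mw = mw;
	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);	/* new rkey for this bind */
	wr.bind_mw.bind_info.mr = mr;
	wr.bind_mw.bind_info.addr = (uint64_t)(uintptr_t)mr->addr;
	wr.bind_mw.bind_info.length = mr->length;
	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;

	return ibv_post_send(qp, &wr, &bad_wr);
}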
include/net/mana/gdma.h

@@ -778,6 +778,7 @@ enum gdma_mr_access_flags {
 	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
 	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
 	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+	GDMA_ACCESS_FLAG_BIND_MW = BIT_ULL(5),
 };
 
 /* GDMA_CREATE_DMA_REGION */
@@ -870,6 +871,10 @@ enum gdma_mr_type {
 	GDMA_MR_TYPE_ZBVA = 4,
 	/* Device address MRs */
 	GDMA_MR_TYPE_DM = 5,
+	/* Memory Window type 1 */
+	GDMA_MR_TYPE_MW1 = 6,
+	/* Memory Window type 2 */
+	GDMA_MR_TYPE_MW2 = 7,
 };
 
 struct gdma_create_mr_params {