Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2026-05-17 08:24:21 -04:00
smb: client: make use of smbdirect_map_sges_from_iter()
This is basically a copy of smb_extract_iter_to_rdma() moved to common
code. Previously there was an inconsistency: we called
ib_dma_unmap_single(), although the buffers were mapped with
ib_dma_map_page() in smb_set_sge(). Now ib_dma_unmap_page() is used for
consistency. In practice it makes no difference, as ib_dma_unmap_single()
and ib_dma_unmap_page() both operate on a dma_addr_t, and
dma_unmap_single_attrs() is just an alias for dma_unmap_page_attrs().

Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Cc: Namjae Jeon <linkinjeon@kernel.org>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
committed by Steve French
parent 116f3eed36
commit e5fd091663
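To illustrate the consistency point in the message: the buffers are mapped
with ib_dma_map_page(), so the matching teardown is ib_dma_unmap_page() on
the same dma_addr_t. A minimal sketch, not kernel source, assuming a
populated struct ib_sge array; the helper name unmap_mapped_sges() is
hypothetical:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: undo the SGEs mapped so far after a failure.
 * Each entry was produced by ib_dma_map_page(), so ib_dma_unmap_page()
 * is the matching teardown; ib_dma_unmap_single() happened to work only
 * because both take the same dma_addr_t. */
static void unmap_mapped_sges(struct ib_device *dev, struct ib_sge *sge,
			      unsigned int nr_sge,
			      enum dma_data_direction dir)
{
	while (nr_sge--) {
		ib_dma_unmap_page(dev, sge[nr_sge].addr,
				  sge[nr_sge].length, dir);
		sge[nr_sge].addr = 0;
	}
}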
@@ -36,17 +36,6 @@ static int smbd_post_send_empty(struct smbdirect_socket *sc);
 static void destroy_mr_list(struct smbdirect_socket *sc);
 static int allocate_mr_list(struct smbdirect_socket *sc);
 
-struct smb_extract_to_rdma {
-	struct ib_sge		*sge;
-	unsigned int		nr_sge;
-	unsigned int		max_sge;
-	struct ib_device	*device;
-	u32			local_dma_lkey;
-	enum dma_data_direction direction;
-};
-static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
-					struct smb_extract_to_rdma *rdma);
-
 /* Port numbers for SMBD transport */
 #define SMB_PORT	445
 #define SMBD_PORT	5445
@@ -1268,9 +1257,9 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc,
 
 	/* Fill in the data payload to find out how much data we can add */
 	if (iter) {
-		struct smb_extract_to_rdma extract = {
-			.nr_sge		= request->num_sge,
-			.max_sge	= SMBDIRECT_SEND_IO_MAX_SGE,
+		struct smbdirect_map_sges extract = {
+			.num_sge	= request->num_sge,
+			.max_sge	= ARRAY_SIZE(request->sge),
 			.sge		= request->sge,
 			.device		= sc->ib.dev,
 			.local_dma_lkey	= sc->ib.pd->local_dma_lkey,
@@ -1279,12 +1268,11 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc,
 		size_t payload_len = umin(*_remaining_data_length,
 					  sp->max_send_size - sizeof(*packet));
 
-		rc = smb_extract_iter_to_rdma(iter, payload_len,
-					      &extract);
+		rc = smbdirect_map_sges_from_iter(iter, payload_len, &extract);
 		if (rc < 0)
 			goto err_dma;
 		data_length = rc;
-		request->num_sge = extract.nr_sge;
+		request->num_sge = extract.num_sge;
 		*_remaining_data_length -= data_length;
 	} else {
 		data_length = 0;
@@ -2653,222 +2641,3 @@ void smbd_deregister_mr(struct smbdirect_mr_io *mr)
 	if (!kref_put(&mr->kref, smbd_mr_free_locked))
 		mutex_unlock(&mr->mutex);
 }
-
-static bool smb_set_sge(struct smb_extract_to_rdma *rdma,
-			struct page *lowest_page, size_t off, size_t len)
-{
-	struct ib_sge *sge = &rdma->sge[rdma->nr_sge];
-	u64 addr;
-
-	addr = ib_dma_map_page(rdma->device, lowest_page,
-			       off, len, rdma->direction);
-	if (ib_dma_mapping_error(rdma->device, addr))
-		return false;
-
-	sge->addr = addr;
-	sge->length = len;
-	sge->lkey = rdma->local_dma_lkey;
-	rdma->nr_sge++;
-	return true;
-}
-
-/*
- * Extract page fragments from a BVEC-class iterator and add them to an RDMA
- * element list. The pages are not pinned.
- */
-static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
-					struct smb_extract_to_rdma *rdma,
-					ssize_t maxsize)
-{
-	const struct bio_vec *bv = iter->bvec;
-	unsigned long start = iter->iov_offset;
-	unsigned int i;
-	ssize_t ret = 0;
-
-	for (i = 0; i < iter->nr_segs; i++) {
-		size_t off, len;
-
-		len = bv[i].bv_len;
-		if (start >= len) {
-			start -= len;
-			continue;
-		}
-
-		len = min_t(size_t, maxsize, len - start);
-		off = bv[i].bv_offset + start;
-
-		if (!smb_set_sge(rdma, bv[i].bv_page, off, len))
-			return -EIO;
-
-		ret += len;
-		maxsize -= len;
-		if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
-			break;
-		start = 0;
-	}
-
-	if (ret > 0)
-		iov_iter_advance(iter, ret);
-	return ret;
-}
-
-/*
- * Extract fragments from a KVEC-class iterator and add them to an RDMA list.
- * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers.
- * The pages are not pinned.
- */
-static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
-					struct smb_extract_to_rdma *rdma,
-					ssize_t maxsize)
-{
-	const struct kvec *kv = iter->kvec;
-	unsigned long start = iter->iov_offset;
-	unsigned int i;
-	ssize_t ret = 0;
-
-	for (i = 0; i < iter->nr_segs; i++) {
-		struct page *page;
-		unsigned long kaddr;
-		size_t off, len, seg;
-
-		len = kv[i].iov_len;
-		if (start >= len) {
-			start -= len;
-			continue;
-		}
-
-		kaddr = (unsigned long)kv[i].iov_base + start;
-		off = kaddr & ~PAGE_MASK;
-		len = min_t(size_t, maxsize, len - start);
-		kaddr &= PAGE_MASK;
-
-		maxsize -= len;
-		do {
-			seg = min_t(size_t, len, PAGE_SIZE - off);
-
-			if (is_vmalloc_or_module_addr((void *)kaddr))
-				page = vmalloc_to_page((void *)kaddr);
-			else
-				page = virt_to_page((void *)kaddr);
-
-			if (!smb_set_sge(rdma, page, off, seg))
-				return -EIO;
-
-			ret += seg;
-			len -= seg;
-			kaddr += PAGE_SIZE;
-			off = 0;
-		} while (len > 0 && rdma->nr_sge < rdma->max_sge);
-
-		if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
-			break;
-		start = 0;
-	}
-
-	if (ret > 0)
-		iov_iter_advance(iter, ret);
-	return ret;
-}
-
-/*
- * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
- * list. The folios are not pinned.
- */
-static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
-					  struct smb_extract_to_rdma *rdma,
-					  ssize_t maxsize)
-{
-	const struct folio_queue *folioq = iter->folioq;
-	unsigned int slot = iter->folioq_slot;
-	ssize_t ret = 0;
-	size_t offset = iter->iov_offset;
-
-	BUG_ON(!folioq);
-
-	if (slot >= folioq_nr_slots(folioq)) {
-		folioq = folioq->next;
-		if (WARN_ON_ONCE(!folioq))
-			return -EIO;
-		slot = 0;
-	}
-
-	do {
-		struct folio *folio = folioq_folio(folioq, slot);
-		size_t fsize = folioq_folio_size(folioq, slot);
-
-		if (offset < fsize) {
-			size_t part = umin(maxsize, fsize - offset);
-
-			if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
-				return -EIO;
-
-			offset += part;
-			ret += part;
-			maxsize -= part;
-		}
-
-		if (offset >= fsize) {
-			offset = 0;
-			slot++;
-			if (slot >= folioq_nr_slots(folioq)) {
-				if (!folioq->next) {
-					WARN_ON_ONCE(ret < iter->count);
-					break;
-				}
-				folioq = folioq->next;
-				slot = 0;
-			}
-		}
-	} while (rdma->nr_sge < rdma->max_sge && maxsize > 0);
-
-	iter->folioq = folioq;
-	iter->folioq_slot = slot;
-	iter->iov_offset = offset;
-	iter->count -= ret;
-	return ret;
-}
-
-/*
- * Extract page fragments from up to the given amount of the source iterator
- * and build up an RDMA list that refers to all of those bits. The RDMA list
- * is appended to, up to the maximum number of elements set in the parameter
- * block.
- *
- * The extracted page fragments are not pinned or ref'd in any way; if an
- * IOVEC/UBUF-type iterator is to be used, it should be converted to a
- * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some
- * way.
- */
-static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
-					struct smb_extract_to_rdma *rdma)
-{
-	ssize_t ret;
-	int before = rdma->nr_sge;
-
-	switch (iov_iter_type(iter)) {
-	case ITER_BVEC:
-		ret = smb_extract_bvec_to_rdma(iter, rdma, len);
-		break;
-	case ITER_KVEC:
-		ret = smb_extract_kvec_to_rdma(iter, rdma, len);
-		break;
-	case ITER_FOLIOQ:
-		ret = smb_extract_folioq_to_rdma(iter, rdma, len);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		return -EIO;
-	}
-
-	if (ret < 0) {
-		while (rdma->nr_sge > before) {
-			struct ib_sge *sge = &rdma->sge[rdma->nr_sge--];
-
-			ib_dma_unmap_single(rdma->device, sge->addr, sge->length,
-					    rdma->direction);
-			sge->addr = 0;
-		}
-	}
-
-	return ret;
-}
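The removed smb_extract_kvec_to_rdma() above depends on translating a
kernel virtual address to its struct page before mapping. A standalone
sketch of that step; the helper name kaddr_to_page() is made up for
illustration:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper mirroring the lookup in the removed code:
 * vmalloc/module mappings are not part of the linear map, so they need
 * vmalloc_to_page(); kmalloc'd or static buffers can use virt_to_page()
 * directly. */
static struct page *kaddr_to_page(const void *kaddr)
{
	if (is_vmalloc_or_module_addr(kaddr))
		return vmalloc_to_page(kaddr);
	return virt_to_page(kaddr);
}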