mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 09:01:36 -04:00
mt76: dma: cache dma map address/len in struct mt76_queue_entry
Accessing them from uncached memory can be expensive, so it's cheaper to cache them.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
@@ -49,6 +49,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76_queue_buf *buf, int nbufs, u32 info,
|
||||
struct sk_buff *skb, void *txwi)
|
||||
{
|
||||
struct mt76_queue_entry *entry;
|
||||
struct mt76_desc *desc;
|
||||
u32 ctrl;
|
||||
int i, idx = -1;
|
||||
@@ -61,16 +62,27 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
for (i = 0; i < nbufs; i += 2, buf += 2) {
|
||||
u32 buf0 = buf[0].addr, buf1 = 0;
|
||||
|
||||
idx = q->head;
|
||||
q->head = (q->head + 1) % q->ndesc;
|
||||
|
||||
desc = &q->desc[idx];
|
||||
entry = &q->entry[idx];
|
||||
|
||||
if (buf[0].skip_unmap)
|
||||
q->entry[q->head].skip_buf0 = true;
|
||||
q->entry[q->head].skip_buf1 = i == nbufs - 1;
|
||||
entry->skip_buf0 = true;
|
||||
entry->skip_buf1 = i == nbufs - 1;
|
||||
|
||||
entry->dma_addr[0] = buf[0].addr;
|
||||
entry->dma_len[0] = buf[0].len;
|
||||
|
||||
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
|
||||
if (i < nbufs - 1) {
|
||||
entry->dma_addr[1] = buf[1].addr;
|
||||
entry->dma_len[1] = buf[1].len;
|
||||
buf1 = buf[1].addr;
|
||||
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
|
||||
if (buf[1].skip_unmap)
|
||||
q->entry[q->head].skip_buf1 = true;
|
||||
entry->skip_buf1 = true;
|
||||
}
|
||||
|
||||
if (i == nbufs - 1)
|
||||
@@ -78,11 +90,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
else if (i == nbufs - 2)
|
||||
ctrl |= MT_DMA_CTL_LAST_SEC1;
|
||||
|
||||
idx = q->head;
|
||||
q->head = (q->head + 1) % q->ndesc;
|
||||
|
||||
desc = &q->desc[idx];
|
||||
|
||||
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
|
||||
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
|
||||
WRITE_ONCE(desc->info, cpu_to_le32(info));
|
||||
@@ -102,24 +109,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
struct mt76_queue_entry *prev_e)
|
||||
{
|
||||
struct mt76_queue_entry *e = &q->entry[idx];
|
||||
__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
|
||||
u32 ctrl = le32_to_cpu(__ctrl);
|
||||
|
||||
if (!e->skip_buf0) {
|
||||
__le32 addr = READ_ONCE(q->desc[idx].buf0);
|
||||
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
|
||||
|
||||
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
|
||||
if (!e->skip_buf0)
|
||||
dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
if (!e->skip_buf1) {
|
||||
__le32 addr = READ_ONCE(q->desc[idx].buf1);
|
||||
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
|
||||
|
||||
dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
|
||||
if (!e->skip_buf1)
|
||||
dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
if (e->txwi == DMA_DUMMY_DATA)
|
||||
e->txwi = NULL;
|
||||
@@ -207,7 +204,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
void *buf = e->buf;
|
||||
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
|
||||
|
||||
buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
|
||||
buf_addr = e->dma_addr[0];
|
||||
if (len) {
|
||||
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
|
||||
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
|
||||
|
||||
@@ -102,6 +102,8 @@ struct mt76_queue_entry {
|
||||
struct urb *urb;
|
||||
int buf_sz;
|
||||
};
|
||||
u32 dma_addr[2];
|
||||
u16 dma_len[2];
|
||||
u16 wcid;
|
||||
bool skip_buf0:1;
|
||||
bool skip_buf1:1;
|
||||
|
||||
Reference in New Issue
Block a user