Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-10 21:14:56 -04:00)
wifi: mt76: do not hold queue lock during initial rx buffer alloc
In dma init or reset scene, full buffer is needed for all rx rings. Since
this is very time consuming, split the function to perform initial
allocation without holding the spinlock. This avoids causing excessive
scheduler latency.

Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
Reviewed-by: Shayne Chen <shayne.chen@mediatek.com>
Reviewed-by: Deren Wu <deren.wu@mediatek.com>
Link: https://patch.msgid.link/57c68a7ce1dd9022fa5e06af2c53d6313f30ec83.1731069062.git.quan.zhou@mediatek.com
Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
@@ -631,7 +631,8 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 	return ret;
 }
 
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		     bool allow_direct)
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		     bool allow_direct)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->ndesc)
 		return 0;
 
-	spin_lock_bh(&q->lock);
-
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf = {};
 		enum dma_data_direction dir;
@@ -674,6 +673,19 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	if (frames || mt76_queue_is_wed_rx(q))
 		mt76_dma_kick_queue(dev, q);
 
+	return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+		     bool allow_direct)
+{
+	int frames;
+
+	if (!q->ndesc)
+		return 0;
+
+	spin_lock_bh(&q->lock);
+	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
 	spin_unlock_bh(&q->lock);
 
 	return frames;
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 		return;
 
 	mt76_dma_sync_idx(dev, q);
-	mt76_dma_rx_fill(dev, q, false);
+	mt76_dma_rx_fill_buf(dev, q, false);
 }
 
 static void
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
 		napi_enable(&dev->napi[i]);
 	}
||||
Reference in New Issue
Block a user