mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-29 09:22:53 -04:00
mt76: keep a set of software tx queues per phy

Allows tracking tx scheduling separately per phy.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
This commit is contained in:
@@ -30,7 +30,7 @@ int mt76_queues_read(struct seq_file *s, void *data)
|
||||
struct mt76_dev *dev = dev_get_drvdata(s->private);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
|
||||
for (i = 0; i < __MT_TXQ_MAX; i++) {
|
||||
struct mt76_sw_queue *q = &dev->q_tx[i];
|
||||
|
||||
if (!q->q)
|
||||
|
||||
@@ -141,7 +141,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
|
||||
struct mt76_sw_queue *sq = &dev->q_tx[qid];
|
||||
struct mt76_queue *q = sq->q;
|
||||
struct mt76_queue_entry entry;
|
||||
unsigned int n_swq_queued[4] = {};
|
||||
unsigned int n_swq_queued[8] = {};
|
||||
unsigned int n_queued = 0;
|
||||
bool wake = false;
|
||||
int i, last;
|
||||
@@ -178,13 +178,21 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
|
||||
spin_lock_bh(&q->lock);
|
||||
|
||||
q->queued -= n_queued;
|
||||
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (!n_swq_queued[i])
|
||||
continue;
|
||||
|
||||
dev->q_tx[i].swq_queued -= n_swq_queued[i];
|
||||
}
|
||||
|
||||
/* ext PHY */
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (!n_swq_queued[i])
|
||||
continue;
|
||||
|
||||
dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
|
||||
}
|
||||
|
||||
if (flush)
|
||||
mt76_dma_sync_idx(dev, q);
|
||||
|
||||
|
||||
@@ -412,13 +412,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_rx);
|
||||
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev)
|
||||
bool mt76_has_tx_pending(struct mt76_phy *phy)
|
||||
{
|
||||
struct mt76_dev *dev = phy->dev;
|
||||
struct mt76_queue *q;
|
||||
int i;
|
||||
int i, offset;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
|
||||
q = dev->q_tx[i].q;
|
||||
offset = __MT_TXQ_MAX * (phy != &dev->phy);
|
||||
|
||||
for (i = 0; i < __MT_TXQ_MAX; i++) {
|
||||
q = dev->q_tx[offset + i].q;
|
||||
if (q && q->queued)
|
||||
return true;
|
||||
}
|
||||
@@ -486,7 +489,7 @@ void mt76_set_channel(struct mt76_phy *phy)
|
||||
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
|
||||
int timeout = HZ / 5;
|
||||
|
||||
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
|
||||
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
|
||||
mt76_update_survey(dev);
|
||||
|
||||
phy->chandef = *chandef;
|
||||
|
||||
@@ -498,7 +498,7 @@ struct mt76_dev {
|
||||
u32 ampdu_ref;
|
||||
|
||||
struct list_head txwi_cache;
|
||||
struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
|
||||
struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
|
||||
struct mt76_queue q_rx[__MT_RXQ_MAX];
|
||||
const struct mt76_queue_ops *queue_ops;
|
||||
int tx_dma_idx[4];
|
||||
@@ -752,7 +752,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
|
||||
u16 tids, int nframes,
|
||||
enum ieee80211_frame_release_type reason,
|
||||
bool more_data);
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev);
|
||||
bool mt76_has_tx_pending(struct mt76_phy *phy);
|
||||
void mt76_set_channel(struct mt76_phy *phy);
|
||||
void mt76_update_survey(struct mt76_dev *dev);
|
||||
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
|
||||
|
||||
@@ -1426,7 +1426,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
|
||||
|
||||
mt7603_pse_client_reset(dev);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
|
||||
for (i = 0; i < __MT_TXQ_MAX; i++)
|
||||
mt76_queue_tx_cleanup(dev, i, true);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
|
||||
|
||||
@@ -476,7 +476,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
if (restart)
|
||||
mt76_mcu_restart(dev);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
|
||||
for (i = 0; i < __MT_TXQ_MAX; i++)
|
||||
mt76_queue_tx_cleanup(dev, i, true);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
|
||||
|
||||
@@ -872,7 +872,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
struct mt76_queue *q;
|
||||
int i, j, ret;
|
||||
|
||||
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
|
||||
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
|
||||
HZ / 5);
|
||||
if (!ret) {
|
||||
dev_err(dev->dev, "timed out waiting for pending tx\n");
|
||||
|
||||
Reference in New Issue
Block a user