idpf: fix netdev Tx queue stop/wake
netif_txq_maybe_stop() returns -1, 0, or 1, while idpf_tx_maybe_stop_common() is documented to return 0 or -EBUSY. As a result, Tx queue timeout warnings sometimes appear even though the queue is empty or has at least enough free space to be restarted. Make idpf_tx_maybe_stop_common() inline and have it return true or false, handling the return value of netif_txq_maybe_stop() properly. Use the correct goto in idpf_tx_maybe_stop_splitq() to avoid stopping the queue or incrementing the stops counter twice.

Fixes: 6818c4d5b3 ("idpf: add splitq start_xmit")
Fixes: a5ab9ee0df ("idpf: add singleq start_xmit and napi poll")
Cc: stable@vger.kernel.org # 6.7+
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit e4b398dd82
parent 24eb35b151
committed by Tony Nguyen
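To illustrate the return-convention mismatch the commit message describes, here is a minimal user-space sketch; fake_txq_maybe_stop() is a hypothetical stand-in that follows netif_txq_maybe_stop()'s documented tri-state contract, not driver code:

#include <stdio.h>

/* Hypothetical stand-in for netif_txq_maybe_stop(): per its kernel-doc
 * it returns 0 (queue stopped), 1 (queue left running because enough
 * descriptors are free), or -1 (queue stopped, then re-enabled because
 * descriptors were freed in the meantime). */
static int fake_txq_maybe_stop(int unused, int needed)
{
	return unused >= needed ? 1 : 0;
}

int main(void)
{
	int ret = fake_txq_maybe_stop(128, 8); /* plenty of space -> 1 */

	/* Buggy pattern: treating the tri-state value as 0 / -EBUSY.
	 * The truthy returns 1 and -1 both mean "queue is running", yet
	 * the old callers took them as "stop", stopping a queue that had
	 * space and eventually triggering a Tx timeout. */
	if (ret)
		printf("old check: stop queue (wrong, ret=%d)\n", ret);

	/* Fixed pattern: only ret == 0 means "stopped, report busy". */
	if (!ret)
		printf("new check: queue stopped, return -EBUSY\n");
	else
		printf("new check: queue keeps running (ret=%d)\n", ret);

	return 0;
}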
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -375,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				      IDPF_TX_DESCS_FOR_CTX)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
+		u64_stats_update_begin(&tx_q->stats_sync);
+		u64_stats_inc(&tx_q->q_stats.q_busy);
+		u64_stats_update_end(&tx_q->stats_sync);
+
 		return NETDEV_TX_BUSY;
 	}
 
drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2132,29 +2132,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
-/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
-	struct netdev_queue *nq;
-
-	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
-		return 0;
-
-	u64_stats_update_begin(&tx_q->stats_sync);
-	u64_stats_inc(&tx_q->q_stats.q_busy);
-	u64_stats_update_end(&tx_q->stats_sync);
-
-	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
-	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2166,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 				     unsigned int descs_needed)
 {
 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-		goto splitq_stop;
+		goto out;
 
 	/* If there are too many outstanding completions expected on the
 	 * completion queue, stop the TX queue to give the device some time to
@@ -2185,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 	return 0;
 
 splitq_stop:
+	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
 	u64_stats_update_begin(&tx_q->stats_sync);
 	u64_stats_inc(&tx_q->q_stats.q_busy);
 	u64_stats_update_end(&tx_q->stats_sync);
-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
 
 	return -EBUSY;
 }
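The hunk above is where the "stopping the queue twice" fix lands: the common helper now stops the queue itself, so its failure path jumps straight to out: and only the splitq-specific check passes through splitq_stop:. A compilable toy model of that label layout, with hypothetical stubs standing in for the driver and netdev helpers:

#include <stdbool.h>
#include <stdio.h>

static bool common_check_stopped;	/* models maybe_stop_common()'s result */
static int stop_calls, busy_increments;

static void stop_subqueue(void)          { stop_calls++; }
static void count_busy(void)             { busy_increments++; }
static bool maybe_stop_common(void)      { return common_check_stopped; }
static bool completion_queue_full(void)  { return false; }

/* Mirrors the fixed control flow of idpf_tx_maybe_stop_splitq(): each
 * failure path stops the queue and bumps the busy counter exactly once. */
static int maybe_stop_splitq(void)
{
	if (maybe_stop_common())
		goto out;		/* queue already stopped by common helper */

	if (completion_queue_full())
		goto splitq_stop;	/* splitq-specific stop happens here */

	return 0;

splitq_stop:
	stop_subqueue();
out:
	count_busy();
	return -1;			/* -EBUSY in the driver */
}

int main(void)
{
	common_check_stopped = true;
	maybe_stop_splitq();
	/* No second stop call, one busy increment: prints "stops=0 busy=1". */
	printf("stops=%d busy=%d\n", stop_calls, busy_increments);
	return 0;
}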
@@ -2211,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 	tx_q->next_to_use = val;
 
-	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+		u64_stats_update_begin(&tx_q->stats_sync);
+		u64_stats_inc(&tx_q->q_stats.q_busy);
+		u64_stats_update_end(&tx_q->stats_sync);
+	}
 
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -1018,7 +1018,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
 			   struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 					 struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				  struct idpf_tx_queue *tx_q);
@@ -1027,4 +1026,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+					     u32 needed)
+{
+	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+					  IDPF_DESC_UNUSED(tx_q),
+					  needed, needed);
+}
+
 #endif /* !_IDPF_TXRX_H_ */
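For context on why the negation in the new inline helper is correct: per the kernel-doc for the netif_txq_maybe_stop() family, the return value is 0 when the queue was stopped, 1 when it was left running because enough descriptors are free, and -1 when it was stopped and then re-enabled after descriptors were freed. Only the 0 case means "report busy", and the helper expresses exactly that as a boolean. A minimal stand-alone sketch of the mapping (need_stop() is illustrative, not driver code):

#include <stdbool.h>

/* Maps the tri-state return of netif_subqueue_maybe_stop() to the
 * boolean the new helper exposes: true only when the queue really was
 * stopped and the caller must back off with -EBUSY. */
static bool need_stop(int maybe_stop_ret)
{
	switch (maybe_stop_ret) {
	case 0:		/* stopped: too few free descriptors */
		return true;
	case 1:		/* enough descriptors, queue left running */
	case -1:	/* stopped, then re-enabled: keep transmitting */
	default:
		return false;
	}
}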