mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-13 07:08:21 -04:00
octeon_ep_vf: update tx/rx stats locally for persistence
Update tx/rx stats locally, so that ndo_get_stats64() can use them and not rely on per-queue resources to obtain statistics. The latter used to cause race conditions when the device was stopped. Signed-off-by: Shinas Rasheed <srasheed@marvell.com> Link: https://patch.msgid.link/20250117094653.2588578-5-srasheed@marvell.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
cc0e510cc8
commit
f840399395
@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
|
||||
iface_tx_stats = &oct->iface_tx_stats;
|
||||
iface_rx_stats = &oct->iface_rx_stats;
|
||||
|
||||
for (q = 0; q < oct->num_oqs; q++) {
|
||||
struct octep_vf_iq *iq = oct->iq[q];
|
||||
struct octep_vf_oq *oq = oct->oq[q];
|
||||
|
||||
tx_busy_errors += iq->stats.tx_busy;
|
||||
rx_alloc_errors += oq->stats.alloc_failures;
|
||||
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
|
||||
tx_busy_errors += oct->stats_iq[q].tx_busy;
|
||||
rx_alloc_errors += oct->stats_oq[q].alloc_failures;
|
||||
}
|
||||
i = 0;
|
||||
data[i++] = rx_alloc_errors;
|
||||
@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
|
||||
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
|
||||
|
||||
/* Per Tx Queue stats */
|
||||
for (q = 0; q < oct->num_iqs; q++) {
|
||||
struct octep_vf_iq *iq = oct->iq[q];
|
||||
|
||||
data[i++] = iq->stats.instr_posted;
|
||||
data[i++] = iq->stats.instr_completed;
|
||||
data[i++] = iq->stats.bytes_sent;
|
||||
data[i++] = iq->stats.tx_busy;
|
||||
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
|
||||
data[i++] = oct->stats_iq[q].instr_posted;
|
||||
data[i++] = oct->stats_iq[q].instr_completed;
|
||||
data[i++] = oct->stats_iq[q].bytes_sent;
|
||||
data[i++] = oct->stats_iq[q].tx_busy;
|
||||
}
|
||||
|
||||
/* Per Rx Queue stats */
|
||||
for (q = 0; q < oct->num_oqs; q++) {
|
||||
struct octep_vf_oq *oq = oct->oq[q];
|
||||
|
||||
data[i++] = oq->stats.packets;
|
||||
data[i++] = oq->stats.bytes;
|
||||
data[i++] = oq->stats.alloc_failures;
|
||||
data[i++] = oct->stats_oq[q].packets;
|
||||
data[i++] = oct->stats_oq[q].bytes;
|
||||
data[i++] = oct->stats_oq[q].alloc_failures;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
|
||||
* caused queues to get re-enabled after
|
||||
* being stopped
|
||||
*/
|
||||
iq->stats.restart_cnt++;
|
||||
iq->stats->restart_cnt++;
|
||||
fallthrough;
|
||||
case 1: /* Queue left enabled, since IQ is not yet full*/
|
||||
return 0;
|
||||
@@ -731,7 +731,7 @@ static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
|
||||
/* Flush the hw descriptors before writing to doorbell */
|
||||
smp_wmb();
|
||||
writel(iq->fill_cnt, iq->doorbell_reg);
|
||||
iq->stats.instr_posted += iq->fill_cnt;
|
||||
iq->stats->instr_posted += iq->fill_cnt;
|
||||
iq->fill_cnt = 0;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
@@ -786,14 +786,11 @@ static void octep_vf_get_stats64(struct net_device *netdev,
|
||||
tx_bytes = 0;
|
||||
rx_packets = 0;
|
||||
rx_bytes = 0;
|
||||
for (q = 0; q < oct->num_oqs; q++) {
|
||||
struct octep_vf_iq *iq = oct->iq[q];
|
||||
struct octep_vf_oq *oq = oct->oq[q];
|
||||
|
||||
tx_packets += iq->stats.instr_completed;
|
||||
tx_bytes += iq->stats.bytes_sent;
|
||||
rx_packets += oq->stats.packets;
|
||||
rx_bytes += oq->stats.bytes;
|
||||
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
|
||||
tx_packets += oct->stats_iq[q].instr_completed;
|
||||
tx_bytes += oct->stats_iq[q].bytes_sent;
|
||||
rx_packets += oct->stats_oq[q].packets;
|
||||
rx_bytes += oct->stats_oq[q].bytes;
|
||||
}
|
||||
stats->tx_packets = tx_packets;
|
||||
stats->tx_bytes = tx_bytes;
|
||||
|
||||
@@ -246,11 +246,17 @@ struct octep_vf_device {
|
||||
/* Pointers to Octeon Tx queues */
|
||||
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
|
||||
|
||||
/* Per iq stats */
|
||||
struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
|
||||
|
||||
/* Rx queues (OQ: Output Queue) */
|
||||
u16 num_oqs;
|
||||
/* Pointers to Octeon Rx queues */
|
||||
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
|
||||
|
||||
/* Per oq stats */
|
||||
struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
|
||||
|
||||
/* Hardware port number of the PCIe interface */
|
||||
u16 pcie_port;
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
|
||||
page = dev_alloc_page();
|
||||
if (unlikely(!page)) {
|
||||
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
|
||||
oq->stats.alloc_failures++;
|
||||
oq->stats->alloc_failures++;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
|
||||
"OQ-%d buffer refill: DMA mapping error!\n",
|
||||
oq->q_no);
|
||||
put_page(page);
|
||||
oq->stats.alloc_failures++;
|
||||
oq->stats->alloc_failures++;
|
||||
break;
|
||||
}
|
||||
oq->buff_info[refill_idx].page = page;
|
||||
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
|
||||
oq->netdev = oct->netdev;
|
||||
oq->dev = &oct->pdev->dev;
|
||||
oq->q_no = q_no;
|
||||
oq->stats = &oct->stats_oq[q_no];
|
||||
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
|
||||
oq->ring_size_mask = oq->max_count - 1;
|
||||
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
|
||||
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
|
||||
|
||||
oq->host_read_idx = read_idx;
|
||||
oq->refill_count += desc_used;
|
||||
oq->stats.packets += pkt;
|
||||
oq->stats.bytes += rx_bytes;
|
||||
oq->stats->packets += pkt;
|
||||
oq->stats->bytes += rx_bytes;
|
||||
|
||||
return pkt;
|
||||
}
|
||||
|
||||
@@ -187,7 +187,7 @@ struct octep_vf_oq {
|
||||
u8 __iomem *pkts_sent_reg;
|
||||
|
||||
/* Statistics for this OQ. */
|
||||
struct octep_vf_oq_stats stats;
|
||||
struct octep_vf_oq_stats *stats;
|
||||
|
||||
/* Packets pending to be processed */
|
||||
u32 pkts_pending;
|
||||
|
||||
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
|
||||
}
|
||||
|
||||
iq->pkts_processed += compl_pkts;
|
||||
iq->stats.instr_completed += compl_pkts;
|
||||
iq->stats.bytes_sent += compl_bytes;
|
||||
iq->stats.sgentry_sent += compl_sg;
|
||||
iq->stats->instr_completed += compl_pkts;
|
||||
iq->stats->bytes_sent += compl_bytes;
|
||||
iq->stats->sgentry_sent += compl_sg;
|
||||
iq->flush_index = fi;
|
||||
|
||||
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
|
||||
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
|
||||
iq->netdev = oct->netdev;
|
||||
iq->dev = &oct->pdev->dev;
|
||||
iq->q_no = q_no;
|
||||
iq->stats = &oct->stats_iq[q_no];
|
||||
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
|
||||
iq->ring_size_mask = iq->max_count - 1;
|
||||
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
|
||||
|
||||
@@ -129,7 +129,7 @@ struct octep_vf_iq {
|
||||
u16 flush_index;
|
||||
|
||||
/* Statistics for this input queue. */
|
||||
struct octep_vf_iq_stats stats;
|
||||
struct octep_vf_iq_stats *stats;
|
||||
|
||||
/* Pointer to the Virtual Base addr of the input ring. */
|
||||
struct octep_vf_tx_desc_hw *desc_ring;
|
||||
|
||||
Reference in New Issue
Block a user