mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-03 21:45:08 -04:00
Merge branch 'sfc-per-q-stats'
Edward Cree says:
====================
sfc: per-queue stats

This series implements the netdev_stat_ops interface for per-queue
statistics in the sfc driver, partly using existing counters that were
originally added for ethtool -S output.

Changed in v4:
 * remove RFC tags

Changed in v3:
 * make TX stats count completions rather than enqueues
 * add new patch #4 to account for XDP TX separately from netdev
   traffic and include it in base_stats
 * move the tx_queue->old_* members out of the fastpath cachelines
 * note on patch #6 that our hw_gso stats still count enqueues
 * RFC since net-next is closed right now

Changed in v2:
 * exclude (dedicated) XDP TXQ stats from per-queue TX stats
 * explain patch #3 better
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -134,6 +134,9 @@ void __ef100_rx_packet(struct efx_channel *channel)
|
||||
goto free_rx_buffer;
|
||||
}
|
||||
|
||||
++rx_queue->rx_packets;
|
||||
rx_queue->rx_bytes += rx_buf->len;
|
||||
|
||||
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
|
||||
goto out;
|
||||
|
||||
@@ -149,8 +152,6 @@ static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
|
||||
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
|
||||
struct efx_nic *efx = rx_queue->efx;
|
||||
|
||||
++rx_queue->rx_packets;
|
||||
|
||||
netif_vdbg(efx, rx_status, efx->net_dev,
|
||||
"RX queue %d received id %x\n",
|
||||
efx_rx_queue_index(rx_queue), index);
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "net_driver.h"
|
||||
#include <net/gre.h>
|
||||
#include <net/udp_tunnel.h>
|
||||
#include <net/netdev_queues.h>
|
||||
#include "efx.h"
|
||||
#include "efx_common.h"
|
||||
#include "efx_channels.h"
|
||||
@@ -626,6 +627,113 @@ static const struct net_device_ops efx_netdev_ops = {
|
||||
.ndo_bpf = efx_xdp
|
||||
};
|
||||
|
||||
static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
|
||||
struct netdev_queue_stats_rx *stats)
|
||||
{
|
||||
struct efx_nic *efx = efx_netdev_priv(net_dev);
|
||||
struct efx_rx_queue *rx_queue;
|
||||
struct efx_channel *channel;
|
||||
|
||||
channel = efx_get_channel(efx, idx);
|
||||
rx_queue = efx_channel_get_rx_queue(channel);
|
||||
/* Count only packets since last time datapath was started */
|
||||
stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
|
||||
stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
|
||||
stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
|
||||
channel->old_n_rx_hw_drops;
|
||||
stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
|
||||
channel->old_n_rx_hw_drop_overruns;
|
||||
}
|
||||
|
||||
static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
|
||||
struct netdev_queue_stats_tx *stats)
|
||||
{
|
||||
struct efx_nic *efx = efx_netdev_priv(net_dev);
|
||||
struct efx_tx_queue *tx_queue;
|
||||
struct efx_channel *channel;
|
||||
|
||||
channel = efx_get_tx_channel(efx, idx);
|
||||
stats->packets = 0;
|
||||
stats->bytes = 0;
|
||||
stats->hw_gso_packets = 0;
|
||||
stats->hw_gso_wire_packets = 0;
|
||||
efx_for_each_channel_tx_queue(tx_queue, channel) {
|
||||
stats->packets += tx_queue->complete_packets -
|
||||
tx_queue->old_complete_packets;
|
||||
stats->bytes += tx_queue->complete_bytes -
|
||||
tx_queue->old_complete_bytes;
|
||||
/* Note that, unlike stats->packets and stats->bytes,
|
||||
* these count TXes enqueued, rather than completed,
|
||||
* which may not be what users expect.
|
||||
*/
|
||||
stats->hw_gso_packets += tx_queue->tso_bursts -
|
||||
tx_queue->old_tso_bursts;
|
||||
stats->hw_gso_wire_packets += tx_queue->tso_packets -
|
||||
tx_queue->old_tso_packets;
|
||||
}
|
||||
}
|
||||
|
||||
static void efx_get_base_stats(struct net_device *net_dev,
|
||||
struct netdev_queue_stats_rx *rx,
|
||||
struct netdev_queue_stats_tx *tx)
|
||||
{
|
||||
struct efx_nic *efx = efx_netdev_priv(net_dev);
|
||||
struct efx_tx_queue *tx_queue;
|
||||
struct efx_rx_queue *rx_queue;
|
||||
struct efx_channel *channel;
|
||||
|
||||
rx->packets = 0;
|
||||
rx->bytes = 0;
|
||||
rx->hw_drops = 0;
|
||||
rx->hw_drop_overruns = 0;
|
||||
tx->packets = 0;
|
||||
tx->bytes = 0;
|
||||
tx->hw_gso_packets = 0;
|
||||
tx->hw_gso_wire_packets = 0;
|
||||
|
||||
/* Count all packets on non-core queues, and packets before last
|
||||
* datapath start on core queues.
|
||||
*/
|
||||
efx_for_each_channel(channel, efx) {
|
||||
rx_queue = efx_channel_get_rx_queue(channel);
|
||||
if (channel->channel >= net_dev->real_num_rx_queues) {
|
||||
rx->packets += rx_queue->rx_packets;
|
||||
rx->bytes += rx_queue->rx_bytes;
|
||||
rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
|
||||
rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
|
||||
} else {
|
||||
rx->packets += rx_queue->old_rx_packets;
|
||||
rx->bytes += rx_queue->old_rx_bytes;
|
||||
rx->hw_drops += channel->old_n_rx_hw_drops;
|
||||
rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
|
||||
}
|
||||
efx_for_each_channel_tx_queue(tx_queue, channel) {
|
||||
if (channel->channel < efx->tx_channel_offset ||
|
||||
channel->channel >= efx->tx_channel_offset +
|
||||
net_dev->real_num_tx_queues) {
|
||||
tx->packets += tx_queue->complete_packets;
|
||||
tx->bytes += tx_queue->complete_bytes;
|
||||
tx->hw_gso_packets += tx_queue->tso_bursts;
|
||||
tx->hw_gso_wire_packets += tx_queue->tso_packets;
|
||||
} else {
|
||||
tx->packets += tx_queue->old_complete_packets;
|
||||
tx->bytes += tx_queue->old_complete_bytes;
|
||||
tx->hw_gso_packets += tx_queue->old_tso_bursts;
|
||||
tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
|
||||
}
|
||||
/* Include XDP TX in device-wide stats */
|
||||
tx->packets += tx_queue->complete_xdp_packets;
|
||||
tx->bytes += tx_queue->complete_xdp_bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static const struct netdev_stat_ops efx_stat_ops = {
|
||||
.get_queue_stats_rx = efx_get_queue_stats_rx,
|
||||
.get_queue_stats_tx = efx_get_queue_stats_tx,
|
||||
.get_base_stats = efx_get_base_stats,
|
||||
};
|
||||
|
||||
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog *old_prog;
|
||||
@@ -716,6 +824,7 @@ static int efx_register_netdev(struct efx_nic *efx)
|
||||
net_dev->watchdog_timeo = 5 * HZ;
|
||||
net_dev->irq = efx->pci_dev->irq;
|
||||
net_dev->netdev_ops = &efx_netdev_ops;
|
||||
net_dev->stat_ops = &efx_stat_ops;
|
||||
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
|
||||
net_dev->priv_flags |= IFF_UNICAST_FLT;
|
||||
net_dev->ethtool_ops = &efx_ethtool_ops;
|
||||
|
||||
@@ -1100,6 +1100,10 @@ void efx_start_channels(struct efx_nic *efx)
|
||||
atomic_inc(&efx->active_queues);
|
||||
}
|
||||
|
||||
/* reset per-queue stats */
|
||||
channel->old_n_rx_hw_drops = efx_get_queue_stat_rx_hw_drops(channel);
|
||||
channel->old_n_rx_hw_drop_overruns = channel->n_rx_nodesc_trunc;
|
||||
|
||||
efx_for_each_channel_rx_queue(rx_queue, channel) {
|
||||
efx_init_rx_queue(rx_queue);
|
||||
atomic_inc(&efx->active_queues);
|
||||
@@ -1209,6 +1213,8 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
|
||||
tx_queue->pkts_compl,
|
||||
tx_queue->bytes_compl);
|
||||
}
|
||||
tx_queue->complete_packets += tx_queue->pkts_compl;
|
||||
tx_queue->complete_bytes += tx_queue->bytes_compl;
|
||||
}
|
||||
|
||||
/* Receive any packets we queued up */
|
||||
|
||||
@@ -43,6 +43,13 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
|
||||
void efx_start_channels(struct efx_nic *efx);
|
||||
void efx_stop_channels(struct efx_nic *efx);
|
||||
|
||||
static inline u64 efx_get_queue_stat_rx_hw_drops(struct efx_channel *channel)
|
||||
{
|
||||
return channel->n_rx_eth_crc_err + channel->n_rx_frm_trunc +
|
||||
channel->n_rx_overlength + channel->n_rx_nodesc_trunc +
|
||||
channel->n_rx_mport_bad;
|
||||
}
|
||||
|
||||
void efx_init_napi_channel(struct efx_channel *channel);
|
||||
void efx_init_napi(struct efx_nic *efx);
|
||||
void efx_fini_napi_channel(struct efx_channel *channel);
|
||||
|
||||
@@ -75,7 +75,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
|
||||
EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
|
||||
EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
|
||||
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
|
||||
@@ -83,8 +82,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_overlength),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
|
||||
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
|
||||
|
||||
@@ -193,6 +193,12 @@ struct efx_tx_buffer {
|
||||
* @initialised: Has hardware queue been initialised?
|
||||
* @timestamping: Is timestamping enabled for this channel?
|
||||
* @xdp_tx: Is this an XDP tx queue?
|
||||
* @old_complete_packets: Value of @complete_packets as of last
|
||||
* efx_init_tx_queue()
|
||||
* @old_complete_bytes: Value of @complete_bytes as of last
|
||||
* efx_init_tx_queue()
|
||||
* @old_tso_bursts: Value of @tso_bursts as of last efx_init_tx_queue()
|
||||
* @old_tso_packets: Value of @tso_packets as of last efx_init_tx_queue()
|
||||
* @read_count: Current read pointer.
|
||||
* This is the number of buffers that have been removed from both rings.
|
||||
* @old_write_count: The value of @write_count when last checked.
|
||||
@@ -202,6 +208,20 @@ struct efx_tx_buffer {
|
||||
* avoid cache-line ping-pong between the xmit path and the
|
||||
* completion path.
|
||||
* @merge_events: Number of TX merged completion events
|
||||
* @bytes_compl: Number of bytes completed during this NAPI poll
|
||||
* (efx_process_channel()). For BQL.
|
||||
* @pkts_compl: Number of packets completed during this NAPI poll.
|
||||
* @complete_packets: Number of packets completed since this struct was
|
||||
* created. Only counts SKB packets, not XDP TX (it accumulates
|
||||
* the same values that are reported to BQL).
|
||||
* @complete_bytes: Number of bytes completed since this struct was
|
||||
* created. For TSO, counts the superframe size, not the sizes of
|
||||
* generated frames on the wire (i.e. the headers are only counted
|
||||
* once)
|
||||
* @complete_xdp_packets: Number of XDP TX packets completed since this
|
||||
* struct was created.
|
||||
* @complete_xdp_bytes: Number of XDP TX bytes completed since this
|
||||
* struct was created.
|
||||
* @completed_timestamp_major: Top part of the most recent tx timestamp.
|
||||
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
|
||||
* @insert_count: Current insert pointer
|
||||
@@ -232,6 +252,7 @@ struct efx_tx_buffer {
|
||||
* @xmit_pending: Are any packets waiting to be pushed to the NIC
|
||||
* @cb_packets: Number of times the TX copybreak feature has been used
|
||||
* @notify_count: Count of notified descriptors to the NIC
|
||||
* @tx_packets: Number of packets sent since this struct was created
|
||||
* @empty_read_count: If the completion path has seen the queue as empty
|
||||
* and the transmission path has not yet checked this, the value of
|
||||
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
|
||||
@@ -255,6 +276,10 @@ struct efx_tx_queue {
|
||||
bool initialised;
|
||||
bool timestamping;
|
||||
bool xdp_tx;
|
||||
unsigned long old_complete_packets;
|
||||
unsigned long old_complete_bytes;
|
||||
unsigned int old_tso_bursts;
|
||||
unsigned int old_tso_packets;
|
||||
|
||||
/* Members used mainly on the completion path */
|
||||
unsigned int read_count ____cacheline_aligned_in_smp;
|
||||
@@ -262,6 +287,10 @@ struct efx_tx_queue {
|
||||
unsigned int merge_events;
|
||||
unsigned int bytes_compl;
|
||||
unsigned int pkts_compl;
|
||||
unsigned long complete_packets;
|
||||
unsigned long complete_bytes;
|
||||
unsigned long complete_xdp_packets;
|
||||
unsigned long complete_xdp_bytes;
|
||||
u32 completed_timestamp_major;
|
||||
u32 completed_timestamp_minor;
|
||||
|
||||
@@ -370,6 +399,10 @@ struct efx_rx_page_state {
|
||||
* @recycle_count: RX buffer recycle counter.
|
||||
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
|
||||
* @grant_work: workitem used to grant credits to the MAE if @grant_credits
|
||||
* @rx_packets: Number of packets received since this struct was created
|
||||
* @rx_bytes: Number of bytes received since this struct was created
|
||||
* @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue()
|
||||
* @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue()
|
||||
* @xdp_rxq_info: XDP specific RX queue information.
|
||||
* @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
|
||||
*/
|
||||
@@ -406,6 +439,9 @@ struct efx_rx_queue {
|
||||
struct work_struct grant_work;
|
||||
/* Statistics to supplement MAC stats */
|
||||
unsigned long rx_packets;
|
||||
unsigned long rx_bytes;
|
||||
unsigned long old_rx_packets;
|
||||
unsigned long old_rx_bytes;
|
||||
struct xdp_rxq_info xdp_rxq_info;
|
||||
bool xdp_rxq_info_valid;
|
||||
};
|
||||
@@ -451,10 +487,8 @@ enum efx_sync_events_state {
|
||||
* @filter_work: Work item for efx_filter_rfs_expire()
|
||||
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
|
||||
* indexed by filter ID
|
||||
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
|
||||
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
|
||||
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
|
||||
* @n_rx_mcast_mismatch: Count of unmatched multicast frames
|
||||
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
|
||||
* @n_rx_overlength: Count of RX_OVERLENGTH errors
|
||||
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
|
||||
@@ -468,6 +502,10 @@ enum efx_sync_events_state {
|
||||
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
|
||||
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
|
||||
* not recognised
|
||||
* @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last
|
||||
* efx_start_channels()
|
||||
* @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last
|
||||
* efx_start_channels()
|
||||
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
|
||||
* __efx_rx_packet(), or zero if there is none
|
||||
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
|
||||
@@ -511,7 +549,6 @@ struct efx_channel {
|
||||
u32 *rps_flow_id;
|
||||
#endif
|
||||
|
||||
unsigned int n_rx_tobe_disc;
|
||||
unsigned int n_rx_ip_hdr_chksum_err;
|
||||
unsigned int n_rx_tcp_udp_chksum_err;
|
||||
unsigned int n_rx_outer_ip_hdr_chksum_err;
|
||||
@@ -519,7 +556,6 @@ struct efx_channel {
|
||||
unsigned int n_rx_inner_ip_hdr_chksum_err;
|
||||
unsigned int n_rx_inner_tcp_udp_chksum_err;
|
||||
unsigned int n_rx_eth_crc_err;
|
||||
unsigned int n_rx_mcast_mismatch;
|
||||
unsigned int n_rx_frm_trunc;
|
||||
unsigned int n_rx_overlength;
|
||||
unsigned int n_skbuff_leaks;
|
||||
@@ -532,6 +568,9 @@ struct efx_channel {
|
||||
unsigned int n_rx_xdp_redirect;
|
||||
unsigned int n_rx_mport_bad;
|
||||
|
||||
unsigned int old_n_rx_hw_drops;
|
||||
unsigned int old_n_rx_hw_drop_overruns;
|
||||
|
||||
unsigned int rx_pkt_n_frags;
|
||||
unsigned int rx_pkt_index;
|
||||
|
||||
|
||||
@@ -125,8 +125,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
|
||||
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
|
||||
struct efx_rx_buffer *rx_buf;
|
||||
|
||||
rx_queue->rx_packets++;
|
||||
|
||||
rx_buf = efx_rx_buffer(rx_queue, index);
|
||||
rx_buf->flags |= flags;
|
||||
|
||||
@@ -394,6 +392,9 @@ void __efx_rx_packet(struct efx_channel *channel)
|
||||
goto out;
|
||||
}
|
||||
|
||||
rx_queue->rx_packets++;
|
||||
rx_queue->rx_bytes += rx_buf->len;
|
||||
|
||||
if (!efx_do_xdp(efx, channel, rx_buf, &eh))
|
||||
goto out;
|
||||
|
||||
|
||||
@@ -241,6 +241,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
|
||||
rx_queue->page_recycle_failed = 0;
|
||||
rx_queue->page_recycle_full = 0;
|
||||
|
||||
rx_queue->old_rx_packets = rx_queue->rx_packets;
|
||||
rx_queue->old_rx_bytes = rx_queue->rx_bytes;
|
||||
|
||||
/* Initialise limit fields */
|
||||
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
|
||||
max_trigger =
|
||||
|
||||
@@ -553,6 +553,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
|
||||
|
||||
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
|
||||
unsigned int pkts_compl = 0, bytes_compl = 0;
|
||||
unsigned int efv_pkts_compl = 0;
|
||||
unsigned int read_ptr;
|
||||
@@ -577,7 +578,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
|
||||
if (buffer->flags & EFX_TX_BUF_SKB)
|
||||
finished = true;
|
||||
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
|
||||
&efv_pkts_compl);
|
||||
&efv_pkts_compl, &xdp_pkts_compl,
|
||||
&xdp_bytes_compl);
|
||||
|
||||
++tx_queue->read_count;
|
||||
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
||||
@@ -585,6 +587,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
|
||||
|
||||
tx_queue->pkts_compl += pkts_compl;
|
||||
tx_queue->bytes_compl += bytes_compl;
|
||||
tx_queue->complete_xdp_packets += xdp_pkts_compl;
|
||||
tx_queue->complete_xdp_bytes += xdp_bytes_compl;
|
||||
|
||||
EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
|
||||
|
||||
|
||||
@@ -86,6 +86,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
tx_queue->completed_timestamp_major = 0;
|
||||
tx_queue->completed_timestamp_minor = 0;
|
||||
|
||||
tx_queue->old_complete_packets = tx_queue->complete_packets;
|
||||
tx_queue->old_complete_bytes = tx_queue->complete_bytes;
|
||||
tx_queue->old_tso_bursts = tx_queue->tso_bursts;
|
||||
tx_queue->old_tso_packets = tx_queue->tso_packets;
|
||||
|
||||
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
|
||||
tx_queue->tso_version = 0;
|
||||
|
||||
@@ -109,12 +114,14 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
|
||||
/* Free any buffers left in the ring */
|
||||
while (tx_queue->read_count != tx_queue->write_count) {
|
||||
unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
|
||||
unsigned int pkts_compl = 0, bytes_compl = 0;
|
||||
unsigned int efv_pkts_compl = 0;
|
||||
|
||||
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
|
||||
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
|
||||
&efv_pkts_compl);
|
||||
&efv_pkts_compl, &xdp_pkts_compl,
|
||||
&xdp_bytes_compl);
|
||||
|
||||
++tx_queue->read_count;
|
||||
}
|
||||
@@ -150,7 +157,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
|
||||
struct efx_tx_buffer *buffer,
|
||||
unsigned int *pkts_compl,
|
||||
unsigned int *bytes_compl,
|
||||
unsigned int *efv_pkts_compl)
|
||||
unsigned int *efv_pkts_compl,
|
||||
unsigned int *xdp_pkts,
|
||||
unsigned int *xdp_bytes)
|
||||
{
|
||||
if (buffer->unmap_len) {
|
||||
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
|
||||
@@ -195,6 +204,10 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
|
||||
tx_queue->queue, tx_queue->read_count);
|
||||
} else if (buffer->flags & EFX_TX_BUF_XDP) {
|
||||
xdp_return_frame_rx_napi(buffer->xdpf);
|
||||
if (xdp_pkts)
|
||||
(*xdp_pkts)++;
|
||||
if (xdp_bytes)
|
||||
(*xdp_bytes) += buffer->xdpf->len;
|
||||
}
|
||||
|
||||
buffer->len = 0;
|
||||
@@ -210,7 +223,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
|
||||
unsigned int index,
|
||||
unsigned int *pkts_compl,
|
||||
unsigned int *bytes_compl,
|
||||
unsigned int *efv_pkts_compl)
|
||||
unsigned int *efv_pkts_compl,
|
||||
unsigned int *xdp_pkts,
|
||||
unsigned int *xdp_bytes)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
unsigned int stop_index, read_ptr;
|
||||
@@ -230,7 +245,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
|
||||
}
|
||||
|
||||
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
|
||||
efv_pkts_compl);
|
||||
efv_pkts_compl, xdp_pkts, xdp_bytes);
|
||||
|
||||
++tx_queue->read_count;
|
||||
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
||||
@@ -253,15 +268,18 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
|
||||
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
|
||||
{
|
||||
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
|
||||
unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
|
||||
unsigned int efv_pkts_compl = 0;
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
|
||||
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
|
||||
|
||||
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
|
||||
&efv_pkts_compl);
|
||||
&efv_pkts_compl, &xdp_pkts_compl, &xdp_bytes_compl);
|
||||
tx_queue->pkts_compl += pkts_compl;
|
||||
tx_queue->bytes_compl += bytes_compl;
|
||||
tx_queue->complete_xdp_packets += xdp_pkts_compl;
|
||||
tx_queue->complete_xdp_bytes += xdp_bytes_compl;
|
||||
|
||||
if (pkts_compl + efv_pkts_compl > 1)
|
||||
++tx_queue->merge_events;
|
||||
@@ -290,6 +308,8 @@ int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
|
||||
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
|
||||
unsigned int insert_count)
|
||||
{
|
||||
unsigned int xdp_bytes_compl = 0;
|
||||
unsigned int xdp_pkts_compl = 0;
|
||||
unsigned int efv_pkts_compl = 0;
|
||||
struct efx_tx_buffer *buffer;
|
||||
unsigned int bytes_compl = 0;
|
||||
@@ -300,7 +320,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
|
||||
--tx_queue->insert_count;
|
||||
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
|
||||
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
|
||||
&efv_pkts_compl);
|
||||
&efv_pkts_compl, &xdp_pkts_compl,
|
||||
&xdp_bytes_compl);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
|
||||
struct efx_tx_buffer *buffer,
|
||||
unsigned int *pkts_compl,
|
||||
unsigned int *bytes_compl,
|
||||
unsigned int *efv_pkts_compl);
|
||||
unsigned int *efv_pkts_compl,
|
||||
unsigned int *xdp_pkts,
|
||||
unsigned int *xdp_bytes);
|
||||
|
||||
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
|
||||
{
|
||||
|
||||
Reference in New Issue
Block a user