Merge branch 'net-xilinx-axienet-enable-adaptive-irq-coalescing-with-dim'

Sean Anderson says:

====================
net: xilinx: axienet: Enable adaptive IRQ coalescing with DIM

To improve performance without sacrificing latency under low load,
enable DIM. While I appreciate not having to write the library myself, I
do think there are many unusual aspects to DIM, as detailed in the last
patch.
====================

Link: https://patch.msgid.link/20250206201036.1516800-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-02-10 18:53:43 -08:00
3 changed files with 265 additions and 79 deletions

View File

@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and SoCs.

View File

@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,9 +113,6 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
#define XAXIDMA_DELAY_SHIFT 24
#define XAXIDMA_COALESCE_SHIFT 16
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
@@ -126,8 +124,7 @@
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
#define XAXIDMA_DFT_RX_THRESHOLD 1
#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -487,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
* @rx_dim: DIM state for the receive queue
* @rx_dim_enabled: Whether DIM is enabled or not
* @rx_irqs: Number of RX interrupts received, sampled by DIM as its event counter
* @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
* @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -497,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
* @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
* @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -532,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
* @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -569,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
struct dim rx_dim;
bool rx_dim_enabled;
u16 rx_irqs;
spinlock_t rx_cr_lock;
u32 rx_dma_cr;
bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -579,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
spinlock_t tx_cr_lock;
u32 tx_dma_cr;
bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -610,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;

View File

@@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
/**
* axienet_usec_to_timer - Calculate IRQ delay timer value
* @lp: Pointer to the axienet_local structure
* @coalesce_usec: Microseconds to convert into timer value
*/
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
static u64 axienet_dma_rate(struct axienet_local *lp)
{
u32 result;
u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
if (lp->axi_clk)
clk_rate = clk_get_rate(lp->axi_clk);
return clk_get_rate(lp->axi_clk);
return 125000000; /* arbitrary guess if no clock rate set */
}
/* 1 Timeout Interval = 125 * (clock period of SG clock) */
result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
XAXIDMA_DELAY_SCALE);
return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
/**
* axienet_calc_cr() - Calculate control register value
* @lp: Device private data
* @count: Number of completions before an interrupt
* @usec: Microseconds after the last completion before an interrupt
*
* Calculate a control register value based on the coalescing settings. The
* run/stop bit is not set.
*/
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
	u32 ctrl = XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK |
		   FIELD_PREP(XAXIDMA_COALESCE_MASK, count);

	/* Leave the delay timer at 0 (disabled) when an interrupt is
	 * generated on the very first packet anyway.
	 */
	if (count > 1) {
		u64 rate = axienet_dma_rate(lp);
		u32 delay;

		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
		delay = DIV64_U64_ROUND_CLOSEST((u64)usec * rate,
						XAXIDMA_DELAY_SCALE);
		delay = min(delay, FIELD_MAX(XAXIDMA_DELAY_MASK));
		ctrl |= XAXIDMA_IRQ_DELAY_MASK |
			FIELD_PREP(XAXIDMA_DELAY_MASK, delay);
	}

	return ctrl;
}
/**
* axienet_coalesce_params() - Extract coalesce parameters from the CR
* @lp: Device private data
* @cr: The control register to parse
* @count: Number of packets before an interrupt
* @usec: Idle time (in usec) before an interrupt
*/
static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
				    u32 *count, u32 *usec)
{
	u64 delay_ticks = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
	u64 rate = axienet_dma_rate(lp);

	/* Reverse the usec -> timer conversion done by axienet_calc_cr() */
	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
	*usec = DIV64_U64_ROUND_CLOSEST(delay_ticks * XAXIDMA_DELAY_SCALE,
					rate);
}
/**
@@ -248,29 +287,11 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
/* Start updating the Rx channel control register */
lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
/* Only set interrupt delay timer if not generating an interrupt on
* the first RX packet. Otherwise leave at 0 to disable delay interrupt.
*/
if (lp->coalesce_count_rx > 1)
lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
<< XAXIDMA_DELAY_SHIFT) |
XAXIDMA_IRQ_DELAY_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
spin_lock_irq(&lp->rx_cr_lock);
/* Start updating the Tx channel control register */
lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
/* Only set interrupt delay timer if not generating an interrupt on
* the first TX packet. Otherwise leave at 0 to disable delay interrupt.
*/
if (lp->coalesce_count_tx > 1)
lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
<< XAXIDMA_DELAY_SHIFT) |
XAXIDMA_IRQ_DELAY_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Start updating the Rx channel control register */
lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
@@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
lp->rx_dma_started = true;
spin_unlock_irq(&lp->rx_cr_lock);
spin_lock_irq(&lp->tx_cr_lock);
/* Start updating the Tx channel control register */
lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
lp->tx_dma_started = true;
spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
spin_lock_irq(&lp->rx_cr_lock);
cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
lp->rx_dma_started = false;
spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
spin_lock_irq(&lp->tx_cr_lock);
cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
lp->tx_dma_started = false;
spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -979,7 +1019,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1241,11 +1283,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
if (READ_ONCE(lp->rx_dim_enabled)) {
struct dim_sample sample = {
.time = ktime_get(),
/* Safe because we are the only writer */
.pkt_ctr = u64_stats_read(&lp->rx_packets),
.byte_ctr = u64_stats_read(&lp->rx_bytes),
.event_ctr = READ_ONCE(lp->rx_irqs),
};
net_dim(&lp->rx_dim, &sample);
}
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1283,11 +1339,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
u32 cr;
spin_lock(&lp->tx_cr_lock);
cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1328,11 +1387,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
u32 cr;
spin_lock(&lp->rx_cr_lock);
cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
spin_unlock(&lp->rx_cr_lock);
__napi_schedule(&lp->napi_rx);
}
}
@@ -1625,6 +1689,7 @@ static int axienet_open(struct net_device *ndev)
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1654,6 +1719,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1998,6 +2064,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
* axienet_update_coalesce_rx() - Set RX CR
* @lp: Device private data
* @cr: Value to write to the RX CR
* @mask: Bits to set from @cr
*/
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->rx_cr_lock);
	/* Update only the bits selected by @mask in the nominal CR value */
	lp->rx_dma_cr = (lp->rx_dma_cr & ~mask) | cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->rx_dma_started) {
		u32 live = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		u32 next = lp->rx_dma_cr;

		/* Don't enable IRQs if they are disabled by NAPI */
		if (!(live & XAXIDMA_IRQ_ALL_MASK))
			next &= ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, next);
	}
	spin_unlock_irq(&lp->rx_cr_lock);
}
/**
* axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
* @lp: Device private data
*/
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
	/* 1, 4, 16, 64, ... packets per interrupt, capped at 255 */
	u32 frames = 1U << (2 * lp->rx_dim.profile_ix);

	return min(frames, 255U);
}
/**
* axienet_rx_dim_work() - Adjust RX DIM settings
* @work: The work struct
*/
static void axienet_rx_dim_work(struct work_struct *work)
{
	struct axienet_local *lp =
		container_of(work, struct axienet_local, rx_dim.work);
	u32 count = axienet_dim_coalesce_count_rx(lp);
	u32 fields = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
		     XAXIDMA_IRQ_ERROR_MASK;

	/* Apply the new packet count; the delay-timer bits are not in the
	 * mask, so the configured delay is left untouched.
	 */
	axienet_update_coalesce_rx(lp, axienet_calc_cr(lp, count, 0), fields);
	lp->rx_dim.state = DIM_START_MEASURE;
}
/**
* axienet_update_coalesce_tx() - Set TX CR
* @lp: Device private data
* @cr: Value to write to the TX CR
* @mask: Bits to set from @cr
*/
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->tx_cr_lock);
	/* Update only the bits selected by @mask in the nominal CR value */
	lp->tx_dma_cr = (lp->tx_dma_cr & ~mask) | cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->tx_dma_started) {
		u32 live = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		u32 next = lp->tx_dma_cr;

		/* Don't enable IRQs if they are disabled by NAPI */
		if (!(live & XAXIDMA_IRQ_ALL_MASK))
			next &= ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, next);
	}
	spin_unlock_irq(&lp->tx_cr_lock);
}
/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
@@ -2018,11 +2165,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
u32 cr;
ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
spin_lock_irq(&lp->rx_cr_lock);
cr = lp->rx_dma_cr;
spin_unlock_irq(&lp->rx_cr_lock);
axienet_coalesce_params(lp, cr,
&ecoalesce->rx_max_coalesced_frames,
&ecoalesce->rx_coalesce_usecs);
spin_lock_irq(&lp->tx_cr_lock);
cr = lp->tx_dma_cr;
spin_unlock_irq(&lp->tx_cr_lock);
axienet_coalesce_params(lp, cr,
&ecoalesce->tx_max_coalesced_frames,
&ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2046,12 +2205,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
NL_SET_ERR_MSG(extack,
"Please stop netif before applying configuration");
return -EBUSY;
}
bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
bool old_dim = lp->rx_dim_enabled;
u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
if (ecoalesce->rx_max_coalesced_frames > 255 ||
ecoalesce->tx_max_coalesced_frames > 255) {
@@ -2065,7 +2221,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
if ((ecoalesce->rx_max_coalesced_frames > 1 &&
if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
!ecoalesce->rx_coalesce_usecs) ||
(ecoalesce->tx_max_coalesced_frames > 1 &&
!ecoalesce->tx_coalesce_usecs)) {
@@ -2074,11 +2230,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
if (new_dim && !old_dim) {
cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
ecoalesce->rx_coalesce_usecs);
} else if (!new_dim) {
if (old_dim) {
WRITE_ONCE(lp->rx_dim_enabled, false);
napi_synchronize(&lp->napi_rx);
flush_work(&lp->rx_dim.work);
}
cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
ecoalesce->rx_coalesce_usecs);
} else {
/* Dummy value for count just to calculate timer */
cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
}
axienet_update_coalesce_rx(lp, cr, mask);
if (new_dim && !old_dim)
WRITE_ONCE(lp->rx_dim_enabled, true);
cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
ecoalesce->tx_coalesce_usecs);
axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
@@ -2316,7 +2492,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS,
ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2858,10 +3035,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
spin_lock_init(&lp->rx_cr_lock);
spin_lock_init(&lp->tx_cr_lock);
INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
lp->rx_dim_enabled = true;
lp->rx_dim.profile_ix = 1;
lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
XAXIDMA_DFT_RX_USEC);
lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)