Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-16 05:31:37 -04:00)
Merge branch 'net-stmmac-descriptor-cleanups-part-2'
Russell King says:

====================
net: stmmac: descriptor cleanups part 2

Part 2 of the stmmac descriptor cleanups.

- rename "priv->mode" to be more descriptive, and do the same in
  function arguments.
- simplify descriptor allocation/initialisation/freeing
- use more descriptive local variable names in stmmac_xmit()
- STMMAC_GET_ENTRY() doesn't get an entry, it moves to the next one.
  Describe this in the macro name.
====================

Link: https://patch.msgid.link/abruRQpjLyMkoUEP@shell.armlinux.org.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
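On the last point in the series description: STMMAC_GET_ENTRY() never fetched a descriptor, it only computes the index of the next slot in a power-of-two sized ring, hence the rename to STMMAC_NEXT_ENTRY(). Below is a minimal, self-contained sketch of that behaviour; only the macro body is taken from the patch, the ring size and the loop are illustrative values, not driver code.

/* Hypothetical standalone demo of the renamed ring-advance macro. */
#include <stdio.h>

/* Same body as in the patch; size must be a power of two. */
#define STMMAC_NEXT_ENTRY(x, size)	((x + 1) & (size - 1))

int main(void)
{
	unsigned int size = 8;	/* illustrative stand-in for dma_conf.dma_tx_size */
	unsigned int entry = 6;
	int i;

	for (i = 0; i < 4; i++) {
		printf("entry = %u\n", entry);	/* prints 6, 7, 0, 1 */
		entry = STMMAC_NEXT_ENTRY(entry, size);
	}
	return 0;
}

The (x + 1) & (size - 1) wrap only acts as a ring advance when size is a power of two, which the ring sizes used with it (for example DMA_DEFAULT_RX_SIZE 512 in the hunks below) satisfy.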
@@ -46,7 +46,7 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
 
 	while (len != 0) {
 		tx_q->tx_skbuff[entry] = NULL;
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 		desc = tx_q->dma_tx + entry;
 
 		if (len > bmax) {
@@ -63,7 +63,7 @@ static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
 #define DMA_MIN_RX_SIZE 64
 #define DMA_MAX_RX_SIZE 1024
 #define DMA_DEFAULT_RX_SIZE 512
-#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
+#define STMMAC_NEXT_ENTRY(x, size) ((x + 1) & (size - 1))
 
 #undef FRAME_FILTER_DEBUG
 /* #define FRAME_FILTER_DEBUG */
@@ -289,12 +289,13 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				   int mode, int end, int bfsize)
+				   u8 descriptor_mode, int end, int bfsize)
 {
 	dwmac4_set_rx_owner(p, disable_rx_ic);
 }
 
-static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, u8 descriptor_mode,
+				   int end)
 {
 	p->des0 = 0;
 	p->des1 = 0;
@@ -303,8 +304,9 @@ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
 }
 
 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				      bool csum_flag, int mode, bool tx_own,
-				      bool ls, unsigned int tot_pkt_len)
+				      bool csum_flag, u8 descriptor_mode,
+				      bool tx_own, bool ls,
+				      unsigned int tot_pkt_len)
 {
 	u32 tdes3 = le32_to_cpu(p->des3);
 
@@ -381,7 +383,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
 	p->des3 = cpu_to_le32(tdes3);
 }
 
-static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+static void dwmac4_release_tx_desc(struct dma_desc *p, u8 descriptor_mode)
 {
 	p->des0 = 0;
 	p->des1 = 0;
@@ -130,12 +130,13 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end, int bfsize)
+				  u8 descriptor_mode, int end, int bfsize)
 {
 	dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
 
-static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
+static void dwxgmac2_init_tx_desc(struct dma_desc *p, u8 descriptor_mode,
+				  int end)
 {
 	p->des0 = 0;
 	p->des1 = 0;
@@ -144,8 +145,9 @@ static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
 }
 
 static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				     bool csum_flag, int mode, bool tx_own,
-				     bool ls, unsigned int tot_pkt_len)
+				     bool csum_flag, u8 descriptor_mode,
+				     bool tx_own, bool ls,
+				     unsigned int tot_pkt_len)
 {
 	u32 tdes3 = le32_to_cpu(p->des3);
 
@@ -219,7 +221,7 @@ static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
 	p->des3 = cpu_to_le32(tdes3);
 }
 
-static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
+static void dwxgmac2_release_tx_desc(struct dma_desc *p, u8 descriptor_mode)
 {
 	p->des0 = 0;
 	p->des1 = 0;
@@ -245,7 +245,7 @@ static int enh_desc_get_rx_status(struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end, int bfsize)
+				  u8 descriptor_mode, int end, int bfsize)
 {
 	int bfsize1;
 
@@ -254,7 +254,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
 	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
 	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		ehn_desc_rx_set_on_chain(p);
 	else
 		ehn_desc_rx_set_on_ring(p, end, bfsize);
@@ -263,10 +263,11 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
 		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
 }
 
-static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+static void enh_desc_init_tx_desc(struct dma_desc *p, u8 descriptor_mode,
+				  int end)
 {
 	p->des0 &= cpu_to_le32(~ETDES0_OWN);
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		enh_desc_end_tx_desc_on_chain(p);
 	else
 		enh_desc_end_tx_desc_on_ring(p, end);
@@ -282,24 +283,25 @@ static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 	p->des0 |= cpu_to_le32(RDES0_OWN);
 }
 
-static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
+static void enh_desc_release_tx_desc(struct dma_desc *p, u8 descriptor_mode)
 {
 	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
 
 	memset(p, 0, offsetof(struct dma_desc, des2));
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		enh_desc_end_tx_desc_on_chain(p);
 	else
 		enh_desc_end_tx_desc_on_ring(p, ter);
 }
 
 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				     bool csum_flag, int mode, bool tx_own,
-				     bool ls, unsigned int tot_pkt_len)
+				     bool csum_flag, u8 descriptor_mode,
+				     bool tx_own, bool ls,
+				     unsigned int tot_pkt_len)
 {
 	u32 tdes0 = le32_to_cpu(p->des0);
 
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		enh_set_tx_desc_len_on_chain(p, len);
 	else
 		enh_set_tx_desc_len_on_ring(p, len);
@@ -57,11 +57,11 @@ static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
 
 	if (priv->chain_mode) {
 		dev_info(priv->device, "Chain mode enabled\n");
-		priv->mode = STMMAC_CHAIN_MODE;
+		priv->descriptor_mode = STMMAC_CHAIN_MODE;
 		mac->mode = &chain_mode_ops;
 	} else {
 		dev_info(priv->device, "Ring mode enabled\n");
-		priv->mode = STMMAC_RING_MODE;
+		priv->descriptor_mode = STMMAC_RING_MODE;
 		mac->mode = &ring_mode_ops;
 	}
 }
@@ -38,21 +38,21 @@ struct dma_edesc;
 /* Descriptors helpers */
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
-	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-			     int end, int bfsize);
+	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic,
+			     u8 descriptor_mode, int end, int bfsize);
 	/* DMA TX descriptor ring initialization */
-	void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
+	void (*init_tx_desc)(struct dma_desc *p, u8 descriptor_mode, int end);
 	/* Invoked by the xmit function to prepare the tx descriptor */
 	void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len,
-			bool csum_flag, int mode, bool tx_own, bool ls,
-			unsigned int tot_pkt_len);
+			bool csum_flag, u8 descriptor_mode, bool tx_own,
+			bool ls, unsigned int tot_pkt_len);
 	void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
 			int len2, bool tx_own, bool ls, unsigned int tcphdrlen,
 			unsigned int tcppayloadlen);
 	/* Set/get the owner of the descriptor */
 	void (*set_tx_owner)(struct dma_desc *p);
 	/* Clean the tx descriptor as soon as the tx irq is received */
-	void (*release_tx_desc)(struct dma_desc *p, int mode);
+	void (*release_tx_desc)(struct dma_desc *p, u8 descriptor_mode);
 	/* Clear interrupt on tx frame completion. When this bit is
 	 * set an interrupt happens as soon as the frame is transmitted */
 	void (*set_tx_ic)(struct dma_desc *p);
@@ -108,8 +108,8 @@ static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
 	return ret;
 }
 
-static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-			       int end, int bfsize)
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+			       u8 descriptor_mode, int end, int bfsize)
 {
 	int bfsize1;
 
@@ -118,7 +118,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
 	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
 	else
 		ndesc_rx_set_on_ring(p, end, bfsize);
@@ -127,10 +127,10 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
 }
 
-static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+static void ndesc_init_tx_desc(struct dma_desc *p, u8 descriptor_mode, int end)
 {
 	p->des0 &= cpu_to_le32(~TDES0_OWN);
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		ndesc_tx_set_on_chain(p);
 	else
 		ndesc_end_tx_desc_on_ring(p, end);
@@ -146,20 +146,21 @@ static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 	p->des0 |= cpu_to_le32(RDES0_OWN);
 }
 
-static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
+static void ndesc_release_tx_desc(struct dma_desc *p, u8 descriptor_mode)
 {
 	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
 
 	memset(p, 0, offsetof(struct dma_desc, des2));
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		ndesc_tx_set_on_chain(p);
 	else
 		ndesc_end_tx_desc_on_ring(p, ter);
 }
 
 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-				  bool csum_flag, int mode, bool tx_own,
-				  bool ls, unsigned int tot_pkt_len)
+				  bool csum_flag, u8 descriptor_mode,
+				  bool tx_own, bool ls,
+				  unsigned int tot_pkt_len)
 {
 	u32 tdes1 = le32_to_cpu(p->des1);
 
@@ -176,7 +177,7 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 
 	p->des1 = cpu_to_le32(tdes1);
 
-	if (mode == STMMAC_CHAIN_MODE)
+	if (descriptor_mode == STMMAC_CHAIN_MODE)
 		norm_set_tx_desc_len_on_chain(p, len);
 	else
 		norm_set_tx_desc_len_on_ring(p, len);
@@ -51,7 +51,7 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
 		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
 				       STMMAC_RING_MODE, 0, false, skb->len);
 		tx_q->tx_skbuff[entry] = NULL;
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 
 		if (priv->extend_desc)
 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -323,7 +323,10 @@ struct stmmac_priv {
 	bool extend_desc;
 	/* chain_mode: requested descriptor mode */
 	bool chain_mode;
-	unsigned int mode;
+	/* descriptor_mode: actual descriptor mode,
+	 * see STMMAC_CHAIN_MODE or STMMAC_RING_MODE
+	 */
+	u8 descriptor_mode;
 	struct kernel_hwtstamp_config tstamp_config;
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_clock_ops;
@@ -1589,7 +1589,8 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
 		desc = stmmac_get_rx_desc(priv, rx_q, i);
 
-		stmmac_init_rx_desc(priv, desc, priv->use_riwt, priv->mode,
+		stmmac_init_rx_desc(priv, desc, priv->use_riwt,
+				    priv->descriptor_mode,
 				    (i == dma_conf->dma_rx_size - 1),
 				    dma_conf->dma_buf_sz);
 	}
@@ -1616,7 +1617,7 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
 		struct dma_desc *p;
 
 		p = stmmac_get_tx_desc(priv, tx_q, i);
-		stmmac_init_tx_desc(priv, p, priv->mode, last);
+		stmmac_init_tx_desc(priv, p, priv->descriptor_mode, last);
 	}
 }
 
@@ -1884,6 +1885,7 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
 			 u32 queue, gfp_t flags)
 {
 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+	void *des;
 	int ret;
 
 	netif_dbg(priv, probe, priv->dev,
@@ -1925,15 +1927,14 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
 	}
 
 	/* Setup the chained descriptor addresses */
-	if (priv->mode == STMMAC_CHAIN_MODE) {
+	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
 		if (priv->extend_desc)
-			stmmac_mode_init(priv, rx_q->dma_erx,
-					 rx_q->dma_rx_phy,
-					 dma_conf->dma_rx_size, 1);
+			des = rx_q->dma_erx;
 		else
-			stmmac_mode_init(priv, rx_q->dma_rx,
-					 rx_q->dma_rx_phy,
-					 dma_conf->dma_rx_size, 0);
+			des = rx_q->dma_rx;
+
+		stmmac_mode_init(priv, des, rx_q->dma_rx_phy,
+				 dma_conf->dma_rx_size, priv->extend_desc);
 	}
 
 	return 0;
@@ -2027,7 +2028,7 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
 		 (u32)tx_q->dma_tx_phy);
 
 	/* Setup the chained descriptor addresses */
-	if (priv->mode == STMMAC_CHAIN_MODE) {
+	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
 		if (priv->extend_desc)
 			stmmac_mode_init(priv, tx_q->dma_etx,
 					 tx_q->dma_tx_phy,
@@ -2147,6 +2148,8 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
 			 u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+	size_t size;
+	void *addr;
 
 	/* Release the DMA RX socket buffers */
 	if (rx_q->xsk_pool)
@@ -2158,14 +2161,14 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
 		rx_q->xsk_pool = NULL;
 
 	/* Free DMA regions of consistent memory previously allocated */
-	if (!priv->extend_desc)
-		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
-				  sizeof(struct dma_desc),
-				  rx_q->dma_rx, rx_q->dma_rx_phy);
+	if (priv->extend_desc)
+		addr = rx_q->dma_erx;
 	else
-		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
-				  sizeof(struct dma_extended_desc),
-				  rx_q->dma_erx, rx_q->dma_rx_phy);
+		addr = rx_q->dma_rx;
+
+	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;
+
+	dma_free_coherent(priv->device, size, addr, rx_q->dma_rx_phy);
 
 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
@@ -2250,6 +2253,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
 	struct page_pool_params pp_params = { 0 };
 	unsigned int dma_buf_sz_pad, num_pages;
 	unsigned int napi_id;
+	size_t size;
+	void *addr;
 	int ret;
 
 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
@@ -2285,24 +2290,17 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
 	if (!rx_q->buf_pool)
 		return -ENOMEM;
 
-	if (priv->extend_desc) {
-		rx_q->dma_erx = dma_alloc_coherent(priv->device,
-						   dma_conf->dma_rx_size *
-						   sizeof(struct dma_extended_desc),
-						   &rx_q->dma_rx_phy,
-						   GFP_KERNEL);
-		if (!rx_q->dma_erx)
-			return -ENOMEM;
+	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;
 
-	} else {
-		rx_q->dma_rx = dma_alloc_coherent(priv->device,
-						  dma_conf->dma_rx_size *
-						  sizeof(struct dma_desc),
-						  &rx_q->dma_rx_phy,
-						  GFP_KERNEL);
-		if (!rx_q->dma_rx)
-			return -ENOMEM;
-	}
+	addr = dma_alloc_coherent(priv->device, size, &rx_q->dma_rx_phy,
+				  GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
+
+	if (priv->extend_desc)
+		rx_q->dma_erx = addr;
+	else
+		rx_q->dma_rx = addr;
 
 	if (stmmac_xdp_is_enabled(priv) &&
 	    test_bit(queue, priv->af_xdp_zc_qps))
@@ -2774,7 +2772,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 		}
 
 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
-				       csum, priv->mode, true, true,
+				       csum, priv->descriptor_mode, true, true,
 				       xdp_desc.len);
 
 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
@@ -2782,7 +2780,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 			xsk_tx_metadata_to_compl(meta,
 						 &tx_q->tx_skbuff_dma[entry].xsk_meta);
 
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 		entry = tx_q->cur_tx;
 	}
 	u64_stats_update_begin(&txq_stats->napi_syncp);
@@ -2948,9 +2946,9 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
 			}
 		}
 
-		stmmac_release_tx_desc(priv, p, priv->mode);
+		stmmac_release_tx_desc(priv, p, priv->descriptor_mode);
 
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 	}
 	tx_q->dirty_tx = entry;
 
@@ -4305,7 +4303,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
 		return false;
 
 	stmmac_set_tx_owner(priv, p);
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+	tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 	return true;
 }
 
@@ -4333,7 +4331,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 	while (tmp_len > 0) {
 		dma_addr_t curr_addr;
 
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
 						priv->dma_conf.dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 
@@ -4475,7 +4473,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		stmmac_set_mss(priv, mss_desc, mss);
 		tx_q->mss = mss;
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
 						priv->dma_conf.dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
@@ -4575,7 +4573,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+	tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4686,12 +4684,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int first_entry, tx_packets;
 	int gso = skb_shinfo(skb)->gso_type;
 	struct stmmac_txq_stats *txq_stats;
+	struct dma_desc *desc, *first_desc;
 	struct dma_edesc *tbs_desc = NULL;
-	struct dma_desc *desc, *first;
 	struct stmmac_tx_queue *tx_q;
 	int i, csum_insertion = 0;
 	int entry, first_tx;
-	dma_addr_t des;
+	dma_addr_t dma_addr;
 	u32 sdu_len;
 
 	tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4758,10 +4756,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	desc = stmmac_get_tx_desc(priv, tx_q, entry);
-	first = desc;
+	first_desc = desc;
 
 	if (has_vlan)
-		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+		stmmac_set_desc_vlan(priv, first_desc, STMMAC_VLAN_INSERT);
 
 	enh_desc = priv->plat->enh_desc;
 	/* To program the descriptors according to the size of the frame */
@@ -4776,25 +4774,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		int len = skb_frag_size(frag);
+		unsigned int frag_size = skb_frag_size(frag);
 		bool last_segment = (i == (nfrags - 1));
 
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[entry]);
 
 		desc = stmmac_get_tx_desc(priv, tx_q, entry);
 
-		des = skb_frag_dma_map(priv->device, frag, 0, len,
-				       DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, des))
+		dma_addr = skb_frag_dma_map(priv->device, frag, 0, frag_size,
+					    DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, dma_addr))
 			goto dma_map_err; /* should reuse desc w/o issues */
 
-		stmmac_set_tx_skb_dma_entry(tx_q, entry, des, len, true);
-		stmmac_set_desc_addr(priv, desc, des);
+		stmmac_set_tx_skb_dma_entry(tx_q, entry, dma_addr, frag_size,
+					    true);
+		stmmac_set_desc_addr(priv, desc, dma_addr);
 
 		/* Prepare the descriptor and set the own bit too */
-		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
-				       priv->mode, 1, last_segment, skb->len);
+		stmmac_prepare_tx_desc(priv, desc, 0, frag_size, csum_insertion,
+				       priv->descriptor_mode, 1, last_segment,
+				       skb->len);
 	}
 
 	stmmac_set_tx_dma_last_segment(tx_q, entry);
@@ -4833,14 +4833,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
 		netdev_dbg(priv->dev,
 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
-			   entry, first, nfrags);
+			   entry, first_desc, nfrags);
 
 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
@@ -4859,7 +4859,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	u64_stats_update_end(&txq_stats->q_syncp);
 
 	if (priv->sarc_type)
-		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+		stmmac_set_desc_sarc(priv, first_desc, priv->sarc_type);
 
 	/* Ready to fill the first descriptor and set the OWN bit w/o any
 	 * problems because all the descriptors are actually ready to be
@@ -4868,15 +4868,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		bool last_segment = (nfrags == 0);
 
-		des = dma_map_single(priv->device, skb->data,
-				     nopaged_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, des))
+		dma_addr = dma_map_single(priv->device, skb->data,
+					  nopaged_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, dma_addr))
 			goto dma_map_err;
 
-		stmmac_set_tx_skb_dma_entry(tx_q, first_entry, des, nopaged_len,
-					    false);
+		stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr,
+					    nopaged_len, false);
 
-		stmmac_set_desc_addr(priv, first, des);
+		stmmac_set_desc_addr(priv, first_desc, dma_addr);
 
 		if (last_segment)
 			stmmac_set_tx_dma_last_segment(tx_q, first_entry);
@@ -4885,13 +4885,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			     priv->hwts_tx_en)) {
 			/* declare that device is doing timestamping */
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-			stmmac_enable_tx_timestamp(priv, first);
+			stmmac_enable_tx_timestamp(priv, first_desc);
 		}
 
 		/* Prepare the first descriptor setting the OWN bit too */
-		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
-				       csum_insertion, priv->mode, 0, last_segment,
-				       skb->len);
+		stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len,
+				       csum_insertion, priv->descriptor_mode,
+				       0, last_segment, skb->len);
 	}
 
 	if (tx_q->tbs & STMMAC_TBS_EN) {
@@ -4901,7 +4901,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
 	}
 
-	stmmac_set_tx_owner(priv, first);
+	stmmac_set_tx_owner(priv, first_desc);
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
@@ -4998,7 +4998,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		dma_wmb();
 		stmmac_set_rx_owner(priv, p, use_rx_wd);
 
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
 	}
 	rx_q->dirty_rx = entry;
 	stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
@@ -5119,7 +5119,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
 
 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
-			       csum, priv->mode, true, true,
+			       csum, priv->descriptor_mode, true, true,
 			       xdpf->len);
 
 	tx_q->tx_count_frames++;
@@ -5139,7 +5139,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
 
-	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
 	tx_q->cur_tx = entry;
 
 	return STMMAC_XDP_TX;
@@ -5370,7 +5370,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 		dma_wmb();
 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
 
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
 	}
 
 	if (rx_desc) {
@@ -5454,7 +5454,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
 			break;
 
 		/* Prefetch the next RX descriptor */
-		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+		rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
 						priv->dma_conf.dma_rx_size);
 		next_entry = rx_q->cur_rx;
 
@@ -5638,7 +5638,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		if (unlikely(status & dma_own))
 			break;
 
-		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+		rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
 						priv->dma_conf.dma_rx_size);
 		next_entry = rx_q->cur_rx;
 
@@ -7432,7 +7432,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	 * is not expected to change this.
 	 */
 	priv->plat->dma_cfg->atds = priv->extend_desc &&
-				    priv->mode == STMMAC_RING_MODE;
+				    priv->descriptor_mode == STMMAC_RING_MODE;
 
 	/* Rx Watchdog is available in the COREs newer than the 3.40.
 	 * In some case, for example on bugged HW this feature