net: ethernet: ti: am65_cpsw: Drop separate TX completion functions

Drop separate TX completion functions for SKB and XDP. To do that,
use the SW_DATA mechanism to store ndev and skb/xdpf for TX packets.

Use BUILD_BUG_ON_MSG() to fail the build if the SW_DATA size exceeds
what is available, i.e. AM65_CPSW_NAV_SW_DATA_SIZE.

Signed-off-by: Roger Quadros <rogerq@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Roger Quadros
Date:      2025-02-17 09:31:50 +02:00
Committer: David S. Miller
Commit:    ce643fa62a (parent 6d6c7933ce)

2 changed files with 32 additions and 58 deletions
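
To see the mechanism from the commit message in isolation: a small typed struct is carried in the per-descriptor SW_DATA area, so the completion path can recover both the net_device and the packet pointer from one place, and a compile-time check rejects the struct if it outgrows that area. The sketch below is a minimal userspace approximation, not driver code: host_desc, hdesc_get_swdata() and NAV_SW_DATA_SIZE are simplified stand-ins for the CPPI5 host descriptor, cppi5_hdesc_get_swdata() and AM65_CPSW_NAV_SW_DATA_SIZE, and static_assert() plays the role BUILD_BUG_ON_MSG() plays in the driver.

/* sw_data sketch: a typed struct stored in a fixed-size per-descriptor area */
#include <assert.h>
#include <stdalign.h>
#include <stdio.h>

#define NAV_SW_DATA_SIZE 16		/* stand-in for AM65_CPSW_NAV_SW_DATA_SIZE */

struct net_device;			/* opaque here; only pointers are stored */
struct sk_buff;
struct xdp_frame;

struct tx_swdata {			/* mirrors struct am65_cpsw_tx_swdata */
	struct net_device *ndev;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
};

/* compile-time guard: fail the build if the struct no longer fits */
static_assert(sizeof(struct tx_swdata) <= NAV_SW_DATA_SIZE,
	      "TX SW_DATA size exceeds NAV_SW_DATA_SIZE");

struct host_desc {			/* stand-in for the CPPI5 host descriptor */
	alignas(void *) unsigned char sw_data[NAV_SW_DATA_SIZE];
};

static void *hdesc_get_swdata(struct host_desc *desc)	/* stand-in helper */
{
	return desc->sw_data;
}

int main(void)
{
	struct host_desc desc = { { 0 } };
	struct tx_swdata *swdata = hdesc_get_swdata(&desc);
	struct net_device *dummy_ndev = (struct net_device *)0x1;	/* placeholder */
	struct sk_buff *dummy_skb = (struct sk_buff *)0x2;		/* placeholder */

	/* submit path: stash everything the completion path will need */
	swdata->ndev = dummy_ndev;
	swdata->skb = dummy_skb;

	/* completion path: one lookup recovers both, no per-buf-type helper needed */
	printf("ndev=%p skb=%p\n", (void *)swdata->ndev, (void *)swdata->skb);
	return 0;
}

Keeping ndev next to the skb/xdpf union is what lets am65_cpsw_nuss_tx_compl_packets() drop the two per-buf-type helper functions removed in the diff below.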

drivers/net/ethernet/ti/am65-cpsw-nuss.c

@@ -830,19 +830,19 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 {
 	struct am65_cpsw_tx_chn *tx_chn = data;
 	enum am65_cpsw_tx_buf_type buf_type;
+	struct am65_cpsw_tx_swdata *swdata;
 	struct cppi5_host_desc_t *desc_tx;
 	struct xdp_frame *xdpf;
 	struct sk_buff *skb;
-	void **swdata;
 
 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_tx);
 	buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
 	if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
-		skb = *(swdata);
+		skb = swdata->skb;
 		dev_kfree_skb_any(skb);
 	} else {
-		xdpf = *(swdata);
+		xdpf = swdata->xdpf;
 		xdp_return_frame(xdpf);
 	}
@@ -1099,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 	struct cppi5_host_desc_t *host_desc;
+	struct am65_cpsw_tx_swdata *swdata;
 	struct netdev_queue *netif_txq;
 	dma_addr_t dma_desc, dma_buf;
 	u32 pkt_len = xdpf->len;
-	void **swdata;
 	int ret;
 
 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -1132,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
 
 	swdata = cppi5_hdesc_get_swdata(host_desc);
-	*(swdata) = xdpf;
+	swdata->ndev = ndev;
+	swdata->xdpf = xdpf;
 
 	/* Report BQL before sending the packet */
 	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1435,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 	return num_rx;
 }
 
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
-				   dma_addr_t desc_dma)
-{
-	struct cppi5_host_desc_t *desc_tx;
-	struct sk_buff *skb;
-	void **swdata;
-
-	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
-					     desc_dma);
-	swdata = cppi5_hdesc_get_swdata(desc_tx);
-	skb = *(swdata);
-	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
-	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
-	dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
-	return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
-				   struct am65_cpsw_tx_chn *tx_chn,
-				   dma_addr_t desc_dma,
-				   struct net_device **ndev)
-{
-	struct cppi5_host_desc_t *desc_tx;
-	struct am65_cpsw_port *port;
-	struct xdp_frame *xdpf;
-	u32 port_id = 0;
-	void **swdata;
-
-	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
-	cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
-	swdata = cppi5_hdesc_get_swdata(desc_tx);
-	xdpf = *(swdata);
-	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
-	port = am65_common_get_port(common, port_id);
-	dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
-	*ndev = port->ndev;
-
-	return xdpf;
-}
-
 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
 				   struct netdev_queue *netif_txq)
 {
@@ -1503,6 +1458,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 {
 	bool single_port = AM65_CPSW_IS_CPSW2G(common);
 	enum am65_cpsw_tx_buf_type buf_type;
+	struct am65_cpsw_tx_swdata *swdata;
+	struct cppi5_host_desc_t *desc_tx;
 	struct device *dev = common->dev;
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
@@ -1533,15 +1490,18 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 			break;
 		}
 
+		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+						     desc_dma);
+		swdata = cppi5_hdesc_get_swdata(desc_tx);
+		ndev = swdata->ndev;
 		buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
 		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
-			skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
-			ndev = skb->dev;
+			skb = swdata->skb;
+			am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
 			pkt_len = skb->len;
 			napi_consume_skb(skb, budget);
 		} else {
-			xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
-								  desc_dma, &ndev);
+			xdpf = swdata->xdpf;
 			pkt_len = xdpf->len;
 			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
 				xdp_return_frame_rx_napi(xdpf);
@@ -1551,7 +1511,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 		total_bytes += pkt_len;
 		num_tx++;
 
+		am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+		dev_sw_netstats_tx_add(ndev, 1, pkt_len);
 		if (!single_port) {
 			/* as packets from multi ports can be interleaved
 			 * on the same channel, we have to figure out the
@@ -1634,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
 	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
 	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+	struct am65_cpsw_tx_swdata *swdata;
 	struct device *dev = common->dev;
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
 	dma_addr_t desc_dma, buf_dma;
 	int ret, q_idx, i;
-	void **swdata;
 	u32 *psdata;
 	u32 pkt_len;
@@ -1685,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
 	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
 	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
 	swdata = cppi5_hdesc_get_swdata(first_desc);
-	*(swdata) = skb;
+	swdata->ndev = ndev;
+	swdata->skb = skb;
 	psdata = cppi5_hdesc_get_psdata(first_desc);
 
 	/* HW csum offload if enabled */
@@ -3527,6 +3489,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 	__be64 id_temp;
 	int ret, i;
 
+	BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+			 "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+	BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+			 "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
 	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
 	if (!common)
 		return -ENOMEM;

drivers/net/ethernet/ti/am65-cpsw-nuss.h

@@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow {
 	char name[32];
 };
 
+struct am65_cpsw_tx_swdata {
+	struct net_device *ndev;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
+};
+
 struct am65_cpsw_swdata {
 	u32 flow_id;
 	struct page *page;