net: fec: move xdp_rxq_info* APIs out of fec_enet_create_page_pool()

Extract the xdp_rxq_info_* registration logic out of
fec_enet_create_page_pool() into a new helper, fec_xdp_rxq_info_reg(),
so that it can be reused by subsequent patches to support XDP zero-copy
mode.

Signed-off-by: Wei Fang <wei.fang@nxp.com>
Link: https://patch.msgid.link/20260205085742.2685134-13-wei.fang@nxp.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Wei Fang
2026-02-05 16:57:39 +08:00
committed by Paolo Abeni
parent 6729b24c1c
commit edd393153e

View File

@@ -489,23 +489,7 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
return err;
}
err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
if (err < 0)
goto err_free_pp;
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool);
if (err)
goto err_unregister_rxq;
return 0;
err_unregister_rxq:
xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
page_pool_destroy(rxq->page_pool);
rxq->page_pool = NULL;
return err;
}
static void fec_txq_trigger_xmit(struct fec_enet_private *fep,
@@ -3420,6 +3404,38 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.self_test = net_selftest,
};
/* Register an Rx queue's xdp_rxq_info with the XDP core and attach the
 * queue's page_pool as its memory model.
 *
 * Returns 0 on success or a negative errno.  On mem-model registration
 * failure the earlier xdp_rxq_info_reg() is rolled back, so the caller
 * sees an all-or-nothing result and needs no partial cleanup.
 *
 * NOTE(review): assumes rxq->page_pool has already been created (by
 * fec_enet_create_page_pool()) before this is called — confirm the call
 * order in fec_enet_alloc_rxq_buffers().
 */
static int fec_xdp_rxq_info_reg(struct fec_enet_private *fep,
				struct fec_enet_priv_rx_q *rxq)
{
	struct net_device *ndev = fep->netdev;
	int err;

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
	if (err) {
		netdev_err(ndev, "Failed to register xdp rxq info\n");
		return err;
	}

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err) {
		netdev_err(ndev, "Failed to register XDP mem model\n");
		/* Roll back the rxq info registration so the queue is not
		 * left half set up.
		 */
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
		return err;
	}

	return 0;
}
/* Tear down the XDP registration for an Rx queue: detach the memory
 * model, then unregister the rxq info.  The xdp_rxq_info_is_reg()
 * guard makes this a no-op on a queue that was never registered, so
 * teardown paths may call it unconditionally.
 */
static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
{
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
		xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	}
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -3431,6 +3447,9 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
fec_xdp_rxq_info_unreg(rxq);
for (i = 0; i < rxq->bd.ring_size; i++)
page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
false);
@@ -3438,8 +3457,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
page_pool_destroy(rxq->page_pool);
rxq->page_pool = NULL;
}
@@ -3594,6 +3611,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
err = fec_xdp_rxq_info_reg(fep, rxq);
if (err)
goto err_alloc;
return 0;
err_alloc: