Merge branch 'net-lan966x-fix-page_pool-error-handling-and-error-paths'

David Carlier says:

====================
net: lan966x: fix page_pool error handling and error paths

This series fixes error handling around the lan966x page pool:

    1/3 adds the missing IS_ERR check after page_pool_create(), preventing
        a kernel oops when the error pointer flows into
        xdp_rxq_info_reg_mem_model().

    2/3 plugs page pool leaks in the lan966x_fdma_rx_alloc() and
        lan966x_fdma_init() error paths, now reachable after 1/3.

    3/3 fixes a use-after-free and page pool leak in the
        lan966x_fdma_reload() restore path, where the hardware could
        resume DMA into pages already returned to the page pool.
====================

Link: https://patch.msgid.link/20260405055241.35767-1-devnexen@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
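
For readers outside the driver, here is a minimal sketch of the pattern patches 1/3 and 2/3 apply. setup_rx(), struct my_rx and alloc_rx_ring() are hypothetical stand-ins, not lan966x code; only the page_pool_* calls are the regular kernel API. page_pool_create() reports failure with an ERR_PTR() rather than NULL, so the result must be checked with IS_ERR() before it is used, and a pool that was created successfully must be torn down with page_pool_destroy() when a later setup step fails:

/*
 * Minimal sketch only -- not lan966x code. setup_rx(), struct my_rx and
 * alloc_rx_ring() are hypothetical stand-ins; the page_pool_* calls are
 * the regular kernel API (<net/page_pool/types.h> on recent kernels).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <net/page_pool/types.h>

struct my_rx {
        struct page_pool *page_pool;
        /* ... descriptor ring state ... */
};

/* Hypothetical stand-in for a later setup step such as a coherent-DMA alloc. */
static int alloc_rx_ring(struct device *dev, struct my_rx *rx)
{
        return 0;
}

static int setup_rx(struct device *dev, struct my_rx *rx,
                    const struct page_pool_params *pp_params)
{
        int err;

        /* 1/3: page_pool_create() returns an ERR_PTR(), never NULL */
        rx->page_pool = page_pool_create(pp_params);
        if (IS_ERR(rx->page_pool))
                return PTR_ERR(rx->page_pool);

        /* 2/3: destroy the pool if a later setup step fails */
        err = alloc_rx_ring(dev, rx);
        if (err) {
                page_pool_destroy(rx->page_pool);
                return err;
        }

        return 0;
}

Checking with IS_ERR() rather than a NULL test matters here because an error pointer passed on to xdp_rxq_info_reg_mem_model() is dereferenced as if it were a valid pool, which is the oops 1/3 prevents. Patch 3/3 applies the same discipline to the reload path in the diff below: the old pages stay referenced, and the old pool stays alive, until the new ring is running, and only then are the pages returned with page_pool_put_full_page() and the pool destroyed.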

@@ -91,6 +91,8 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
                pp_params.dma_dir = DMA_BIDIRECTIONAL;
 
        rx->page_pool = page_pool_create(&pp_params);
+       if (unlikely(IS_ERR(rx->page_pool)))
+               return PTR_ERR(rx->page_pool);
 
        for (int i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;
@@ -117,8 +119,10 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
                return PTR_ERR(rx->page_pool);
 
        err = fdma_alloc_coherent(lan966x->dev, fdma);
-       if (err)
+       if (err) {
+               page_pool_destroy(rx->page_pool);
                return err;
+       }
 
        fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
                       FDMA_DCB_STATUS_INTR);
@@ -808,9 +812,15 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 {
+       struct page *(*old_pages)[FDMA_RX_DCB_MAX_DBS];
        struct page_pool *page_pool;
        struct fdma fdma_rx_old;
-       int err;
+       int err, i, j;
+
+       old_pages = kmemdup(lan966x->rx.page, sizeof(lan966x->rx.page),
+                           GFP_KERNEL);
+       if (!old_pages)
+               return -ENOMEM;
 
        /* Store these for later to free them */
        memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
@@ -821,7 +831,6 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
        lan966x_fdma_stop_netdev(lan966x);
        lan966x_fdma_rx_disable(&lan966x->rx);
-       lan966x_fdma_rx_free_pages(&lan966x->rx);
        lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
        lan966x->rx.max_mtu = new_mtu;
        err = lan966x_fdma_rx_alloc(&lan966x->rx);
@@ -829,6 +838,11 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
                goto restore;
        lan966x_fdma_rx_start(&lan966x->rx);
 
+       for (i = 0; i < fdma_rx_old.n_dcbs; ++i)
+               for (j = 0; j < fdma_rx_old.n_dbs; ++j)
+                       page_pool_put_full_page(page_pool,
+                                               old_pages[i][j], false);
+
        fdma_free_coherent(lan966x->dev, &fdma_rx_old);
        page_pool_destroy(page_pool);
@@ -836,12 +850,17 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
        lan966x_fdma_wakeup_netdev(lan966x);
        napi_enable(&lan966x->napi);
 
-       return err;
+       kfree(old_pages);
+
+       return 0;
 restore:
        lan966x->rx.page_pool = page_pool;
        memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
        lan966x_fdma_rx_start(&lan966x->rx);
+       lan966x_fdma_wakeup_netdev(lan966x);
+       napi_enable(&lan966x->napi);
 
+       kfree(old_pages);
        return err;
 }
@@ -955,6 +974,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
        err = lan966x_fdma_tx_alloc(&lan966x->tx);
        if (err) {
                fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
+               page_pool_destroy(lan966x->rx.page_pool);
                return err;
        }