bng_en: Add ring memory allocation support

Add a ring allocation/free mechanism that helps
allocate rings (TX/RX/completion) and backing
store memory on the host for the device.
Future patches will use these functions.

Signed-off-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Bhargava Chenna Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
Link: https://patch.msgid.link/20250701143511.280702-6-vikas.gupta@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

--- a/drivers/net/ethernet/broadcom/bnge/Makefile
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile

@@ -5,4 +5,5 @@ obj-$(CONFIG_BNGE) += bng_en.o
 bng_en-y := bnge_core.o \
	    bnge_devlink.o \
	    bnge_hwrm.o \
-	    bnge_hwrm_lib.o
+	    bnge_hwrm_lib.o \
+	    bnge_rmem.o

--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c

@@ -0,0 +1,101 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

#include "bnge.h"
#include "../bnxt/bnxt_hsi.h"
#include "bnge_hwrm_lib.h"
#include "bnge_rmem.h"
void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bd->pdev;
	int i;

	if (!rmem->pg_arr)
		goto skip_pages;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}

skip_pages:
	if (rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		dma_free_coherent(&pdev->dev, pg_tbl_size,
				  rmem->pg_tbl, rmem->dma_pg_tbl);
		rmem->pg_tbl = NULL;
	}

	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
}
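
/* Allocate the ring's DMA-coherent data pages and, when the ring spans
 * multiple pages or levels, a page table whose little-endian entries
 * carry the PTU_PTE_VALID/NEXT_TO_LAST/LAST bits. On -ENOMEM, memory
 * already allocated is left in place for bnge_free_ring() to reclaim.
 */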
int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bd->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->dma_pg_tbl,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}

	return 0;
}
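
A usage note, not part of the patch itself: bnge_alloc_ring() returns
-ENOMEM without unwinding what it has already allocated, so callers are
expected to release partial allocations with bnge_free_ring() on the
error path. A minimal sketch (the wrapper name is hypothetical):

static int bnge_alloc_one_ring(struct bnge_dev *bd,
			       struct bnge_ring_mem_info *rmem)
{
	int rc;

	rc = bnge_alloc_ring(bd, rmem);
	if (rc)
		bnge_free_ring(bd, rmem);	/* safe on partial state */
	return rc;
}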

--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Broadcom */

#ifndef _BNGE_RMEM_H_
#define _BNGE_RMEM_H_

#define PTU_PTE_VALID		0x1UL
#define PTU_PTE_LAST		0x2UL
#define PTU_PTE_NEXT_TO_LAST	0x4UL

struct bnge_ring_mem_info {
	/* Number of pages to next level */
	int		nr_pages;
	int		page_size;
	u16		flags;
#define BNGE_RMEM_VALID_PTE_FLAG	1
#define BNGE_RMEM_RING_PTE_FLAG		2
#define BNGE_RMEM_USE_FULL_PAGE_FLAG	4

	u16		depth;

	/* One slot per data page; filled by bnge_alloc_ring() with the
	 * CPU and DMA address of each page.
	 */
	void		**pg_arr;
	dma_addr_t	*dma_arr;

	/* Page table pointing at the data pages, used when the ring
	 * spans more than one page or level.
	 */
	__le64		*pg_tbl;
	dma_addr_t	dma_pg_tbl;

	/* Optional vmalloc'ed area for per-ring software state */
	int		vmem_size;
	void		**vmem;
};

int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);

#endif /* _BNGE_RMEM_H_ */
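
For context, a minimal sketch of how a later patch might drive this API
(the function name and page size are hypothetical, not from this
series). The caller owns the pg_arr/dma_arr arrays; bnge_alloc_ring()
fills them and, because nr_pages > 1 here, also builds the page table:

static int bnge_alloc_two_page_ring(struct bnge_dev *bd,
				    struct bnge_ring_mem_info *rmem,
				    void *pg_arr[2], dma_addr_t dma_arr[2])
{
	rmem->nr_pages = 2;			/* ring data spans two pages */
	rmem->page_size = 4096;			/* hypothetical ring page size */
	rmem->pg_arr = pg_arr;			/* entries filled on success */
	rmem->dma_arr = dma_arr;
	rmem->flags = BNGE_RMEM_RING_PTE_FLAG;	/* PTEs get VALID/LAST bits */
	rmem->depth = 0;
	rmem->vmem_size = 0;			/* no software-state area */

	return bnge_alloc_ring(bd, rmem);
}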