Merge branch 'add-more-functionality-to-bnge'

Bhargava Marreddy says:

====================
Add more functionality to BNGE

This patch series adds the infrastructure to make the netdevice
functional. It allocates data structures for core resources,
followed by their initialisation and registration with the firmware.
The core resources include the RX, TX, AGG, CMPL, and NQ rings,
as well as the VNIC. RX/TX functionality will be introduced in the
next patch series to keep this one at a reviewable size.
====================

Link: https://patch.msgid.link/20250919174742.24969-1-bhargava.marreddy@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-09-22 17:51:37 -07:00
12 changed files with 3140 additions and 7 deletions

View File

@@ -257,6 +257,7 @@ config BNGE
tristate "Broadcom Ethernet device support"
depends on PCI
select NET_DEVLINK
select PAGE_POOL
help
This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
The module will be called bng_en. To compile this driver as a module,

View File

@@ -102,6 +102,10 @@ struct bnge_dev {
u16 chip_num;
u8 chip_rev;
#if BITS_PER_LONG == 32
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
int db_offset; /* db_offset within db_size */
int db_size;
@@ -129,6 +133,7 @@ struct bnge_dev {
unsigned long state;
#define BNGE_STATE_DRV_REGISTERED 0
#define BNGE_STATE_OPEN 1
u64 fw_cap;
@@ -155,6 +160,7 @@ struct bnge_dev {
u16 rss_indir_tbl_entries;
u32 rss_cap;
u32 rss_hash_cfg;
u16 rx_nr_rings;
u16 tx_nr_rings;
@@ -213,6 +219,27 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
return true;
}
/* Write a 64-bit doorbell value.  On 32-bit kernels the two 32-bit
 * halves written by lo_hi_writeq() must not interleave with another
 * CPU's doorbell write, so the write is serialized with bd->db_lock.
 */
static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
			       void __iomem *addr)
{
#if BITS_PER_LONG == 32
	spin_lock(&bd->db_lock);
	lo_hi_writeq(val, addr);
	spin_unlock(&bd->db_lock);
#else
	writeq(val, addr);
#endif
}
/* For TX and RX ring doorbells: ring the doorbell with the producer
 * index masked and epoch-encoded by DB_RING_IDX(), OR-ed into the
 * ring's constant key bits.
 */
static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
				 u32 idx)
{
	bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
		    db->doorbell);
}
bool bnge_aux_registered(struct bnge_dev *bd);
u16 bnge_aux_get_msix(struct bnge_dev *bd);
#endif /* _BNGE_H_ */

View File

@@ -96,6 +96,16 @@ static void bnge_fw_unregister_dev(struct bnge_dev *bd)
bnge_free_ctx_mem(bd);
}
/* Seed the default RSS hash configuration: 2-tuple IPv4/IPv6 hashing
 * plus 4-tuple hashing for TCP and UDP over both.
 */
static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
{
	bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
static int bnge_fw_register_dev(struct bnge_dev *bd)
{
int rc;
@@ -137,6 +147,8 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
goto err_func_unrgtr;
}
bnge_set_dflt_rss_hash_type(bd);
return 0;
err_func_unrgtr:
@@ -296,6 +308,10 @@ static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_config_uninit;
}
#if BITS_PER_LONG == 32
spin_lock_init(&bd->db_lock);
#endif
rc = bnge_alloc_irqs(bd);
if (rc) {
dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);

View File

@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Broadcom */
#ifndef _BNGE_DB_H_
#define _BNGE_DB_H_
/* 64-bit doorbell */
#define DBR_EPOCH_SFT 24
#define DBR_TOGGLE_SFT 25
#define DBR_XID_SFT 32
#define DBR_PATH_L2 (0x1ULL << 56)
#define DBR_VALID (0x1ULL << 58)
#define DBR_TYPE_SQ (0x0ULL << 60)
#define DBR_TYPE_SRQ (0x2ULL << 60)
#define DBR_TYPE_CQ (0x4ULL << 60)
#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NQ_MASK (0xeULL << 60)
/* Per-ring doorbell state; see DB_RING_IDX()/DB_EPOCH() below. */
struct bnge_db_info {
	void __iomem *doorbell;	/* mapped doorbell register */
	u64 db_key64;		/* constant key bits OR-ed into every write */
	u32 db_ring_mask;	/* ring size - 1, masks the producer index */
	u32 db_epoch_mask;	/* index bit that carries the epoch */
	u8 db_epoch_shift;	/* shift to move the epoch bit into place */
};
#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
((db)->db_epoch_shift))
#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
DB_EPOCH(db, idx))
#endif /* _BNGE_DB_H_ */

View File

@@ -6,6 +6,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>
#include <linux/if_vlan.h>
#include <net/netdev_queues.h>
#include "bnge.h"
#include "bnge_hwrm.h"
@@ -701,3 +703,483 @@ int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
bnge_hwrm_req_drop(bd, req);
return rc;
}
/* Configure VNIC buffer placement modes: jumbo placement always, and
 * header-data-split (HDS) for IPv4/IPv6 when aggregation rings are in
 * use.  The HDS threshold is taken from the netdev's pending queue
 * config.  Returns 0 or a negative errno from the HWRM layer.
 */
int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
	struct hwrm_vnic_plcmodes_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
	req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
	req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);

	if (bnge_is_agg_reqd(bd)) {
		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
		req->enables |=
			cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
		req->hds_threshold = cpu_to_le16(hds_thresh);
	}
	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return bnge_hwrm_req_send(bd, req);
}
/* Allocate one RSS/COS/LB context in firmware and store its id in
 * vnic->fw_rss_cos_lb_ctx[ctx_idx].  Returns 0 or a negative errno.
 */
int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
			     struct bnge_vnic_info *vnic, u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
	if (rc)
		return rc;

	/* Hold so the response buffer stays valid after send. */
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	bnge_hwrm_req_drop(bd, req);

	return rc;
}
/* Fill the common RSS fields of a VNIC_RSS_CFG request: refresh and
 * attach the indirection table, attach the hash key, and set the
 * device's configured hash types.
 * NOTE(review): flags and hash_mode_flags are assigned without
 * cpu_to_le conversion - presumably u8/byte-order-transparent fields;
 * confirm against the HWRM structure definitions.
 */
static void
__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
			 struct hwrm_vnic_rss_cfg_input *req,
			 struct bnge_vnic_info *vnic)
{
	struct bnge_dev *bd = bn->bd;

	/* Rebuild this VNIC's indirection table before pointing the
	 * request at its DMA address.
	 */
	bnge_fill_hw_rss_tbl(bn, vnic);
	req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
	req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
/* Enable (@set_rss) or disable RSS for a VNIC.  When enabling, one
 * VNIC_RSS_CFG request is sent per RSS context, each covering one
 * BNGE_RSS_TABLE_SIZE slice of the indirection table.
 */
int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
			   struct bnge_vnic_info *vnic, bool set_rss)
{
	struct hwrm_vnic_rss_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	dma_addr_t ring_tbl_map;
	u32 i, nr_ctxs;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	if (!set_rss)
		/* Request with hash fields left zeroed disables RSS. */
		return bnge_hwrm_req_send(bd, req);

	__bnge_hwrm_vnic_set_rss(bn, req, vnic);
	ring_tbl_map = vnic->rss_table_dma_addr;
	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);

	/* Hold so the same request can be patched and re-sent per ctx. */
	bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
		req->ring_table_pair_index = i;
		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			goto exit;
	}

exit:
	bnge_hwrm_req_drop(bd, req);
	return rc;
}
/* Configure a VNIC: default RX/completion ring (taken from RX ring 0),
 * MRU, optional VLAN stripping, and RoCE dual mode for the default
 * VNIC when the aux (RoCE) driver is registered.
 */
int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
{
	struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
	struct hwrm_vnic_cfg_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
	if (rc)
		return rc;

	req->default_rx_ring_id =
		cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
	req->default_cmpl_ring_id =
		cpu_to_le16(bnge_cp_ring_for_rx(rxr));
	req->enables =
		cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
			    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
	/* MRU covers the MTU plus the L2 header and one VLAN tag. */
	vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
	req->mru = cpu_to_le16(vnic->mru);
	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	if (bd->flags & BNGE_EN_STRIP_VLAN)
		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
		req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);

	return bnge_hwrm_req_send(bd, req);
}
/* Re-read the RSS hash types from firmware for the default VNIC and
 * cache them in bd->rss_hash_cfg.  Best effort: errors are ignored,
 * and a zero hash_type from firmware keeps the previous value.
 */
void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
{
	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
	struct hwrm_vnic_rss_qcfg_output *resp;
	struct hwrm_vnic_rss_qcfg_input *req;
	struct bnge_dev *bd = bn->bd;

	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
		return;

	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	/* all contexts configured to same hash_type, zero always exists */
	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	resp = bnge_hwrm_req_hold(bd, req);
	if (!bnge_hwrm_req_send(bd, req))
		bd->rss_hash_cfg =
			le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
	bnge_hwrm_req_drop(bd, req);
}
/* Free a firmware L2 filter by its opaque filter id. */
int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_free_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;

	/* filter_id is kept in wire (__le64) format, no conversion. */
	req->l2_filter_id = fltr->base.filter_id;
	return bnge_hwrm_req_send(bd, req);
}
/* Allocate an RX L2 filter matching the filter's destination MAC (and
 * optional inner VLAN), steering traffic to fltr->base.fw_vnic_id.
 * On success the firmware filter id is saved in wire format in
 * fltr->base.filter_id.
 */
int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	struct hwrm_cfa_l2_filter_alloc_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
	req->enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	/* Exact match on the destination MAC (all-ones mask). */
	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
	eth_broadcast_addr(req->l2_addr_mask);

	if (fltr->l2_key.vlan) {
		req->enables |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
		req->num_vlans = 1;
		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
		/* Match the full 12-bit VLAN id. */
		req->l2_ivlan_mask = cpu_to_le16(0xfff);
	}
	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		fltr->base.filter_id = resp->l2_filter_id;
	bnge_hwrm_req_drop(bd, req);
	return rc;
}
/* Program the VNIC RX mask and, when the MCAST bit is set, the DMA'd
 * multicast address table.  Uses the _silent send variant - presumably
 * to suppress HWRM error logging; callers handle the return code.
 */
int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
				 struct bnge_vnic_info *vnic)
{
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
	if (rc)
		return rc;

	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	}
	req->mask = cpu_to_le32(vnic->rx_mask);
	return bnge_hwrm_req_send_silent(bd, req);
}
/* Allocate a VNIC in firmware.  All RSS context slots are reset to
 * INVALID_HW_RING_ID first, and the default VNIC is flagged as such.
 * On success the firmware id is stored in vnic->fw_vnic_id.
 * NOTE(review): @nr_rings is not used in this body - confirm whether
 * it is reserved for a later series or can be dropped.
 */
int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
			 unsigned int nr_rings)
{
	struct hwrm_vnic_alloc_output *resp;
	struct hwrm_vnic_alloc_input *req;
	unsigned int i;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
	if (rc)
		return rc;

	for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	bnge_hwrm_req_drop(bd, req);
	return rc;
}
/* Free one VNIC in firmware if it was allocated; the cached id is
 * reset to INVALID_HW_RING_ID so the call is idempotent.  Send errors
 * are ignored (best-effort teardown).
 */
void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
{
	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input *req;

		if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
			return;

		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);

		bnge_hwrm_req_send(bd, req);
		vnic->fw_vnic_id = INVALID_HW_RING_ID;
	}
}
/* Free one RSS/COS/LB context in firmware and invalidate the cached
 * id.  NOTE(review): unlike bnge_hwrm_vnic_free_one() there is no
 * INVALID_HW_RING_ID guard - presumably callers only pass allocated
 * contexts; confirm at the call sites.
 */
void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
				 struct bnge_vnic_info *vnic, u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;

	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
		return;

	req->rss_cos_lb_ctx_id =
		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
	bnge_hwrm_req_send(bd, req);
	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
/* Free every allocated per-NQ stats context, reusing one held request
 * for all rings.  Entries already INVALID_STATS_CTX_ID are skipped;
 * send errors are ignored (best-effort teardown).
 */
void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
{
	struct hwrm_stat_ctx_free_input *req;
	struct bnge_dev *bd = bn->bd;
	int i;

	if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
		return;

	bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;

		if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
			bnge_hwrm_req_send(bd, req);

			nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	bnge_hwrm_req_drop(bd, req);
}
/* Allocate one stats context per NQ ring, reusing a single held
 * request and patching the per-ring stats DMA address each iteration.
 * The resulting id is mirrored into grp_info for ring programming.
 * Stops at the first failure; contexts already allocated are left for
 * bnge_hwrm_stat_ctx_free() to reclaim.
 */
int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
{
	struct hwrm_stat_ctx_alloc_output *resp;
	struct hwrm_stat_ctx_alloc_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc, i;

	rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
	if (rc)
		return rc;

	req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
	/* Firmware period is in ms; stats_coal_ticks is presumably in
	 * usecs (hence /1000) - confirm against the coalescing code.
	 */
	req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);

	resp = bnge_hwrm_req_hold(bd, req);
	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;

		req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
		rc = bnge_hwrm_req_send(bd, req);
		if (rc)
			break;

		nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
		bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
	}
	bnge_hwrm_req_drop(bd, req);
	return rc;
}
/* Ask firmware to free one hardware ring.  @cmpl_ring_id identifies
 * the completion ring the free-completion should arrive on.  Any
 * failure is logged and collapsed to -EIO.
 */
int hwrm_ring_free_send_msg(struct bnge_net *bn,
			    struct bnge_ring_struct *ring,
			    u32 ring_type, int cmpl_ring_id)
{
	struct hwrm_ring_free_input *req;
	struct bnge_dev *bd = bn->bd;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
	if (rc)
		goto exit;

	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
	req->ring_type = ring_type;
	req->ring_id = cpu_to_le16(ring->fw_ring_id);

	bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	bnge_hwrm_req_drop(bd, req);
exit:
	if (rc) {
		netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
		return -EIO;
	}
	return 0;
}
/* Allocate one hardware ring (TX/RX/AGG/CMPL/NQ) in firmware.
 * @map_index ties the ring to its doorbell/MSI-X slot and, for CMPL
 * rings, selects the backing NQ via grp_info.  On success the firmware
 * ring id is stored in ring->fw_ring_id; failures are logged and
 * collapsed to -EIO.
 */
int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
			     struct bnge_ring_struct *ring,
			     u32 ring_type, u32 map_index)
{
	struct bnge_ring_mem_info *rmem = &ring->ring_mem;
	struct bnge_ring_grp_info *grp_info;
	struct hwrm_ring_alloc_output *resp;
	struct hwrm_ring_alloc_input *req;
	struct bnge_dev *bd = bn->bd;
	u16 ring_id, flags = 0;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
	if (rc)
		goto exit;

	req->enables = 0;
	if (rmem->nr_pages > 1) {
		/* Multi-page ring: hand firmware the page table. */
		req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
		/* Page size is in log2 units */
		req->page_size = BNGE_PAGE_SHIFT;
		req->page_tbl_depth = 1;
	} else {
		req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	req->fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req->logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnge_tx_ring_info *txr;

		txr = container_of(ring, struct bnge_tx_ring_info,
				   tx_ring_struct);
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
		req->length = cpu_to_le32(bn->tx_ring_mask + 1);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->queue_id = cpu_to_le16(ring->queue_id);
		req->flags = cpu_to_le16(flags);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req->length = cpu_to_le32(bn->rx_ring_mask + 1);

		/* Association of rx ring with stats context */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		/* Pad start-of-packet so the IP header lands aligned. */
		if (NET_IP_ALIGN == 2)
			flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
		req->flags = cpu_to_le16(flags);
		break;
	case HWRM_RING_ALLOC_AGG:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
		/* Association of agg ring with rx ring */
		grp_info = &bn->grp_info[ring->grp_idx];
		req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
		req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				    RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req->length = cpu_to_le32(bn->cp_ring_mask + 1);
		/* Association of cp ring with nq */
		grp_info = &bn->grp_info[map_index];
		req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
		req->cq_handle = cpu_to_le64(ring->handle);
		req->enables |=
			cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		break;
	case HWRM_RING_ALLOC_NQ:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req->length = cpu_to_le32(bn->cp_ring_mask + 1);
		req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		/* NOTE(review): returns without bnge_hwrm_req_drop() -
		 * possible request-slot leak if an invalid type is ever
		 * passed; confirm against the HWRM ownership rules.
		 */
		netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
		return -EINVAL;
	}

	resp = bnge_hwrm_req_hold(bd, req);
	rc = bnge_hwrm_req_send(bd, req);
	/* resp stays valid while held; ring_id is only consumed below
	 * when rc == 0.
	 */
	ring_id = le16_to_cpu(resp->ring_id);
	bnge_hwrm_req_drop(bd, req);
exit:
	if (rc) {
		netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}
/* Point firmware async event notifications at completion ring @idx.
 * fid 0xffff addresses the calling function itself.
 */
int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
	req->async_event_cr = cpu_to_le16(idx);
	return bnge_hwrm_req_send(bd, req);
}

View File

@@ -4,6 +4,13 @@
#ifndef _BNGE_HWRM_LIB_H_
#define _BNGE_HWRM_LIB_H_
#define BNGE_PLC_EN_JUMBO_THRES_VALID \
VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
#define BNGE_PLC_EN_HDS_THRES_VALID \
VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
int bnge_hwrm_ver_get(struct bnge_dev *bd);
int bnge_hwrm_func_reset(struct bnge_dev *bd);
int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
@@ -24,4 +31,28 @@ int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
struct bnge_vnic_info *vnic, u16 ctx_idx);
int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
struct bnge_vnic_info *vnic, bool set_rss);
int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
unsigned int nr_rings);
void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
struct bnge_vnic_info *vnic, u16 ctx_idx);
int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
struct bnge_vnic_info *vnic);
void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
u32 ring_type, int cmpl_ring_id);
int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
struct bnge_ring_struct *ring,
u32 ring_type, u32 map_index);
int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
#endif /* _BNGE_HWRM_LIB_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -5,6 +5,9 @@
#define _BNGE_NETDEV_H_
#include <linux/bnxt/hsi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/refcount.h>
#include "bnge_db.h"
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -113,11 +116,25 @@ struct bnge_sw_rx_bd {
};
/* Software state for one RX aggregation-ring buffer. */
struct bnge_sw_rx_agg_bd {
	struct page *page;
	netmem_ref netmem;	/* page_pool netmem reference */
	unsigned int offset;	/* buffer offset within the page */
	dma_addr_t mapping;	/* DMA address of the buffer */
};
#define HWRM_RING_ALLOC_TX 0x1
#define HWRM_RING_ALLOC_RX 0x2
#define HWRM_RING_ALLOC_AGG 0x4
#define HWRM_RING_ALLOC_CMPL 0x8
#define HWRM_RING_ALLOC_NQ 0x10
/* Firmware ids for the rings making up one ring group; the grp_info
 * array in bnge_net is indexed by napi/nq index.
 */
struct bnge_ring_grp_info {
	u16 fw_stats_ctx;	/* stats context id */
	u16 fw_grp_id;		/* ring group id */
	u16 rx_fw_ring_id;	/* RX ring */
	u16 agg_fw_ring_id;	/* RX aggregation ring */
	u16 nq_fw_ring_id;	/* notification queue ring */
};
#define BNGE_RX_COPY_THRESH 256
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
@@ -133,6 +150,32 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
* BD because the first TX BD is always a long BD.
*/
#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
(BNGE_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
#define NEXT_RX(idx) ((idx) + 1)
#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
#define NEXT_RX_AGG(idx) ((idx) + 1)
#define BNGE_NQ_HDL_TYPE_SHIFT 24
#define BNGE_NQ_HDL_TYPE_RX 0x00
#define BNGE_NQ_HDL_TYPE_TX 0x01
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -164,6 +207,30 @@ struct bnge_net {
struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
u32 hash_seed;
u64 toeplitz_prefix;
struct bnge_napi **bnapi;
struct bnge_rx_ring_info *rx_ring;
struct bnge_tx_ring_info *tx_ring;
u16 *tx_ring_map;
enum dma_data_direction rx_dir;
/* grp_info indexed by napi/nq index */
struct bnge_ring_grp_info *grp_info;
struct bnge_vnic_info *vnic_info;
int nr_vnics;
int total_irqs;
u32 tx_wake_thresh;
u16 rx_offset;
u16 rx_dma_offset;
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
u8 rss_hash_key_updated:1;
int rsscos_nr_ctxs;
u32 stats_coal_ticks;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -203,4 +270,185 @@ void bnge_set_ring_params(struct bnge_dev *bd);
#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
#define BNGE_MAX_TXR_PER_NAPI 8
#define bnge_for_each_napi_tx(iter, bnapi, txr) \
for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
(bnapi)->tx_ring[++iter] : NULL)
#define BNGE_SET_NQ_HDL(cpr) \
(((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
/* One hardware stats DMA block plus its software-side mirrors. */
struct bnge_stats_mem {
	u64 *sw_stats;		/* software copy of the counters */
	u64 *hw_masks;		/* per-counter masks (presumably counter widths) */
	void *hw_stats;		/* DMA buffer written by firmware */
	dma_addr_t hw_stats_map; /* DMA address of hw_stats */
	int len;		/* size of hw_stats in bytes */
};
/* One completion ring; children of an NQ (see bnge_nq_ring_info). */
struct bnge_cp_ring_info {
	struct bnge_napi *bnapi;	/* owning NAPI instance */
	dma_addr_t *desc_mapping;	/* per-page descriptor DMA addresses */
	struct tx_cmp **desc_ring;	/* per-page descriptor arrays */
	struct bnge_ring_struct ring_struct;
	u8 cp_ring_type;	/* BNGE_NQ_HDL_TYPE_* (see BNGE_SET_NQ_HDL) */
	u8 cp_idx;		/* index within the parent NQ's cp_ring_arr */
	u32 cp_raw_cons;	/* raw (unmasked) consumer index */
	struct bnge_db_info cp_db;	/* completion-ring doorbell */
};
/* One notification queue (NQ) ring with its stats context and the
 * completion rings it aggregates.
 */
struct bnge_nq_ring_info {
	struct bnge_napi *bnapi;	/* owning NAPI instance */
	dma_addr_t *desc_mapping;	/* per-page descriptor DMA addresses */
	struct nqe_cn **desc_ring;	/* per-page NQ entry arrays */
	struct bnge_ring_struct ring_struct;
	u32 nq_raw_cons;	/* raw (unmasked) consumer index */
	struct bnge_db_info nq_db;	/* NQ doorbell */

	struct bnge_stats_mem stats;	/* per-NQ hardware stats block */
	u32 hw_stats_ctx_id;	/* firmware stats ctx, INVALID_STATS_CTX_ID if none */

	int cp_ring_count;	/* number of child completion rings */
	struct bnge_cp_ring_info *cp_ring_arr;
};
/* Per-RX-ring state: the RX ring proper plus its aggregation ring,
 * software buffer rings and page pools.
 */
struct bnge_rx_ring_info {
	struct bnge_napi *bnapi;	/* owning NAPI instance */
	struct bnge_cp_ring_info *rx_cpr; /* completion ring serving this RX ring */
	u16 rx_prod;		/* RX producer index */
	u16 rx_agg_prod;	/* aggregation ring producer index */
	u16 rx_sw_agg_prod;	/* software agg producer (rx_agg_bmap scan) */
	u16 rx_next_cons;	/* next expected consumer index */
	struct bnge_db_info rx_db;
	struct bnge_db_info rx_agg_db;

	struct rx_bd *rx_desc_ring[MAX_RX_PAGES];	/* HW descriptor pages */
	struct bnge_sw_rx_bd *rx_buf_ring;	/* software buffer ring (vmem) */

	struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
	struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;

	unsigned long *rx_agg_bmap;	/* bitmap of in-use agg buffers */
	u16 rx_agg_bmap_size;

	dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
	dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];

	struct bnge_ring_struct rx_ring_struct;
	struct bnge_ring_struct rx_agg_ring_struct;
	struct page_pool *page_pool;	/* pool for data buffers */
	struct page_pool *head_pool;	/* pool for header buffers (HDS) - TODO confirm */
	bool need_head_pool;
};
/* Per-TX-ring state. */
struct bnge_tx_ring_info {
	struct bnge_napi *bnapi;	/* owning NAPI instance */
	struct bnge_cp_ring_info *tx_cpr; /* completion ring serving this TX ring */
	u16 tx_prod;		/* producer index */
	u16 tx_cons;		/* software consumer index */
	u16 tx_hw_cons;		/* last consumer index reported by hardware */
	u16 txq_index;		/* netdev TX queue index */
	u8 tx_napi_idx;		/* slot in bnapi->tx_ring[] */
	u8 kick_pending;	/* doorbell deferred, still to be rung */
	struct bnge_db_info tx_db;

	struct tx_bd *tx_desc_ring[MAX_TX_PAGES];	/* HW descriptor pages */
	struct bnge_sw_tx_bd *tx_buf_ring;	/* software buffer ring (vmem) */

	dma_addr_t tx_desc_mapping[MAX_TX_PAGES];

	u32 dev_state;
#define BNGE_DEV_STATE_CLOSING	0x1

	struct bnge_ring_struct tx_ring_struct;
};
/* Per-interrupt NAPI context: one NQ plus the RX/TX rings it serves. */
struct bnge_napi {
	struct napi_struct napi;
	struct bnge_net *bn;
	int index;		/* napi/nq index (grp_info index) */

	struct bnge_nq_ring_info nq_ring;
	struct bnge_rx_ring_info *rx_ring;
	struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
};
#define INVALID_STATS_CTX_ID -1
#define BNGE_VNIC_DEFAULT 0
#define BNGE_MAX_UC_ADDRS 4
/* Per-VNIC state: firmware ids, RSS tables/keys, unicast/multicast
 * filter lists and the RX mask.
 */
struct bnge_vnic_info {
	u16 fw_vnic_id;		/* from HWRM_VNIC_ALLOC, INVALID_HW_RING_ID if none */
#define BNGE_MAX_CTX_PER_VNIC	8
	u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
	u16 mru;		/* programmed via HWRM_VNIC_CFG */
	/* index 0 always dev_addr */
	struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
	u16 uc_filter_count;	/* number of active unicast filters */
	u8 *uc_list;		/* unicast MAC list backing store */

	/* RSS indirection table and hash key, DMA-visible to firmware */
	dma_addr_t rss_table_dma_addr;
	__le16 *rss_table;
	dma_addr_t rss_hash_key_dma_addr;
	u64 *rss_hash_key;
	int rss_table_size;
#define BNGE_RSS_TABLE_ENTRIES	64
#define BNGE_RSS_TABLE_SIZE	(BNGE_RSS_TABLE_ENTRIES * 4)
#define BNGE_RSS_TABLE_MAX_TBL	8
#define BNGE_MAX_RSS_TABLE_SIZE	\
	(BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)

	u32 rx_mask;		/* CFA_L2_SET_RX_MASK_REQ_MASK_* bits */

	/* multicast table, DMA'd with HWRM_CFA_L2_SET_RX_MASK */
	u8 *mc_list;
	int mc_list_size;
	int mc_list_count;
	dma_addr_t mc_list_mapping;
#define BNGE_MAX_MC_ADDRS	16

	u32 flags;
#define BNGE_VNIC_RSS_FLAG	1
#define BNGE_VNIC_MCAST_FLAG	4
#define BNGE_VNIC_UCAST_FLAG	8

	u32 vnic_id;		/* driver-side index; BNGE_VNIC_DEFAULT == 0 */
};
/* Common base embedded at the start of every filter type (must be the
 * first member so container-style casts work).
 */
struct bnge_filter_base {
	struct hlist_node hash;	/* linkage in the filter hash table */
	struct list_head list;
	__le64 filter_id;	/* firmware id, kept in wire format */
	u8 type;
#define BNGE_FLTR_TYPE_L2	2
	u8 flags;
	u16 rxq;		/* destination RX queue */
	u16 fw_vnic_id;		/* destination VNIC (HWRM dst_id) */
	u16 vf_idx;
	unsigned long state;
#define BNGE_FLTR_VALID		0
#define BNGE_FLTR_FW_DELETED	2

	struct rcu_head rcu;	/* for RCU-deferred freeing */
};
/* L2 filter match key: destination MAC + VLAN, overlaid with a u32
 * view (filter_key) for hashing/comparison.
 */
struct bnge_l2_key {
	union {
		struct {
			u8 dst_mac_addr[ETH_ALEN];
			u16 vlan;
		};
		u32 filter_key;
	};
};
#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
/* Refcounted L2 (MAC/VLAN) filter. */
struct bnge_l2_filter {
	/* base filter must be the first member */
	struct bnge_filter_base base;
	struct bnge_l2_key l2_key;	/* match key */
	refcount_t refcnt;		/* shared across users of the filter */
};
u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
#endif /* _BNGE_NETDEV_H_ */

View File

@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
return min_t(int, roce_msix, num_online_cpus() + 1);
}
static u16 bnge_aux_get_msix(struct bnge_dev *bd)
u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
if (bnge_is_roce_en(bd))
return bd->aux_num_msix;
@@ -164,7 +164,7 @@ static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
return bnge_fix_rings_count(rx, tx, max_nq, sh);
}
static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
int bnge_cal_nr_rss_ctxs(u16 rx_rings)
{
if (!rx_rings)
return 0;
@@ -184,7 +184,7 @@ static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
return 1;
}
static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
{
return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
BNGE_RSS_TABLE_ENTRIES;

View File

@@ -72,6 +72,8 @@ void bnge_free_irqs(struct bnge_dev *bd);
int bnge_net_init_dflt_config(struct bnge_dev *bd);
void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
void bnge_aux_init_dflt_config(struct bnge_dev *bd);
u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
int bnge_cal_nr_rss_ctxs(u16 rx_rings);
static inline u32
bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)

View File

@@ -95,7 +95,7 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
&rmem->dma_arr[i],
GFP_KERNEL);
if (!rmem->pg_arr[i])
return -ENOMEM;
goto err_free_ring;
if (rmem->ctx_mem)
bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
@@ -116,10 +116,13 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
if (rmem->vmem_size) {
*rmem->vmem = vzalloc(rmem->vmem_size);
if (!(*rmem->vmem))
return -ENOMEM;
goto err_free_ring;
}
return 0;
err_free_ring:
bnge_free_ring(bd, rmem);
return -ENOMEM;
}
static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
@@ -436,3 +439,61 @@ int bnge_alloc_ctx_mem(struct bnge_dev *bd)
return 0;
}
/* Wire up each ring's ring_mem descriptor: page counts/sizes, the
 * per-page descriptor and DMA-address arrays, and the software ring
 * (vmem) backing store, for the NQ, RX, AGG and TX rings of every
 * NAPI instance.
 */
void bnge_init_ring_struct(struct bnge_net *bn)
{
	struct bnge_dev *bd = bn->bd;
	int i, j;

	for (i = 0; i < bd->nq_nr_rings; i++) {
		struct bnge_napi *bnapi = bn->bnapi[i];
		struct bnge_ring_mem_info *rmem;
		struct bnge_nq_ring_info *nqr;
		struct bnge_rx_ring_info *rxr;
		struct bnge_tx_ring_info *txr;
		struct bnge_ring_struct *ring;

		/* NQ ring: descriptor pages only, no software ring. */
		nqr = &bnapi->nq_ring;
		ring = &nqr->ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bn->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)nqr->desc_ring;
		rmem->dma_arr = nqr->desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		/* RX ring plus its software buffer ring. */
		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bn->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		/* RX aggregation ring plus its software buffer ring. */
		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bn->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_buf_ring;

skip_rx:
		/* All TX rings attached to this NAPI instance. */
		bnge_for_each_napi_tx(j, bnapi, txr) {
			ring = &txr->tx_ring_struct;
			rmem = &ring->ring_mem;
			rmem->nr_pages = bn->tx_nr_pages;
			rmem->page_size = HW_TXBD_RING_SIZE;
			rmem->pg_arr = (void **)txr->tx_desc_ring;
			rmem->dma_arr = txr->tx_desc_mapping;
			rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
			rmem->vmem = (void **)&txr->tx_buf_ring;
		}
	}
}

View File

@@ -6,6 +6,7 @@
struct bnge_ctx_mem_type;
struct bnge_dev;
struct bnge_net;
#define PTU_PTE_VALID 0x1UL
#define PTU_PTE_LAST 0x2UL
@@ -180,9 +181,22 @@ struct bnge_ctx_mem_info {
struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
};
/* Generic hardware ring descriptor shared by all ring types. */
struct bnge_ring_struct {
	struct bnge_ring_mem_info ring_mem;	/* backing pages / vmem */

	u16 fw_ring_id;		/* id returned by HWRM_RING_ALLOC */
	union {
		u16 grp_idx;	/* ring-group index (TX/RX/AGG/CMPL) */
		u16 map_idx; /* Used by NQs */
	};
	u32 handle;		/* cq_handle passed to firmware for CMPL rings */
	u8 queue_id;		/* firmware queue id (used for TX rings) */
};
int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
int bnge_alloc_ctx_mem(struct bnge_dev *bd);
void bnge_free_ctx_mem(struct bnge_dev *bd);
void bnge_init_ring_struct(struct bnge_net *bn);
#endif /* _BNGE_RMEM_H_ */