Merge branch 'amd-xgbe-add-support-for-amd-renoir'

Raju Rangoju says:

====================
amd-xgbe: add support for AMD Renoir

Add support for a new AMD Ethernet device called "Renoir". It has a new
PCI ID; add this to the current list of supported devices in the
amd-xgbe driver. Also, the BAR1 addresses cannot be used to access the
PCS registers on the Renoir platform, so use indirect addressing via
SMN instead.
====================

Link: https://patch.msgid.link/20250509155325.720499-1-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Paolo Abeni
2025-05-13 13:29:43 +02:00
5 changed files with 225 additions and 55 deletions

View File

@@ -791,6 +791,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
/* Renoir (V3) PCS indirect-window registers, reached via SMN rather
 * than BAR1 mmio
 */
#define PCS_V3_RN_WINDOW_DEF 0xf8078
#define PCS_V3_RN_WINDOW_SELECT 0xf807c
/* SMN base of the Renoir PCS block; each port's register space is
 * PCS_RN_PORT_ADDR_SIZE bytes (smn_base = base + size * port id)
 */
#define PCS_RN_SMN_BASE_ADDR 0x11e00000
#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6

View File

@@ -11,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -1053,18 +1055,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
int mmd_reg)
{
unsigned long flags;
unsigned int mmd_address, index, offset;
int mmd_data;
if (mmd_reg & XGBE_ADDR_C45)
mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
return (mmd_reg & XGBE_ADDR_C45) ?
mmd_reg & ~XGBE_ADDR_C45 :
(pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
}
/* Translate an MMD register address into the PCS window index and the
 * offset within that window.
 *
 * The PCS registers are accessed using mmio. The underlying management
 * interface uses indirect addressing to access the MMD register sets.
 * This requires accessing of the PCS register in two phases, an
 * address phase and a data phase. The mmio interface is based on
 * 16-bit offsets and values, so the register offset is adjusted by
 * left shifting it 1 bit.
 */
static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
					  unsigned int mmd_address,
					  unsigned int *index,
					  unsigned int *offset)
{
	mmd_address <<= 1;
	*index = mmd_address & ~pdata->xpcs_window_mask;
	*offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
}
/* Read a PCS MMD register on V3 (Renoir) hardware.
 *
 * BAR1 cannot be used to reach the PCS registers on this platform, so
 * both the window select and the data access go through the SMN
 * indirect interface.
 *
 * Returns the 16-bit register value on success, or a negative error
 * code if an SMN access fails.
 */
static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	u32 smn_address;
	u32 mmd_data;	/* u32: amd_smn_read() takes a u32 * value pointer */
	int ret;

	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

	/* Select the PCS window for this register */
	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
	ret = amd_smn_write(0, smn_address, index);
	if (ret)
		return ret;

	/* Read the 32-bit word containing the target 16-bit register */
	ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
	if (ret)
		return ret;

	/* Each 32-bit word holds two 16-bit registers; return the half
	 * selected by the offset.
	 */
	return (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
			      FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
}
/* Write a PCS MMD register on V3 (Renoir) hardware via the SMN
 * indirect interface.
 *
 * Each 32-bit SMN word holds two 16-bit PCS registers, so the write is
 * a read-modify-write: read the containing word, replace the half
 * selected by the offset, and write the word back. Errors are logged
 * and the write is abandoned (the function returns void).
 */
static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned int pci_mmd_data, hi_mask, lo_mask;
	unsigned int mmd_address, index, offset;
	struct pci_dev *dev;
	u32 smn_address;
	int ret;

	dev = pdata->pcidev;
	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

	/* Select the PCS window for this register */
	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
	ret = amd_smn_write(0, smn_address, index);
	if (ret) {
		pci_err(dev, "Failed to write data 0x%x\n", index);
		return;
	}

	/* Read the 32-bit word containing the target 16-bit register */
	ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
	if (ret) {
		pci_err(dev, "Failed to read data\n");
		return;
	}

	/* Merge the new 16-bit value into the half selected by the
	 * offset, preserving the other half of the word.
	 */
	if (offset % 4) {
		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
	} else {
		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
				     FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
	}

	pci_mmd_data = hi_mask | lo_mask;

	/* Re-select the window, then write the merged word back */
	ret = amd_smn_write(0, smn_address, index);
	if (ret) {
		pci_err(dev, "Failed to write data 0x%x\n", index);
		return;
	}

	ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
	if (ret) {
		pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
		return;
	}
}
static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
unsigned int mmd_address, index, offset;
unsigned long flags;
int mmd_data;
mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1092,23 +1185,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
if (mmd_reg & XGBE_ADDR_C45)
mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
* phases, an address phase and a data phase.
*
* The mmio interface is based on 16-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
* offset 1 bit and writing 16 bits of data.
*/
mmd_address <<= 1;
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1123,10 +1202,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
if (mmd_reg & XGBE_ADDR_C45)
mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1151,10 +1227,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
if (mmd_reg & XGBE_ADDR_C45)
mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1181,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
case XGBE_XPCS_ACCESS_V3:
return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1191,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
case XGBE_XPCS_ACCESS_V3:
return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);

View File

@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -98,14 +99,14 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
struct pci_dev *rdev;
unsigned int port_addr_size, reg;
struct device *dev = &pdev->dev;
struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
unsigned int reg;
int bar_mask;
int ret;
struct pci_dev *rdev;
int bar_mask, ret;
u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -165,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (rdev &&
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
} else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
(rdev->device == 0x14b5)) {
pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
switch (rdev->device) {
case XGBE_RV_PCI_DEVICE_ID:
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
break;
case XGBE_YC_PCI_DEVICE_ID:
pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
/* Yellow Carp devices do not need cdr workaround */
pdata->vdata->an_cdr_workaround = 0;
/* Yellow Carp devices do not need cdr workaround */
pdata->vdata->an_cdr_workaround = 0;
/* Yellow Carp devices do not need rrc */
pdata->vdata->enable_rrc = 0;
/* Yellow Carp devices do not need rrc */
pdata->vdata->enable_rrc = 0;
break;
case XGBE_RN_PCI_DEVICE_ID:
pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
break;
default:
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
break;
}
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -186,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
reg = XP_IOREAD(pdata, XP_PROP_0);
port_addr_size = PCS_RN_PORT_ADDR_SIZE *
XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
address = pdata->smn_base + (pdata->xpcs_window_def_reg);
ret = amd_smn_read(0, address, &reg);
if (ret) {
pci_err(pdata->pcidev, "Failed to read data\n");
goto err_pci_enable;
}
} else {
reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
}
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -364,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
/* Version data for V3 (Renoir) hardware, which uses SMN indirect
 * addressing (XGBE_XPCS_ACCESS_V3) for the PCS registers.
 */
static struct xgbe_version_data xgbe_v3 = {
	.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
	.xpcs_access = XGBE_XPCS_ACCESS_V3,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.irq_reissue_support = 1,
	.tx_desc_prefetch = 5,
	.rx_desc_prefetch = 5,
	/* CDR workaround and RRC are disabled on this platform */
	.an_cdr_workaround = 0,
	.enable_rrc = 0,
};
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -401,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
{ PCI_VDEVICE(AMD, 0x1641),
.driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};

View File

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 *
 * Author: Raju Rangoju <Raju.Rangoju@amd.com>
 */

/* Driver-prefixed guard: the previous __SMN_H__ was generic and
 * collision-prone with other SMN headers.
 */
#ifndef __XGBE_SMN_H__
#define __XGBE_SMN_H__

#ifdef CONFIG_AMD_NB
#include <asm/amd_nb.h>
#else
/* Stubs for builds without the AMD northbridge code that provides the
 * real SMN accessors; always fail so SMN-based (V3) register access
 * reports an error instead of silently succeeding.
 */
static inline int amd_smn_write(u16 node, u32 address, u32 value)
{
	return -ENODEV;
}

static inline int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return -ENODEV;
}
#endif

#endif /* __XGBE_SMN_H__ */

View File

@@ -238,6 +238,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
/* XGBE PCI device ids of the root complex device used to identify the
 * platform in xgbe_pci_probe()
 */
#define XGBE_RV_PCI_DEVICE_ID 0x15d0
#define XGBE_YC_PCI_DEVICE_ID 0x14b5
#define XGBE_RN_PCI_DEVICE_ID 0x1630	/* Renoir */

/* Generic low and high masks for the two 16-bit halves of a 32-bit word */
#define XGBE_GEN_HI_MASK GENMASK(31, 16)
#define XGBE_GEN_LO_MASK GENMASK(15, 0)
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -456,6 +465,7 @@ enum xgbe_speed {
/* PCS register access method used by the hardware version */
enum xgbe_xpcs_access {
	XGBE_XPCS_ACCESS_V1 = 0,
	XGBE_XPCS_ACCESS_V2,
	XGBE_XPCS_ACCESS_V3,	/* SMN indirect access (Renoir) */
};
enum xgbe_an_mode {
@@ -951,6 +961,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;