mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-02 12:10:23 -04:00
net: sparx5: add is_sparx5 macro and use it throughout
We don't want to add an ops callback each time a function needs to do some platform
specifics. In particular we have a few places, where it would be
convenient to just branch out on the platform type. Add the function
is_sparx5() and, initially, use it for:
- register writes that should only be done on Sparx5 (QSYS_CAL_CTRL,
CLKGEN_LCPLL1_CORE_CLK).
- function calls that should only be done on Sparx5
(ethtool_op_get_ts_info())
- register writes that are chip-exclusive (MASK_CFG1/2, PGID_CFG1/2,
these are replicated for n_ports >32 on Sparx5).
The is_sparx5() function simply checks the target chip type, to
determine if this is a Sparx5 SKU or not.
Reviewed-by: Steen Hegelund <Steen.Hegelund@microchip.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
committed by
Paolo Abeni
parent
a0dd890682
commit
4b67bcb909
@@ -194,9 +194,10 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
|
||||
}
|
||||
|
||||
/* Halt the calendar while changing it */
|
||||
spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
|
||||
QSYS_CAL_CTRL_CAL_MODE,
|
||||
sparx5, QSYS_CAL_CTRL);
|
||||
if (is_sparx5(sparx5))
|
||||
spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
|
||||
QSYS_CAL_CTRL_CAL_MODE,
|
||||
sparx5, QSYS_CAL_CTRL);
|
||||
|
||||
/* Assign port bandwidth to auto calendar */
|
||||
for (idx = 0; idx < consts->n_auto_cals; idx++)
|
||||
|
||||
@@ -1189,7 +1189,7 @@ static int sparx5_get_ts_info(struct net_device *dev,
|
||||
struct sparx5 *sparx5 = port->sparx5;
|
||||
struct sparx5_phc *phc;
|
||||
|
||||
if (!sparx5->ptp)
|
||||
if (!sparx5->ptp && is_sparx5(sparx5))
|
||||
return ethtool_op_get_ts_info(dev, info);
|
||||
|
||||
phc = &sparx5->phc[SPARX5_PHC_PORT];
|
||||
|
||||
@@ -208,6 +208,25 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
|
||||
{ TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */
|
||||
};
|
||||
|
||||
bool is_sparx5(struct sparx5 *sparx5)
|
||||
{
|
||||
switch (sparx5->target_ct) {
|
||||
case SPX5_TARGET_CT_7546:
|
||||
case SPX5_TARGET_CT_7549:
|
||||
case SPX5_TARGET_CT_7552:
|
||||
case SPX5_TARGET_CT_7556:
|
||||
case SPX5_TARGET_CT_7558:
|
||||
case SPX5_TARGET_CT_7546TSN:
|
||||
case SPX5_TARGET_CT_7549TSN:
|
||||
case SPX5_TARGET_CT_7552TSN:
|
||||
case SPX5_TARGET_CT_7556TSN:
|
||||
case SPX5_TARGET_CT_7558TSN:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static int sparx5_create_targets(struct sparx5 *sparx5)
|
||||
{
|
||||
const struct sparx5_main_io_resource *iomap = sparx5->data->iomap;
|
||||
@@ -462,44 +481,45 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
switch (freq) {
|
||||
case SPX5_CORE_CLOCK_250MHZ:
|
||||
clk_div = 10;
|
||||
pol_upd_int = 312;
|
||||
break;
|
||||
case SPX5_CORE_CLOCK_500MHZ:
|
||||
clk_div = 5;
|
||||
pol_upd_int = 624;
|
||||
break;
|
||||
case SPX5_CORE_CLOCK_625MHZ:
|
||||
clk_div = 4;
|
||||
pol_upd_int = 780;
|
||||
break;
|
||||
default:
|
||||
dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
|
||||
sparx5->coreclock, sparx5->target_ct);
|
||||
return -EINVAL;
|
||||
if (is_sparx5(sparx5)) {
|
||||
switch (freq) {
|
||||
case SPX5_CORE_CLOCK_250MHZ:
|
||||
clk_div = 10;
|
||||
pol_upd_int = 312;
|
||||
break;
|
||||
case SPX5_CORE_CLOCK_500MHZ:
|
||||
clk_div = 5;
|
||||
pol_upd_int = 624;
|
||||
break;
|
||||
case SPX5_CORE_CLOCK_625MHZ:
|
||||
clk_div = 4;
|
||||
pol_upd_int = 780;
|
||||
break;
|
||||
default:
|
||||
dev_err(sparx5->dev,
|
||||
"%d coreclock not supported on (%#04x)\n",
|
||||
sparx5->coreclock, sparx5->target_ct);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Configure the LCPLL */
|
||||
spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
|
||||
sparx5, CLKGEN_LCPLL1_CORE_CLK_CFG);
|
||||
}
|
||||
|
||||
/* Update state with chosen frequency */
|
||||
sparx5->coreclock = freq;
|
||||
|
||||
/* Configure the LCPLL */
|
||||
spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
|
||||
sparx5,
|
||||
CLKGEN_LCPLL1_CORE_CLK_CFG);
|
||||
|
||||
clk_period = sparx5_clk_period(freq);
|
||||
|
||||
spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
|
||||
|
||||
@@ -376,6 +376,9 @@ struct sparx5 {
|
||||
const struct sparx5_match_data *data;
|
||||
};
|
||||
|
||||
/* sparx5_main.c */
|
||||
bool is_sparx5(struct sparx5 *sparx5);
|
||||
|
||||
/* sparx5_switchdev.c */
|
||||
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
|
||||
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
|
||||
|
||||
@@ -16,8 +16,10 @@ static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
|
||||
|
||||
/* Output mask to respective registers */
|
||||
spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
|
||||
spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
|
||||
spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
|
||||
if (is_sparx5(sparx5)) {
|
||||
spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
|
||||
spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -141,15 +143,19 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
|
||||
/* sparx5_pgid_clear - clear a port group ID (PGID) entry.
 * @spx5: switch device private data
 * @pgid: index of the PGID entry to clear
 *
 * Zeroes the port mask of the given PGID entry. The CFG1/CFG2
 * replications exist only on Sparx5, where the port mask is replicated
 * for more than 32 ports; on other SKUs a single 32-bit register covers
 * all ports, so only CFG is written there.
 */
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
		spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
	}
}
|
||||
|
||||
/* sparx5_pgid_read_mask - read the port mask of a PGID entry.
 * @spx5: switch device private data
 * @pgid: index of the PGID entry to read
 * @portmask: output array receiving the (up to) 96-bit port mask
 *
 * Reads the port mask words of the given PGID entry. The CFG1/CFG2
 * replications exist only on Sparx5 (port masks replicated for more
 * than 32 ports); on other SKUs only portmask[0] is filled in —
 * NOTE(review): portmask[1]/[2] are left untouched there, so callers
 * are presumably expected to ignore or pre-zero them — confirm.
 */
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
		portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
	}
}
|
||||
|
||||
void sparx5_update_fwd(struct sparx5 *sparx5)
|
||||
@@ -164,8 +170,10 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
|
||||
/* Update flood masks */
|
||||
for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
|
||||
spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
|
||||
spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
|
||||
spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
|
||||
if (is_sparx5(sparx5)) {
|
||||
spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
|
||||
spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
|
||||
}
|
||||
}
|
||||
|
||||
/* Update SRC masks */
|
||||
@@ -176,12 +184,16 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
|
||||
clear_bit(port, workmask);
|
||||
bitmap_to_arr32(mask, workmask, SPX5_PORTS);
|
||||
spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
|
||||
spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
|
||||
spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
|
||||
if (is_sparx5(sparx5)) {
|
||||
spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
|
||||
spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
|
||||
}
|
||||
} else {
|
||||
spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
|
||||
spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
|
||||
spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
|
||||
if (is_sparx5(sparx5)) {
|
||||
spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
|
||||
spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,8 +204,10 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
|
||||
|
||||
/* Apply learning mask */
|
||||
spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
|
||||
spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
|
||||
spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
|
||||
if (is_sparx5(sparx5)) {
|
||||
spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
|
||||
spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
|
||||
}
|
||||
}
|
||||
|
||||
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
|
||||
|
||||
Reference in New Issue
Block a user