mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 15:13:44 -04:00
mlxsw: core, pci: Add plumbing related to LAG mode
lag_mode describes where the responsibility for LAG table placement lies: SW or FW. The bus module determines whether LAG is supported, can configure it if it is, and knows what (if any) configuration has been applied. Therefore add a bus callback to determine the configured LAG mode. Also add to core an API to query it. For now, the LAG mode is kept at its default value of 0, meaning FW-managed. The code to actually toggle it will be added later. Signed-off-by: Petr Machata <petrm@nvidia.com> Reviewed-by: Ido Schimmel <idosch@nvidia.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
8eabd10cdc
commit
b2e9b1fe8c
@@ -204,6 +204,13 @@ int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
|
||||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_max_lag);
|
||||
|
||||
enum mlxsw_cmd_mbox_config_profile_lag_mode
|
||||
mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
|
||||
{
|
||||
return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv);
|
||||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_lag_mode);
|
||||
|
||||
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
|
||||
{
|
||||
return mlxsw_core->driver_priv;
|
||||
|
||||
@@ -36,6 +36,8 @@ struct mlxsw_fw_rev;
|
||||
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
|
||||
|
||||
int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
|
||||
enum mlxsw_cmd_mbox_config_profile_lag_mode
|
||||
mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core);
|
||||
|
||||
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
|
||||
|
||||
@@ -485,6 +487,7 @@ struct mlxsw_bus {
|
||||
u32 (*read_frc_l)(void *bus_priv);
|
||||
u32 (*read_utc_sec)(void *bus_priv);
|
||||
u32 (*read_utc_nsec)(void *bus_priv);
|
||||
enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv);
|
||||
u8 features;
|
||||
};
|
||||
|
||||
|
||||
@@ -105,6 +105,8 @@ struct mlxsw_pci {
|
||||
u64 free_running_clock_offset;
|
||||
u64 utc_sec_offset;
|
||||
u64 utc_nsec_offset;
|
||||
bool lag_mode_support;
|
||||
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
|
||||
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
|
||||
u32 doorbell_offset;
|
||||
struct mlxsw_core *core;
|
||||
@@ -1312,6 +1314,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
|
||||
profile->cqe_time_stamp_type);
|
||||
}
|
||||
|
||||
mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
|
||||
return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
|
||||
}
|
||||
|
||||
@@ -1587,6 +1590,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
mlxsw_pci->utc_nsec_offset =
|
||||
mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
|
||||
|
||||
mlxsw_pci->lag_mode_support =
|
||||
mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
|
||||
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
|
||||
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
|
||||
if (err)
|
||||
@@ -1896,6 +1901,14 @@ static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
|
||||
return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
|
||||
}
|
||||
|
||||
static enum mlxsw_cmd_mbox_config_profile_lag_mode
|
||||
mlxsw_pci_lag_mode(void *bus_priv)
|
||||
{
|
||||
struct mlxsw_pci *mlxsw_pci = bus_priv;
|
||||
|
||||
return mlxsw_pci->lag_mode;
|
||||
}
|
||||
|
||||
static const struct mlxsw_bus mlxsw_pci_bus = {
|
||||
.kind = "pci",
|
||||
.init = mlxsw_pci_init,
|
||||
@@ -1907,6 +1920,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
|
||||
.read_frc_l = mlxsw_pci_read_frc_l,
|
||||
.read_utc_sec = mlxsw_pci_read_utc_sec,
|
||||
.read_utc_nsec = mlxsw_pci_read_utc_nsec,
|
||||
.lag_mode = mlxsw_pci_lag_mode,
|
||||
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
|
||||
};
|
||||
|
||||
|
||||
Reference in New Issue
Block a user