Merge branch 'a-pile-of-sfc-deadcode'

Dr. David Alan Gilbert says:

====================
A pile of sfc deadcode

This is a collection of deadcode removal in the sfc
drivers;  the split is vaguely where I found them in
the tree, with some left over.

This has been build-tested and booted on an x86 VM,
but I don't have the hardware to test on; however,
it's all whole-function removal.

Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
====================

Link: https://patch.msgid.link/20241102151625.39535-1-linux@treblig.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2024-11-05 17:35:13 -08:00

19 changed files with 0 additions and 199 deletions


@@ -418,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
 	return usecs * 1000 / efx->timer_quantum_ns;
 }
 
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
-	/* We must round up when converting ticks to microseconds
-	 * because we round down when converting the other way.
-	 */
-	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
 /* Set interrupt moderation parameters */
 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,

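The rounding comment in the removed efx_ticks_to_usecs rewards a worked example. Below is a minimal user-space sketch of the same arithmetic, assuming a hypothetical timer_quantum_ns of 6144 (the real quantum is NIC-specific): because the ticks-to-usecs direction rounds up while usecs-to-ticks rounds down, a ticks -> usecs -> ticks round trip can never lose ticks.

#include <assert.h>
#include <stdio.h>

/* DIV_ROUND_UP as defined in the kernel's <linux/math.h>. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical quantum: one hardware timer tick = 6144 ns. */
static const unsigned int timer_quantum_ns = 6144;

static unsigned int usecs_to_ticks(unsigned int usecs)
{
	return usecs * 1000 / timer_quantum_ns;		/* rounds down */
}

static unsigned int ticks_to_usecs(unsigned int ticks)
{
	return DIV_ROUND_UP(ticks * timer_quantum_ns, 1000); /* rounds up */
}

int main(void)
{
	unsigned int t;

	/* Round-tripping ticks through microseconds never loses ticks. */
	for (t = 0; t < 1000; t++)
		assert(usecs_to_ticks(ticks_to_usecs(t)) >= t);

	printf("1 tick reads back as %u us\n", ticks_to_usecs(1)); /* 7 */
	return 0;
}

With these numbers a single 6144 ns tick reads back as 7 us, and writing 7 us back gives floor(7000/6144) = 1 tick again, which is why the pair rounds in opposite directions.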

@@ -168,7 +168,6 @@ extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Global */
 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,
 			    bool rx_may_override_tx);


@@ -635,22 +635,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
 	return rc;
 }
 
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled.
- */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
-	int rc;
-
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
-	mutex_lock(&efx->mac_lock);
-	rc = __efx_reconfigure_port(efx);
-	mutex_unlock(&efx->mac_lock);
-
-	return rc;
-}
-
 /**************************************************************************
  *
  * Device reset and suspend

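The removed efx_reconfigure_port is the usual kernel locking-wrapper idiom: the double-underscore __efx_reconfigure_port expects the caller to already hold mac_lock, while the plain-named wrapper acquires and releases it around the call. A generic user-space sketch of that convention, with hypothetical names and pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;
static int mac_setting;	/* stands in for state guarded by mac_lock */

/* Double-underscore variant: caller must hold mac_lock. */
static int __reconfigure_port(int setting)
{
	mac_setting = setting;
	return 0;
}

/* Plain variant: takes the lock, delegates, releases. */
static int reconfigure_port(int setting)
{
	int rc;

	pthread_mutex_lock(&mac_lock);
	rc = __reconfigure_port(setting);
	pthread_mutex_unlock(&mac_lock);
	return rc;
}

int main(void)
{
	printf("rc=%d\n", reconfigure_port(1));
	return 0;
}

Once no out-of-lock callers remain, the plain wrapper is dead code and only the __ variant survives, which is exactly what this hunk does.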

@@ -40,7 +40,6 @@ void efx_destroy_reset_workqueue(void);
 void efx_start_monitor(struct efx_nic *efx);
 
 int __efx_reconfigure_port(struct efx_nic *efx);
-int efx_reconfigure_port(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)	\
 	do {					\


@@ -1886,14 +1886,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
 	return usecs * 1000 / efx->timer_quantum_ns;
 }
 
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
-	/* We must round up when converting ticks to microseconds
-	 * because we round down when converting the other way.
-	 */
-	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
 /* Set interrupt moderation parameters */
 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,


@@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx);
 /* Global */
 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,
 			    bool rx_may_override_tx);


@@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
 	}
 }
 
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0                          buftbl entries for channels
- * efx->vf_buftbl_base        buftbl entries for SR-IOV
- * efx->rx_dc_base            RX descriptor caches
- * efx->tx_dc_base            TX descriptor caches
- */
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
-{
-	unsigned vi_count;
-
-	/* Account for the buffer table entries backing the datapath channels
-	 * and the descriptor caches for those channels.
-	 */
-	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
-
-	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
-	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
 u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
 {
 	ef4_oword_t altera_build;

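For the curious, the removed ef4_farch_dimension_resources simply carves the TX and RX descriptor caches from the top of SRAM downward, one slice per virtual interface. A standalone sketch of that arithmetic, with hypothetical channel counts and SRAM size (the *_DC_ENTRIES and EF4_TXQ_TYPES values here are illustrative, not taken from the driver's headers):

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Illustrative values; the real constants live in the sfc headers. */
#define EF4_TXQ_TYPES	4
#define TX_DC_ENTRIES	16
#define RX_DC_ENTRIES	64

int main(void)
{
	/* Hypothetical: 32 channels, 16 with TX queues, SRAM ending
	 * at qword 8192. */
	unsigned int n_channels = 32, n_tx_channels = 16;
	unsigned int sram_lim_qw = 8192;

	/* One virtual interface per channel, or per TX queue type,
	 * whichever needs more. */
	unsigned int vi_count = max(n_channels,
				    n_tx_channels * EF4_TXQ_TYPES);

	/* Descriptor caches are carved from the top of SRAM downward:
	 * TX caches first, RX caches immediately below them. */
	unsigned int tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	unsigned int rx_dc_base = tx_dc_base - vi_count * RX_DC_ENTRIES;

	printf("vi_count=%u tx_dc_base=%u rx_dc_base=%u\n",
	       vi_count, tx_dc_base, rx_dc_base);
	return 0;
}

With these numbers 64 VIs are needed (16 TX channels x 4 queue types exceeds the 32 channels), so the TX caches occupy qwords 7168-8191 and the RX caches 3072-7167; everything below rx_dc_base is left for buffer table entries.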

@@ -511,14 +511,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 		}
 	}
 }
-
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
-{
-	/* if down, or this is the first update after coming up */
-	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
-		efx->rx_nodesc_drops_while_down +=
-			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
-	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
-	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
-	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
-}


@@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx);
 void falcon_start_nic_stats(struct ef4_nic *efx);
 void falcon_stop_nic_stats(struct ef4_nic *efx);
 int falcon_reset_xaui(struct ef4_nic *efx);
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
 void ef4_farch_init_common(struct ef4_nic *efx);
 void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
 
@@ -502,10 +501,6 @@ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 			  const unsigned long *mask, u64 *stats,
 			  const void *dma_buf, bool accumulate);
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
 
 #define EF4_MAX_FLUSH_TIME 5000
 
 void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
 			      ef4_qword_t *event);


@@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
 	return (u8 *)page_buf->addr + offset;
 }
 
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
-				   struct ef4_tx_buffer *buffer, size_t len)
-{
-	if (len > EF4_TX_CB_SIZE)
-		return NULL;
-	return ef4_tx_get_copy_buffer(tx_queue, buffer);
-}
-
 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
 			       struct ef4_tx_buffer *buffer,
 			       unsigned int *pkts_compl,


@@ -15,9 +15,6 @@
 unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
 			      dma_addr_t dma_addr, unsigned int len);
 
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
-				   struct ef4_tx_buffer *buffer, size_t len);
-
 int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
 			bool *data_mapped);


@@ -76,17 +76,6 @@ void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
 	*out = EFX_DWORD_VAL(mport);
 }
 
-void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
-{
-	efx_dword_t mport;
-
-	EFX_POPULATE_DWORD_3(mport,
-			     MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
-			     MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
-			     MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
-	*out = EFX_DWORD_VAL(mport);
-}
-
 /* Constructs an mport selector from an mport ID, because they're not the same */
 void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
 {


@@ -23,7 +23,6 @@ int efx_mae_free_mport(struct efx_nic *efx, u32 id);
 
 void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
 void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
-void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
 void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
 
 int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);


@@ -1051,15 +1051,6 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 			       cookie, false);
 }
 
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
-			     const efx_dword_t *inbuf, size_t inlen,
-			     size_t outlen, efx_mcdi_async_completer *complete,
-			     unsigned long cookie)
-{
-	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
-				   cookie, true);
-}
-
 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			efx_dword_t *outbuf, size_t outlen,
 			size_t *outlen_actual)
@@ -1068,14 +1059,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 				    outlen_actual, false, NULL, NULL);
 }
 
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
-			      efx_dword_t *outbuf, size_t outlen,
-			      size_t *outlen_actual)
-{
-	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
-				    outlen_actual, true, NULL, NULL);
-}
-
 void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
 			    size_t inlen, efx_dword_t *outbuf,
 			    size_t outlen, int rc)
@@ -1982,33 +1965,6 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
 }
 
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
-{
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
-	size_t outlen;
-	int rc;
-
-	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
-			  outbuf, sizeof(outbuf), &outlen);
-	if (rc)
-		goto fail;
-
-	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
-		rc = -EIO;
-		goto fail;
-	}
-
-	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
-	return 0;
-
-fail:
-	*id_out = -1;
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-	return rc;
-}
-
 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
@@ -2021,38 +1977,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
 	return rc;
 }
 
-int efx_mcdi_flush_rxqs(struct efx_nic *efx)
-{
-	struct efx_channel *channel;
-	struct efx_rx_queue *rx_queue;
-	MCDI_DECLARE_BUF(inbuf,
-			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
-	int rc, count;
-
-	BUILD_BUG_ON(EFX_MAX_CHANNELS >
-		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
-
-	count = 0;
-	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_rx_queue(rx_queue, channel) {
-			if (rx_queue->flush_pending) {
-				rx_queue->flush_pending = false;
-				atomic_dec(&efx->rxq_flush_pending);
-				MCDI_SET_ARRAY_DWORD(
-					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
-					count, efx_rx_queue_index(rx_queue));
-				count++;
-			}
-		}
-	}
-
-	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
-			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
-	WARN_ON(rc < 0);
-
-	return rc;
-}
-
 int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
 {
 	int rc;

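A note on the _quiet variants removed above: they differ from efx_mcdi_rpc_async and efx_mcdi_rpc_finish only in the final boolean passed to the shared _efx_mcdi_rpc_* helpers, which suppresses error logging for callers that expect and handle certain failures themselves. A generic sketch of that pattern with hypothetical names, not the driver's actual API:

#include <stdio.h>

/* Hypothetical core helper: one extra flag controls error logging. */
static int rpc_core(int cmd, int quiet)
{
	int rc = (cmd == 42) ? 0 : -5;	/* pretend only cmd 42 succeeds */

	if (rc && !quiet)
		fprintf(stderr, "MC command %d failed rc=%d\n", cmd, rc);
	return rc;
}

/* Noisy wrapper: logs failures (what most callers want). */
static int rpc(int cmd)		{ return rpc_core(cmd, 0); }

/* Quiet wrapper: callers that expect and handle certain errors
 * use this to keep the log clean. */
static int rpc_quiet(int cmd)	{ return rpc_core(cmd, 1); }

int main(void)
{
	rpc(7);		/* logs the failure */
	rpc_quiet(7);	/* silent; caller inspects rc */
	return 0;
}

Once the last callers that wanted the quiet behaviour are gone, the wrappers are pure dead weight, hence their removal here.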

@@ -155,9 +155,6 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			efx_dword_t *outbuf, size_t outlen,
 			size_t *outlen_actual);
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
-			      size_t inlen, efx_dword_t *outbuf,
-			      size_t outlen, size_t *outlen_actual);
 
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
 				      unsigned long cookie, int rc,
@@ -167,11 +164,6 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
 		       efx_mcdi_async_completer *complete,
 		       unsigned long cookie);
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
-			     const efx_dword_t *inbuf, size_t inlen,
-			     size_t outlen,
-			     efx_mcdi_async_completer *complete,
-			     unsigned long cookie);
 
 void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
 			    size_t inlen, efx_dword_t *outbuf,
@@ -410,10 +402,8 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx);
 int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
 				  int *id_out);
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
 int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-int efx_mcdi_flush_rxqs(struct efx_nic *efx);
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
 void efx_mcdi_mac_start_stats(struct efx_nic *efx);
 void efx_mcdi_mac_stop_stats(struct efx_nic *efx);


@@ -1800,11 +1800,6 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 }
 
-int efx_ptp_get_mode(struct efx_nic *efx)
-{
-	return efx->ptp_data->mode;
-}
-
 int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 			unsigned int new_mode)
 {


@@ -26,7 +26,6 @@ int efx_ptp_get_ts_config(struct efx_nic *efx,
 void efx_ptp_get_ts_info(struct efx_nic *efx,
 			 struct kernel_ethtool_ts_info *ts_info);
 bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-int efx_ptp_get_mode(struct efx_nic *efx);
 int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 			unsigned int new_mode);
 int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);


@@ -49,14 +49,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
 	return (u8 *)page_buf->addr + offset;
 }
 
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
-				   struct efx_tx_buffer *buffer, size_t len)
-{
-	if (len > EFX_TX_CB_SIZE)
-		return NULL;
-	return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
 	/* We need to consider all queues that the net core sees as one */


@@ -15,9 +15,6 @@
 unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
 			      dma_addr_t dma_addr, unsigned int len);
 
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
-				   struct efx_tx_buffer *buffer, size_t len);
-
 /* What TXQ type will satisfy the checksum offloads required for this skb? */
 static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)