sfc: Remove falcon deadcode

ef4_farch_dimension_resources(), ef4_nic_fix_nodesc_drop_stat(),
ef4_ticks_to_usecs() and ef4_tx_get_copy_buffer_limited() were
copied over from their efx_ equivalents in 2016 by
commit 5a6681e22c ("sfc: separate out SFC4000 ("Falcon") support into new
sfc-falcon driver") but have never been used since.

EF4_MAX_FLUSH_TIME is also unused.

Remove them.

Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
Link: https://patch.msgid.link/20241102151625.39535-2-linux@treblig.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

drivers/net/ethernet/sfc/falcon/efx.c

@@ -1886,14 +1886,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
 	return usecs * 1000 / efx->timer_quantum_ns;
 }
 
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
-	/* We must round up when converting ticks to microseconds
-	 * because we round down when converting the other way.
-	 */
-	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
 /* Set interrupt moderation parameters */
 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,
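
Worth noting about the helper removed above: ef4_usecs_to_ticks() rounds
down, so ef4_ticks_to_usecs() had to round up, which makes the
ticks -> usecs -> ticks round trip exact (re-writing a read-back
moderation value must not drift). A standalone sketch of that property,
assuming a made-up timer_quantum_ns and a local copy of the kernel's
DIV_ROUND_UP macro:

#include <assert.h>
#include <stdio.h>

/* local copy of the kernel macro */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* hypothetical quantum; any value >= 1000 ns keeps the property below */
static const unsigned int timer_quantum_ns = 6144;

static unsigned int usecs_to_ticks(unsigned int usecs)
{
	return usecs * 1000 / timer_quantum_ns;		/* rounds down */
}

static unsigned int ticks_to_usecs(unsigned int ticks)
{
	return DIV_ROUND_UP(ticks * timer_quantum_ns, 1000); /* rounds up */
}

int main(void)
{
	/* converting ticks to usecs and back never changes the tick count */
	for (unsigned int ticks = 0; ticks < 1000; ticks++)
		assert(usecs_to_ticks(ticks_to_usecs(ticks)) == ticks);
	puts("ticks -> usecs -> ticks round trip is exact");
	return 0;
}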

drivers/net/ethernet/sfc/falcon/efx.h

@@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx);
 /* Global */
 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
 			    unsigned int rx_usecs, bool rx_adaptive,
 			    bool rx_may_override_tx);

drivers/net/ethernet/sfc/falcon/farch.c

@@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
 	}
 }
 
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- *     0                    buftbl entries for channels
- *     efx->vf_buftbl_base  buftbl entries for SR-IOV
- *     efx->rx_dc_base      RX descriptor caches
- *     efx->tx_dc_base      TX descriptor caches
- */
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
-{
-	unsigned vi_count;
-
-	/* Account for the buffer table entries backing the datapath channels
-	 * and the descriptor caches for those channels.
-	 */
-	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
-
-	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
-	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
 u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
 {
 	ef4_oword_t altera_build;
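
The function removed above carved descriptor caches from the top of SRAM
downwards, one TX and one RX cache per virtual interface, leaving
everything below rx_dc_base for buffer table entries. A standalone sketch
of that arithmetic; the sizes and channel counts are illustrative
stand-ins, not the hardware's real EF4_TXQ_TYPES, TX_DC_ENTRIES or
RX_DC_ENTRIES values:

#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

#define TXQ_TYPES	4	/* stand-in for EF4_TXQ_TYPES */
#define TX_DC_ENTRIES	16	/* assumed TX descriptor cache size, qwords */
#define RX_DC_ENTRIES	64	/* assumed RX descriptor cache size, qwords */

int main(void)
{
	unsigned int sram_lim_qw = 8192;	/* hypothetical SRAM limit */
	unsigned int n_channels = 8, n_tx_channels = 4;

	/* one VI per event channel or per (TX channel, queue type) pair,
	 * whichever needs more */
	unsigned int vi_count = MAX(n_channels, n_tx_channels * TXQ_TYPES);

	/* TX caches at the top of SRAM, RX caches directly below them */
	unsigned int tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	unsigned int rx_dc_base = tx_dc_base - vi_count * RX_DC_ENTRIES;

	printf("vi_count=%u tx_dc_base=%u rx_dc_base=%u\n",
	       vi_count, tx_dc_base, rx_dc_base);
	return 0;
}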

drivers/net/ethernet/sfc/falcon/nic.c

@@ -511,14 +511,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 		}
 	}
 }
-
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
-{
-	/* if down, or this is the first update after coming up */
-	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
-		efx->rx_nodesc_drops_while_down +=
-			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
-	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
-	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
-	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
-}
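
The logic removed above compensated for the hardware counter continuing
to count RX no-descriptor drops while the interface is down: drops seen
while down (or before the first update after coming up) are banked and
subtracted, so the reported value only covers time the interface was up.
A standalone sketch with a stand-in struct instead of the driver's real
ef4_nic state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nodesc_state {
	uint64_t total;		/* last raw hardware reading */
	uint64_t while_down;	/* accumulated drops to hide */
	bool prev_up;		/* was the interface up at the last update? */
};

static void fix_nodesc_drop_stat(struct nodesc_state *s, bool up,
				 uint64_t *drops)
{
	/* if down, or this is the first update after coming up, bank the
	 * delta since the previous reading as "happened while down" */
	if (!up || !s->prev_up)
		s->while_down += *drops - s->total;
	s->total = *drops;
	s->prev_up = up;
	*drops -= s->while_down;
}

int main(void)
{
	struct nodesc_state s = { 0 };
	uint64_t raw;

	raw = 5;  fix_nodesc_drop_stat(&s, false, &raw); /* down: reports 0 */
	raw = 12; fix_nodesc_drop_stat(&s, true, &raw);  /* first up update: 0 */
	raw = 20; fix_nodesc_drop_stat(&s, true, &raw);  /* 8 new drops count */
	printf("reported drops after three updates: %llu\n",
	       (unsigned long long)raw);
	return 0;
}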

drivers/net/ethernet/sfc/falcon/nic.h

@@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx);
 void falcon_start_nic_stats(struct ef4_nic *efx);
 void falcon_stop_nic_stats(struct ef4_nic *efx);
 int falcon_reset_xaui(struct ef4_nic *efx);
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
 void ef4_farch_init_common(struct ef4_nic *efx);
 void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
@@ -502,10 +501,6 @@ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
 			  const unsigned long *mask, u64 *stats,
 			  const void *dma_buf, bool accumulate);
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
-
-#define EF4_MAX_FLUSH_TIME 5000
-
 void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
 			      ef4_qword_t *event);

drivers/net/ethernet/sfc/falcon/tx.c

@@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
 	return (u8 *)page_buf->addr + offset;
 }
 
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
-				   struct ef4_tx_buffer *buffer, size_t len)
-{
-	if (len > EF4_TX_CB_SIZE)
-		return NULL;
-	return ef4_tx_get_copy_buffer(tx_queue, buffer);
-}
-
 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
 			       struct ef4_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
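
The wrapper removed above was just a bounds check in front of the real
copy-buffer lookup: payloads larger than EF4_TX_CB_SIZE get NULL back and
the caller falls back to DMA mapping instead of copying. A standalone
sketch of that shape, with a static buffer and a made-up size standing in
for ef4_tx_get_copy_buffer() and EF4_TX_CB_SIZE:

#include <stddef.h>
#include <stdio.h>

#define TX_CB_SIZE 64	/* assumed stand-in for EF4_TX_CB_SIZE */

static unsigned char copy_buffer[TX_CB_SIZE];

/* stand-in for ef4_tx_get_copy_buffer(), which carves the buffer out of
 * a per-queue page */
static unsigned char *get_copy_buffer(void)
{
	return copy_buffer;
}

static unsigned char *get_copy_buffer_limited(size_t len)
{
	if (len > TX_CB_SIZE)
		return NULL;	/* too big to copy; caller maps for DMA */
	return get_copy_buffer();
}

int main(void)
{
	printf("48 bytes  -> %p\n", (void *)get_copy_buffer_limited(48));
	printf("128 bytes -> %p\n", (void *)get_copy_buffer_limited(128));
	return 0;
}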

drivers/net/ethernet/sfc/falcon/tx.h

@@ -15,9 +15,6 @@
 
 unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
 			      dma_addr_t dma_addr, unsigned int len);
 
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
-				   struct ef4_tx_buffer *buffer, size_t len);
-
 int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
 			bool *data_mapped);