bna: scope and dead code cleanup
As suggested by Stephen Hemminger:

1) Made functions and data structures static wherever possible.
2) Removed unused code.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent e2fa6f2ef6
commit b7ee31c5af
@@ -65,7 +65,7 @@
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))

-bool bfa_nw_auto_recover = true;
+static bool bfa_nw_auto_recover = true;

/*
* forward declarations
@@ -1276,12 +1276,6 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
bfa_nw_auto_recover = auto_recover;
}

-bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
-return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
@@ -271,7 +271,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);

void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);

void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);

-struct bfa_ioc_hwif nw_hwif_ct;
+static struct bfa_ioc_hwif nw_hwif_ct;

/**
* Called from bfa_ioc_attach() to map asic specific calls.
@@ -77,7 +77,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
((_fsm)->fsm == (bfa_fsm_t)(_state))

static inline int
-bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
+bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
{
int i = 0;

@@ -19,8 +19,7 @@
#include "bfi_ll.h"
#include "bna_types.h"

-extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
-extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/**
*
@@ -344,9 +343,6 @@ do { \
* BNA
*/

-/* Internal APIs */
-void bna_adv_res_req(struct bna_res_info *res_info);
-
/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
@@ -354,7 +350,6 @@ void bna_init(struct bna *bna, struct bnad *bnad,
struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
void bna_stats_get(struct bna *bna);
-void bna_stats_clr(struct bna *bna);
void bna_get_perm_mac(struct bna *bna, u8 *mac);

/* APIs for Rx */
@@ -376,18 +371,6 @@ void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
* DEVICE
*/

-/* Interanl APIs */
-void bna_adv_device_init(struct bna_device *device, struct bna *bna,
-struct bna_res_info *res_info);
-
-/* APIs for BNA */
-void bna_device_init(struct bna_device *device, struct bna *bna,
-struct bna_res_info *res_info);
-void bna_device_uninit(struct bna_device *device);
-void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
-int bna_device_status_get(struct bna_device *device);
-int bna_device_state_get(struct bna_device *device);
-
/* APIs for BNAD */
void bna_device_enable(struct bna_device *device);
void bna_device_disable(struct bna_device *device,
@@ -397,12 +380,6 @@ void bna_device_disable(struct bna_device *device,
* MBOX
*/

-/* APIs for DEVICE */
-void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
-void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
-
/* APIs for PORT, TX, RX */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
@@ -411,17 +388,6 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
* PORT
*/

-/* APIs for BNA */
-void bna_port_init(struct bna_port *port, struct bna *bna);
-void bna_port_uninit(struct bna_port *port);
-int bna_port_state_get(struct bna_port *port);
-int bna_llport_state_get(struct bna_llport *llport);
-
-/* APIs for DEVICE */
-void bna_port_start(struct bna_port *port);
-void bna_port_stop(struct bna_port *port);
-void bna_port_fail(struct bna_port *port);
-
/* API for RX */
int bna_port_mtu_get(struct bna_port *port);
void bna_llport_admin_up(struct bna_llport *llport);
@@ -437,12 +403,6 @@ void bna_port_pause_config(struct bna_port *port,
void bna_port_mtu_set(struct bna_port *port, int mtu,
void (*cbfn)(struct bnad *, enum bna_cb_status));
void bna_port_mac_get(struct bna_port *port, mac_t *mac);
-void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
-void bna_port_linkcbfn_set(struct bna_port *port,
-void (*linkcbfn)(struct bnad *,
-enum bna_link_status));
-void bna_port_admin_up(struct bna_port *port);
-void bna_port_admin_down(struct bna_port *port);

/* Callbacks for TX, RX */
void bna_port_cb_tx_stopped(struct bna_port *port,
@@ -450,11 +410,6 @@ void bna_port_cb_tx_stopped(struct bna_port *port,
void bna_port_cb_rx_stopped(struct bna_port *port,
enum bna_cb_status status);

-/* Callbacks for MBOX */
-void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-int status);
-void bna_port_cb_link_down(struct bna_port *port, int status);
-
/**
* IB
*/
@@ -464,25 +419,10 @@ void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
struct bna_res_info *res_info);
void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);

-/* APIs for TX, RX */
-struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
-enum bna_intr_type intr_type, int vector);
-void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
-int bna_ib_reserve_idx(struct bna_ib *ib);
-void bna_ib_release_idx(struct bna_ib *ib, int idx);
-int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
-void bna_ib_start(struct bna_ib *ib);
-void bna_ib_stop(struct bna_ib *ib);
-void bna_ib_fail(struct bna_ib *ib);
-void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
-
/**
* TX MODULE AND TX
*/

-/* Internal APIs */
-void bna_tx_prio_changed(struct bna_tx *tx, int prio);
-
/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
struct bna_res_info *res_info);
@@ -508,10 +448,6 @@ void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_tx *,
enum bna_cb_status));
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-void (*cbfn)(struct bnad *, struct bna_tx *,
-enum bna_cb_status));
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/**
@@ -564,35 +500,20 @@ void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_rx *,
enum bna_cb_status));
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
-void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status));
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status));
-enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status));
-void bna_rx_mcast_delall(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -601,36 +522,12 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_vlanfilter_disable(struct bna_rx *rx);
-void bna_rx_rss_enable(struct bna_rx *rx);
-void bna_rx_rss_disable(struct bna_rx *rx);
-void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
-void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
-int nvectors);
void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status));
void bna_rx_hds_disable(struct bna_rx *rx,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status));
-void bna_rx_receive_pause(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
-void bna_rx_receive_resume(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status));
-
-/* RxF APIs for RX */
-void bna_rxf_start(struct bna_rxf *rxf);
-void bna_rxf_stop(struct bna_rxf *rxf);
-void bna_rxf_fail(struct bna_rxf *rxf);
-void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
-struct bna_rx_config *q_config);
-void bna_rxf_uninit(struct bna_rxf *rxf);
-
-/* Callback from RXF to RX */
-void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
-void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);

/**
* BNAD
@@ -639,7 +536,6 @@ void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
struct bna_stats *stats);
-void bnad_cb_stats_clr(struct bnad *bnad);

/* Callbacks for DEVICE */
void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
@@ -19,6 +19,46 @@
#include "bfa_sm.h"
#include "bfa_wc.h"

+static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+
+static void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+int status)
+{
+int i;
+u8 prio_map;
+
+port->llport.link_status = BNA_LINK_UP;
+if (aen->cee_linkup)
+port->llport.link_status = BNA_CEE_UP;
+
+/* Compute the priority */
+prio_map = aen->prio_map;
+if (prio_map) {
+for (i = 0; i < 8; i++) {
+if ((prio_map >> i) & 0x1)
+break;
+}
+port->priority = i;
+} else
+port->priority = 0;
+
+/* Dispatch events */
+bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+static void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+port->llport.link_status = BNA_LINK_DOWN;
+
+/* Dispatch events */
+bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
/**
* MBOX
*/
@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
bna_mbox_aen_callback(bna, msg);
}

-void
+static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
u32 init_halt;
@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
}
}

-void
+static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
struct bna_mbox_qe *mb_qe = NULL;
@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
bna->mbox_mod.state = BNA_MBOX_FREE;
}

-void
+static void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

-void
+static void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

-void
+static void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
mbox_mod->bna = bna;
}

-void
+static void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
mbox_mod->bna = NULL;
@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}

-void
+static void
bna_port_cb_llport_stopped(struct bna_port *port,
enum bna_cb_status status)
{
@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}

-int
+static int
bna_llport_state_get(struct bna_llport *llport)
{
return bfa_sm_to_state(llport_sm_table, llport->fsm);
@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
}

-void
+static void
bna_port_init(struct bna_port *port, struct bna *bna)
{
port->bna = bna;
@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
bna_llport_init(&port->llport, bna);
}

-void
+static void
bna_port_uninit(struct bna_port *port)
{
bna_llport_uninit(&port->llport);
@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
port->bna = NULL;
}

-int
+static int
bna_port_state_get(struct bna_port *port)
{
return bfa_sm_to_state(port_sm_table, port->fsm);
}

-void
+static void
bna_port_start(struct bna_port *port)
{
port->flags |= BNA_PORT_F_DEVICE_READY;
@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
bfa_fsm_send_event(port, PORT_E_START);
}

-void
+static void
bna_port_stop(struct bna_port *port)
{
port->stop_cbfn = bna_device_cb_port_stopped;
@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
bfa_fsm_send_event(port, PORT_E_STOP);
}

-void
+static void
bna_port_fail(struct bna_port *port)
{
port->flags &= ~BNA_PORT_F_DEVICE_READY;
@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
bfa_wc_down(&port->chld_stop_wc);
}

-void
-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-int status)
-{
-int i;
-u8 prio_map;
-
-port->llport.link_status = BNA_LINK_UP;
-if (aen->cee_linkup)
-port->llport.link_status = BNA_CEE_UP;
-
-/* Compute the priority */
-prio_map = aen->prio_map;
-if (prio_map) {
-for (i = 0; i < 8; i++) {
-if ((prio_map >> i) & 0x1)
-break;
-}
-port->priority = i;
-} else
-port->priority = 0;
-
-/* Dispatch events */
-bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
-bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
-port->link_cbfn(port->bna->bnad, port->llport.link_status);
-}
-
-void
-bna_port_cb_link_down(struct bna_port *port, int status)
-{
-port->llport.link_status = BNA_LINK_DOWN;
-
-/* Dispatch events */
-bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
-port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
-}
-
int
bna_port_mtu_get(struct bna_port *port)
{
@@ -1292,54 +1294,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
}

-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_type_set(struct bna_port *port, enum bna_port_type type)
-{
-port->type = type;
-port->llport.type = type;
-}
-
-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_linkcbfn_set(struct bna_port *port,
-void (*linkcbfn)(struct bnad *, enum bna_link_status))
-{
-port->link_cbfn = linkcbfn;
-}
-
-void
-bna_port_admin_up(struct bna_port *port)
-{
-struct bna_llport *llport = &port->llport;
-
-if (llport->flags & BNA_LLPORT_F_ENABLED)
-return;
-
-llport->flags |= BNA_LLPORT_F_ENABLED;
-
-if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-bfa_fsm_send_event(llport, LLPORT_E_UP);
-}
-
-void
-bna_port_admin_down(struct bna_port *port)
-{
-struct bna_llport *llport = &port->llport;
-
-if (!(llport->flags & BNA_LLPORT_F_ENABLED))
-return;
-
-llport->flags &= ~BNA_LLPORT_F_ENABLED;
-
-if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-bfa_fsm_send_event(llport, LLPORT_E_DOWN);
-}
-
/**
* DEVICE
*/
@@ -1357,7 +1311,7 @@ do {\
bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)

-const struct bna_chip_regs_offset reg_offset[] =
+static const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
bna_device_cb_iocll_reset
};

-void
+/* device */
+static void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+struct bna_res_info *res_info)
+{
+u8 *kva;
+u64 dma;
+
+device->bna = bna;
+
+kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+/**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+BNA_GET_DMA_ADDR(
+&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+kva += bfa_nw_cee_meminfo();
+dma += bfa_nw_cee_meminfo();
+
+}
+
+static void
bna_device_init(struct bna_device *device, struct bna *bna,
struct bna_res_info *res_info)
{
@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
bfa_fsm_set_state(device, bna_device_sm_stopped);
}

-void
+static void
bna_device_uninit(struct bna_device *device)
{
bna_mbox_mod_uninit(&device->bna->mbox_mod);
@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
device->bna = NULL;
}

-void
+static void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
{
struct bna_device *device = (struct bna_device *)arg;
@@ -1699,7 +1680,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
}

-int
+static int
bna_device_status_get(struct bna_device *device)
{
return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
bfa_fsm_send_event(device, DEVICE_E_DISABLE);
}

-int
+static int
bna_device_state_get(struct bna_device *device)
{
return bfa_sm_to_state(device_sm_table, device->fsm);
}

-u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
-{12, 20},
-{10, 18},
-{8, 16},
-{6, 12},
-{4, 8},
-{3, 6},
-{2, 4},
-{1, 2},
-};
-
-u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{12, 12},
{6, 10},
{5, 10},
@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{1, 2},
};

-/* device */
-void
-bna_adv_device_init(struct bna_device *device, struct bna *bna,
-struct bna_res_info *res_info)
-{
-u8 *kva;
-u64 dma;
-
-device->bna = bna;
-
-kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
-
-/**
- * Attach common modules (Diag, SFP, CEE, Port) and claim respective
- * DMA memory.
- */
-BNA_GET_DMA_ADDR(
-&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
-kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-
-bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
-bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
-kva += bfa_nw_cee_meminfo();
-dma += bfa_nw_cee_meminfo();
-
-}
-
/* utils */

-void
+static void
bna_adv_res_req(struct bna_res_info *res_info)
{
/* DMA memory for COMMON_MODULE */
@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}

-static void
-bna_fw_cb_stats_clr(void *arg, int status)
-{
-struct bna *bna = (struct bna *)arg;
-
-bfa_q_qe_init(&bna->mbox_qe.qe);
-
-memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
-memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
-
-bnad_cb_stats_clr(bna->bnad);
-}
-
-static void
-bna_fw_stats_clr(struct bna *bna)
-{
-struct bfi_ll_stats_req ll_req;
-
-bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
-ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
-ll_req.rxf_id_mask[0] = htonl(0xffffffff);
-ll_req.rxf_id_mask[1] = htonl(0xffffffff);
-ll_req.txf_id_mask[0] = htonl(0xffffffff);
-ll_req.txf_id_mask[1] = htonl(0xffffffff);
-
-bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
-bna_fw_cb_stats_clr, bna);
-bna_mbox_send(bna, &bna->mbox_qe);
-}
-
void
bna_stats_get(struct bna *bna)
{
@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}

-void
-bna_stats_clr(struct bna *bna)
-{
-if (bna_device_status_get(&bna->device))
-bna_fw_stats_clr(bna);
-else {
-memset(&bna->stats.sw_stats, 0,
-sizeof(struct bna_sw_stats));
-memset(bna->stats.hw_stats, 0,
-sizeof(struct bfi_ll_stats));
-bnad_cb_stats_clr(bna->bnad);
-}
-}
-
/* IB */
-void
+static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
ib->ib_config.coalescing_timeo = coalescing_timeo;
@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

-void
+static void
__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
{
struct bna_rx_fndb_ram *rx_fndb_ram;
@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_promisc_enable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_promisc_disable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_default_enable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_default_disable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
int ret = 0;
@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
* 0 = no h/w change
* 1 = need h/w change
*/
-int
+static int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
int ret = 0;
@@ -2745,159 +2644,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
return ret;
}

-/* RxF <- bnad */
-void
-bna_rx_mcast_delall(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-struct list_head *qe;
-struct bna_mac *mac;
-int need_hw_config = 0;
-
-/* Purge all entries from pending_add_q */
-while (!list_empty(&rxf->mcast_pending_add_q)) {
-bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-mac = (struct bna_mac *)qe;
-bfa_q_qe_init(&mac->qe);
-bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-}
-
-/* Schedule all entries in active_q for deletion */
-while (!list_empty(&rxf->mcast_active_q)) {
-bfa_q_deq(&rxf->mcast_active_q, &qe);
-mac = (struct bna_mac *)qe;
-bfa_q_qe_init(&mac->qe);
-list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
-need_hw_config = 1;
-}
-
-if (need_hw_config) {
-rxf->cam_fltr_cbfn = cbfn;
-rxf->cam_fltr_cbarg = rx->bna->bnad;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-return;
-}
-
-if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- Rx */
-void
-bna_rx_receive_resume(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-
-if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
-rxf->oper_state_cbfn = cbfn;
-rxf->oper_state_cbarg = rx->bna->bnad;
-bfa_fsm_send_event(rxf, RXF_E_RESUME);
-} else if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-void
-bna_rx_receive_pause(struct bna_rx *rx,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-
-if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
-rxf->oper_state_cbfn = cbfn;
-rxf->oper_state_cbarg = rx->bna->bnad;
-bfa_fsm_send_event(rxf, RXF_E_PAUSE);
-} else if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-struct list_head *qe;
-struct bna_mac *mac;
-
-/* Check if already added */
-list_for_each(qe, &rxf->ucast_active_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-return BNA_CB_SUCCESS;
-}
-}
-
-/* Check if pending addition */
-list_for_each(qe, &rxf->ucast_pending_add_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-return BNA_CB_SUCCESS;
-}
-}
-
-mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
-if (mac == NULL)
-return BNA_CB_UCAST_CAM_FULL;
-bfa_q_qe_init(&mac->qe);
-memcpy(mac->addr, addr, ETH_ALEN);
-list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
-
-rxf->cam_fltr_cbfn = cbfn;
-rxf->cam_fltr_cbarg = rx->bna->bnad;
-
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
-return BNA_CB_SUCCESS;
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-struct list_head *qe;
-struct bna_mac *mac;
-
-list_for_each(qe, &rxf->ucast_pending_add_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-list_del(qe);
-bfa_q_qe_init(qe);
-bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
-if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-return BNA_CB_SUCCESS;
-}
-}
-
-list_for_each(qe, &rxf->ucast_active_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-list_del(qe);
-bfa_q_qe_init(qe);
-list_add_tail(qe, &rxf->ucast_pending_del_q);
-rxf->cam_fltr_cbfn = cbfn;
-rxf->cam_fltr_cbarg = rx->bna->bnad;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-return BNA_CB_SUCCESS;
-}
-}
-
-return BNA_CB_INVALID_MAC;
-}
-
/* RxF <- bnad */
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
@@ -2978,39 +2724,6 @@ err_return:
return BNA_CB_FAIL;
}

-/* RxF <- bnad */
-void
-bna_rx_rss_enable(struct bna_rx *rx)
-{
-struct bna_rxf *rxf = &rx->rxf;
-
-rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-rxf->rss_status = BNA_STATUS_T_ENABLED;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_disable(struct bna_rx *rx)
-{
-struct bna_rxf *rxf = &rx->rxf;
-
-rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-rxf->rss_status = BNA_STATUS_T_DISABLED;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
-{
-struct bna_rxf *rxf = &rx->rxf;
-rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-rxf->rss_status = BNA_STATUS_T_ENABLED;
-rxf->rss_cfg = *rss_config;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
void
+/* RxF <- bnad */
bna_rx_vlanfilter_enable(struct bna_rx *rx)
@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
}
}

-/* RxF <- bnad */
-void
-bna_rx_vlanfilter_disable(struct bna_rx *rx)
-{
-struct bna_rxf *rxf = &rx->rxf;
-
-if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-}
-
-/* Rx */
-
-struct bna_rxp *
-bna_rx_get_rxp(struct bna_rx *rx, int vector)
-{
-struct bna_rxp *rxp;
-struct list_head *qe;
-
-list_for_each(qe, &rx->rxp_q) {
-rxp = (struct bna_rxp *)qe;
-if (rxp->vector == vector)
-return rxp;
-}
-return NULL;
-}
-
-/*
- * bna_rx_rss_rit_set()
- * Sets the Q ids for the specified msi-x vectors in the RIT.
- * Maximum rit size supported is 64, which should be the max size of the
- * vectors array.
- */
-
-void
-bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
-{
-int i;
-struct bna_rxp *rxp;
-struct bna_rxq *q0 = NULL, *q1 = NULL;
-struct bna *bna;
-struct bna_rxf *rxf;
-
-/* Build the RIT contents for this RX */
-bna = rx->bna;
-
-rxf = &rx->rxf;
-for (i = 0; i < nvectors; i++) {
-rxp = bna_rx_get_rxp(rx, vectors[i]);
-
-GET_RXQS(rxp, q0, q1);
-rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
-rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
-}
-
-rxf->rit_segment->rit_size = nvectors;
-
-/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
-}
-
/* Rx <- bnad */
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)

/* Rx <- bnad */
void
-bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
int i, j;

@@ -3164,22 +2817,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)
}

/* Tx */
-/* TX <- bnad */
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-void (*cbfn)(struct bnad *, struct bna_tx *,
-enum bna_cb_status))
-{
-if (tx->flags & BNA_TX_F_PRIO_LOCK)
-return BNA_CB_FAIL;
-else {
-tx->prio_change_cbfn = cbfn;
-bna_tx_prio_changed(tx, prio);
-}
-
-return BNA_CB_SUCCESS;
-}
-
/* TX <- bnad */
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
@@ -1282,7 +1282,6 @@ struct bna_chip_regs_offset {
u32 fn_int_mask;
u32 msix_idx;
};
-extern const struct bna_chip_regs_offset reg_offset[];

struct bna_chip_regs {
void __iomem *page_addr;
@@ -195,7 +195,7 @@ bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
ib_mod->bna = NULL;
}

-struct bna_ib *
+static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
enum bna_intr_type intr_type,
int vector)
@@ -240,7 +240,7 @@ bna_ib_get(struct bna_ib_mod *ib_mod,
return ib;
}

-void
+static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
bna_intr_put(ib_mod, ib->intr);
@@ -255,7 +255,7 @@ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
}

/* Returns index offset - starting from 0 */
-int
+static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -309,7 +309,7 @@ bna_ib_reserve_idx(struct bna_ib *ib)
return idx;
}

-void
+static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -356,7 +356,7 @@ bna_ib_release_idx(struct bna_ib *ib, int idx)
}
}

-int
+static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
if (ib->start_count)
@@ -374,7 +374,7 @@ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
return 0;
}

-void
+static void
bna_ib_start(struct bna_ib *ib)
{
struct bna_ib_blk_mem ib_cfg;
@@ -450,7 +450,7 @@ bna_ib_start(struct bna_ib *ib)
}
}

-void
+static void
bna_ib_stop(struct bna_ib *ib)
{
u32 intx_mask;
@@ -468,7 +468,7 @@ bna_ib_stop(struct bna_ib *ib)
}
}

-void
+static void
bna_ib_fail(struct bna_ib *ib)
{
ib->start_count = 0;
@@ -1394,7 +1394,7 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
rxf_reset_packet_filter_allmulti(rxf);
}

-void
+static void
bna_rxf_init(struct bna_rxf *rxf,
struct bna_rx *rx,
struct bna_rx_config *q_config)
@@ -1444,7 +1444,7 @@ bna_rxf_init(struct bna_rxf *rxf,
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

-void
+static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
struct bna_mac *mac;
@@ -1476,7 +1476,18 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->rx = NULL;
}

-void
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+if (rx->rxf.rxf_id < 32)
+rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+else
+rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+1 << (rx->rxf.rxf_id - 32));
+}
+
+static void
bna_rxf_start(struct bna_rxf *rxf)
{
rxf->start_cbfn = bna_rx_cb_rxf_started;
@@ -1485,7 +1496,18 @@ bna_rxf_start(struct bna_rxf *rxf)
bfa_fsm_send_event(rxf, RXF_E_START);
}

-void
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+if (rx->rxf.rxf_id < 32)
+rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
+else
+rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
+1 << (rx->rxf.rxf_id - 32);
+}
+
+static void
bna_rxf_stop(struct bna_rxf *rxf)
{
rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
@@ -1493,7 +1515,7 @@ bna_rxf_stop(struct bna_rxf *rxf)
bfa_fsm_send_event(rxf, RXF_E_STOP);
}

-void
+static void
bna_rxf_fail(struct bna_rxf *rxf)
{
rxf->rxf_flags |= BNA_RXF_FL_FAILED;
@@ -1575,43 +1597,6 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}

-enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
-void (*cbfn)(struct bnad *, struct bna_rx *,
-enum bna_cb_status))
-{
-struct bna_rxf *rxf = &rx->rxf;
-struct list_head *qe;
-struct bna_mac *mac;
-
-list_for_each(qe, &rxf->mcast_pending_add_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-list_del(qe);
-bfa_q_qe_init(qe);
-bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-if (cbfn)
-(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-return BNA_CB_SUCCESS;
-}
-}
-
-list_for_each(qe, &rxf->mcast_active_q) {
-mac = (struct bna_mac *)qe;
-if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-list_del(qe);
-bfa_q_qe_init(qe);
-list_add_tail(qe, &rxf->mcast_pending_del_q);
-rxf->cam_fltr_cbfn = cbfn;
-rxf->cam_fltr_cbarg = rx->bna->bnad;
-bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-return BNA_CB_SUCCESS;
-}
-}
-
-return BNA_CB_INVALID_MAC;
-}
-
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
void (*cbfn)(struct bnad *, struct bna_rx *,
@@ -1862,7 +1847,7 @@ bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
struct bna_rx, enum bna_rx_event);

-static struct bfa_sm_table rx_sm_table[] = {
+static const struct bfa_sm_table rx_sm_table[] = {
{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
@@ -2247,7 +2232,7 @@ bna_rit_create(struct bna_rx *rx)
}
}

-int
+static int
_rx_can_satisfy(struct bna_rx_mod *rx_mod,
struct bna_rx_config *rx_cfg)
{
@@ -2272,7 +2257,7 @@ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
return 1;
}

-struct bna_rxq *
+static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
@@ -2286,7 +2271,7 @@ _get_free_rxq(struct bna_rx_mod *rx_mod)
return rxq;
}

-void
+static void
_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
bfa_q_qe_init(&rxq->qe);
@@ -2294,7 +2279,7 @@ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
rx_mod->rxq_free_count++;
}

-struct bna_rxp *
+static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
@@ -2310,7 +2295,7 @@ _get_free_rxp(struct bna_rx_mod *rx_mod)
return rxp;
}

-void
+static void
_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
bfa_q_qe_init(&rxp->qe);
@@ -2318,7 +2303,7 @@ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
rx_mod->rxp_free_count++;
}

-struct bna_rx *
+static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
@@ -2336,7 +2321,7 @@ _get_free_rx(struct bna_rx_mod *rx_mod)
return rx;
}

-void
+static void
_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
bfa_q_qe_init(&rx->qe);
@@ -2344,7 +2329,7 @@ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
rx_mod->rx_free_count++;
}

-void
+static void
_rx_init(struct bna_rx *rx, struct bna *bna)
{
rx->bna = bna;
@@ -2360,7 +2345,7 @@ _rx_init(struct bna_rx *rx, struct bna *bna)
rx->stop_cbarg = NULL;
}

-void
+static void
_rxp_add_rxqs(struct bna_rxp *rxp,
struct bna_rxq *q0,
struct bna_rxq *q1)
@@ -2383,7 +2368,7 @@ _rxp_add_rxqs(struct bna_rxp *rxp,
}
}

-void
+static void
_rxq_qpt_init(struct bna_rxq *rxq,
struct bna_rxp *rxp,
u32 page_count,
@@ -2412,7 +2397,7 @@ _rxq_qpt_init(struct bna_rxq *rxq,
}
}

-void
+static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
u32 page_count,
u32 page_size,
@@ -2441,13 +2426,13 @@ _rxp_cqpt_setup(struct bna_rxp *rxp,
}
}

-void
+static void
_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
{
list_add_tail(&rxp->qe, &rx->rxp_q);
}

-void
+static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
INIT_LIST_HEAD(&rx_mod->rx_free_q);
@@ -2460,7 +2445,7 @@ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
rx_mod->rxp_free_count = 0;
}

-void
+static void
_rx_ctor(struct bna_rx *rx, int id)
{
bfa_q_qe_init(&rx->qe);
@@ -2492,7 +2477,7 @@ bna_rx_cb_rxq_stopped_all(void *arg)
bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}

-void
+static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
enum bna_cb_status status)
{
@@ -2501,7 +2486,7 @@ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
bfa_wc_down(&rx_mod->rx_stop_wc);
}

-void
+static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
@@ -2511,7 +2496,7 @@ bna_rx_mod_cb_rx_stopped_all(void *arg)
rx_mod->stop_cbfn = NULL;
}

-void
+static void
bna_rx_start(struct bna_rx *rx)
{
rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
@@ -2519,7 +2504,7 @@ bna_rx_start(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_START);
}

-void
+static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
@@ -2532,7 +2517,7 @@ bna_rx_stop(struct bna_rx *rx)
}
}

-void
+static void
bna_rx_fail(struct bna_rx *rx)
{
/* Indicate port is not enabled, and failed */
@@ -2541,28 +2526,6 @@ bna_rx_fail(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_FAIL);
}

-void
-bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
-{
-bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
-if (rx->rxf.rxf_id < 32)
-rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
-else
-rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
-1 << (rx->rxf.rxf_id - 32));
-}
-
-void
-bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
-{
-bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
-if (rx->rxf.rxf_id < 32)
-rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
-else
-rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
-1 << (rx->rxf.rxf_id - 32);
-}
-
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
@@ -3731,7 +3694,7 @@ bna_tx_fail(struct bna_tx *tx)
bfa_fsm_send_event(tx, TX_E_FAIL);
}

-void
+static void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
struct bna_txq *txq;
@@ -28,7 +28,7 @@
#include "bna.h"
#include "cna.h"

-DEFINE_MUTEX(bnad_fwimg_mutex);
+static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
* Module params
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
*/
u32 bnad_rxqs_per_cq = 2;

-const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
* Local MACROS
@@ -687,7 +687,7 @@ bnad_enable_mbox_irq(struct bnad *bnad)
* Called with bnad->bna_lock held b'cos of
* bnad->cfg_flags access.
*/
-void
+static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
int irq = BNAD_GET_MBOX_IRQ(bnad);
@@ -956,11 +956,6 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

-void
-bnad_cb_stats_clr(struct bnad *bnad)
-{
-}
-
/* Resource allocation, free functions */

static void
@@ -1111,8 +1106,10 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
}

spin_lock_irqsave(&bnad->bna_lock, flags);
+
if (bnad->cfg_flags & BNAD_CF_MSIX)
disable_irq_nosync(irq);
+
spin_unlock_irqrestore(&bnad->bna_lock, flags);
return 0;
}
@@ -2243,7 +2240,6 @@ static void
bnad_enable_msix(struct bnad *bnad)
{
int i, ret;
-u32 tot_msix_num;
unsigned long flags;

spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2256,18 +2252,16 @@ bnad_enable_msix(struct bnad *bnad)
if (bnad->msix_table)
return;

-tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
-
bnad->msix_table =
-kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

if (!bnad->msix_table)
goto intx_mode;

-for (i = 0; i < tot_msix_num; i++)
+for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;

-ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
+ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
if (ret > 0) {
/* Not enough MSI-X vectors. */

@@ -2280,12 +2274,11 @@ bnad_enable_msix(struct bnad *bnad)
+ (bnad->num_rx
* bnad->num_rxp_per_rx) +
BNAD_MAILBOX_MSIX_VECTORS;
-tot_msix_num = bnad->msix_num + bnad->msix_diag_num;

/* Try once more with adjusted numbers */
/* If this fails, fall back to INTx */
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
-tot_msix_num);
+bnad->msix_num);
if (ret)
goto intx_mode;

@@ -2298,7 +2291,6 @@ intx_mode:
kfree(bnad->msix_table);
bnad->msix_table = NULL;
bnad->msix_num = 0;
-bnad->msix_diag_num = 0;
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad->cfg_flags &= ~BNAD_CF_MSIX;
bnad_q_num_init(bnad);
@@ -2946,7 +2938,6 @@ bnad_init(struct bnad *bnad,
bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
(bnad->num_rx * bnad->num_rxp_per_rx) +
BNAD_MAILBOX_MSIX_VECTORS;
-bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */

bnad->txq_depth = BNAD_TXQ_DEPTH;
bnad->rxq_depth = BNAD_RXQ_DEPTH;
@@ -3217,7 +3208,7 @@ bnad_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
}

-const struct pci_device_id bnad_pci_id_table[] = {
+static const struct pci_device_id bnad_pci_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
PCI_DEVICE_ID_BROCADE_CT),
@@ -248,7 +248,6 @@ struct bnad {
u64 mmio_len;

u32 msix_num;
-u32 msix_diag_num;
struct msix_entry *msix_table;

struct mutex conf_mutex;
@@ -22,7 +22,7 @@ const struct firmware *bfi_fw;
static u32 *bfi_image_ct_cna;
static u32 bfi_image_ct_cna_size;

-u32 *
+static u32 *
cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name)
{