mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-17 02:36:21 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (71 commits) [SCSI] fcoe: cleanup cpu selection for incoming requests [SCSI] fcoe: add fip retry to avoid missing critical keep alive [SCSI] libfc: fix warn on in lport retry [SCSI] libfc: Remove the reference to FCP packet from scsi_cmnd in case of error [SCSI] libfc: cleanup sending SRR request [SCSI] libfc: two minor changes in comments [SCSI] libfc, fcoe: ignore rx frame with wrong xid info [SCSI] libfc: release exchg cache [SCSI] libfc: use FC_MAX_ERROR_CNT [SCSI] fcoe: remove unused ptype field in fcoe_rcv_info [SCSI] bnx2fc: Update copyright and bump version to 1.0.4 [SCSI] bnx2fc: Tx BDs cache in write tasks [SCSI] bnx2fc: Do not arm CQ when there are no CQEs [SCSI] bnx2fc: hold tgt lock when calling cmd_release [SCSI] bnx2fc: Enable support for sequence level error recovery [SCSI] bnx2fc: HSI changes for tape [SCSI] bnx2fc: Handle REC_TOV error code from firmware [SCSI] bnx2fc: REC/SRR link service request and response handling [SCSI] bnx2fc: Support 'sequence cleanup' task [SCSI] dh_rdac: Associate HBA and storage in rdac_controller to support partitions in storage ...
This commit is contained in:
commit
6c6e3b828b
@ -1,3 +1,11 @@
|
||||
Release Date : Tue. Jul 26, 2011 17:00:00 PST 2010 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Adam Radford
|
||||
Current Version : 00.00.05.40-rc1
|
||||
Old Version : 00.00.05.38-rc1
|
||||
1. Fix FastPath I/O to work with degraded RAID 1.
|
||||
2. Add .change_queue_depth support.
|
||||
-------------------------------------------------------------------------------
|
||||
Release Date : Wed. May 11, 2011 17:00:00 PST 2010 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Adam Radford
|
||||
|
@ -827,7 +827,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
* DID_SOFT_ERROR is set.
|
||||
*/
|
||||
if (ioc->bus_type == SPI) {
|
||||
if (pScsiReq->CDB[0] == READ_6 ||
|
||||
if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
|
||||
pScsiReq->CDB[0] == READ_10 ||
|
||||
pScsiReq->CDB[0] == READ_12 ||
|
||||
pScsiReq->CDB[0] == READ_16 ||
|
||||
|
@ -27,6 +27,7 @@
|
||||
struct bfa_s;
|
||||
|
||||
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
|
||||
typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
|
||||
|
||||
/*
|
||||
* Interrupt message handlers
|
||||
@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
|
||||
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
|
||||
(__hcb_qe)->cbfn = (__cbfn); \
|
||||
(__hcb_qe)->cbarg = (__cbarg); \
|
||||
(__hcb_qe)->pre_rmv = BFA_FALSE; \
|
||||
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
|
||||
} while (0)
|
||||
|
||||
@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
|
||||
(__hcb_qe)->fw_status = (__status); \
|
||||
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
|
||||
} while (0)
|
||||
|
||||
#define bfa_cb_queue_done(__hcb_qe) do { \
|
||||
(__hcb_qe)->once = BFA_FALSE; \
|
||||
} while (0)
|
||||
@ -177,7 +184,7 @@ struct bfa_msix_s {
|
||||
struct bfa_hwif_s {
|
||||
void (*hw_reginit)(struct bfa_s *bfa);
|
||||
void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
|
||||
void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
|
||||
void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
|
||||
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
|
||||
void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
|
||||
void (*hw_msix_queue_install)(struct bfa_s *bfa);
|
||||
@ -268,10 +275,8 @@ struct bfa_iocfc_s {
|
||||
((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
|
||||
#define bfa_msix_uninstall(__bfa) \
|
||||
((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
|
||||
#define bfa_isr_rspq_ack(__bfa, __queue) do { \
|
||||
if ((__bfa)->iocfc.hwif.hw_rspq_ack) \
|
||||
(__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
|
||||
} while (0)
|
||||
#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
|
||||
((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
|
||||
#define bfa_isr_reqq_ack(__bfa, __queue) do { \
|
||||
if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
|
||||
(__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
|
||||
@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
|
||||
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
|
||||
|
||||
void bfa_hwcb_reginit(struct bfa_s *bfa);
|
||||
void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
|
||||
void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
|
||||
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
|
||||
void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
|
||||
void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
|
||||
@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
|
||||
void bfa_hwct_reginit(struct bfa_s *bfa);
|
||||
void bfa_hwct2_reginit(struct bfa_s *bfa);
|
||||
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
|
||||
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
|
||||
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
|
||||
void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
|
||||
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
|
||||
void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
|
||||
void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
|
||||
@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
|
||||
#define bfa_get_fw_clock_res(__bfa) \
|
||||
((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
|
||||
|
||||
/*
|
||||
* lun mask macros return NULL when min cfg is enabled and there is
|
||||
* no memory allocated for lunmask.
|
||||
*/
|
||||
#define bfa_get_lun_mask(__bfa) \
|
||||
((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
|
||||
(&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
|
||||
|
||||
#define bfa_get_lun_mask_list(_bfa) \
|
||||
((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
|
||||
(bfa_get_lun_mask(_bfa)->lun_list)
|
||||
|
||||
#define bfa_get_lun_mask_status(_bfa) \
|
||||
(((&(_bfa)->modules.dconf_mod)->min_cfg) \
|
||||
? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
|
||||
|
||||
void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
|
||||
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
|
||||
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
|
||||
@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
|
||||
|
||||
void bfa_iocfc_enable(struct bfa_s *bfa);
|
||||
void bfa_iocfc_disable(struct bfa_s *bfa);
|
||||
void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
|
||||
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
|
||||
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
|
||||
|
||||
struct bfa_cb_pending_q_s {
|
||||
struct bfa_cb_qe_s hcb_qe;
|
||||
void *data; /* Driver buffer */
|
||||
};
|
||||
|
||||
/* Common macros to operate on pending stats/attr apis */
|
||||
#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
|
||||
bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
|
||||
(__qe)->hcb_qe.cbfn = (__cbfn); \
|
||||
(__qe)->hcb_qe.cbarg = (__cbarg); \
|
||||
(__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
|
||||
(__qe)->data = (__data); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __BFA_H__ */
|
||||
|
@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
|
||||
&hal_mod_uf,
|
||||
&hal_mod_rport,
|
||||
&hal_mod_fcp,
|
||||
&hal_mod_dconf,
|
||||
NULL
|
||||
};
|
||||
|
||||
@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
|
||||
u32 pi, ci;
|
||||
struct list_head *waitq;
|
||||
|
||||
bfa_isr_rspq_ack(bfa, qid);
|
||||
|
||||
ci = bfa_rspq_ci(bfa, qid);
|
||||
pi = bfa_rspq_pi(bfa, qid);
|
||||
|
||||
@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
|
||||
}
|
||||
|
||||
/*
|
||||
* update CI
|
||||
* acknowledge RME completions and update CI
|
||||
*/
|
||||
bfa_rspq_ci(bfa, qid) = pi;
|
||||
writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
|
||||
mmiowb();
|
||||
bfa_isr_rspq_ack(bfa, qid, ci);
|
||||
|
||||
/*
|
||||
* Resume any pending requests in the corresponding reqq.
|
||||
@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
|
||||
int queue;
|
||||
|
||||
intr = readl(bfa->iocfc.bfa_regs.intr_status);
|
||||
if (!intr)
|
||||
return BFA_FALSE;
|
||||
|
||||
qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
|
||||
if (qintr)
|
||||
writel(qintr, bfa->iocfc.bfa_regs.intr_status);
|
||||
|
||||
/*
|
||||
* RME completion queue interrupt
|
||||
* Unconditional RME completion queue interrupt
|
||||
*/
|
||||
qintr = intr & __HFN_INT_RME_MASK;
|
||||
if (qintr && bfa->queue_process) {
|
||||
if (bfa->queue_process) {
|
||||
for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
|
||||
bfa_isr_rspq(bfa, queue);
|
||||
}
|
||||
|
||||
intr &= ~qintr;
|
||||
if (!intr)
|
||||
return BFA_TRUE;
|
||||
|
||||
@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
|
||||
__HFN_INT_MBOX_LPU1_CT2);
|
||||
intr &= __HFN_INT_ERR_MASK_CT2;
|
||||
} else {
|
||||
halt_isr = intr & __HFN_INT_LL_HALT;
|
||||
halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
|
||||
(intr & __HFN_INT_LL_HALT) : 0;
|
||||
pss_isr = intr & __HFN_INT_ERR_PSS;
|
||||
lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
|
||||
intr &= __HFN_INT_ERR_MASK;
|
||||
@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
} else {
|
||||
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
|
||||
iocfc->hwif.hw_reqq_ack = NULL;
|
||||
iocfc->hwif.hw_rspq_ack = NULL;
|
||||
iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
|
||||
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
|
||||
iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
|
||||
iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
|
||||
@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
|
||||
iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
|
||||
iocfc->hwif.hw_isr_mode_set = NULL;
|
||||
iocfc->hwif.hw_rspq_ack = NULL;
|
||||
iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
|
||||
}
|
||||
|
||||
iocfc->hwif.hw_reginit(bfa);
|
||||
@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
|
||||
|
||||
bfa->queue_process = BFA_TRUE;
|
||||
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
|
||||
bfa_isr_rspq_ack(bfa, i);
|
||||
bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
|
||||
|
||||
for (i = 0; hal_mods[i]; i++)
|
||||
hal_mods[i]->start(bfa);
|
||||
@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
|
||||
struct bfa_s *bfa = bfa_arg;
|
||||
|
||||
if (complete) {
|
||||
if (bfa->iocfc.cfgdone)
|
||||
if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
|
||||
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
|
||||
else
|
||||
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
|
||||
@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
|
||||
*/
|
||||
bfa_fcport_init(bfa);
|
||||
|
||||
if (iocfc->action == BFA_IOCFC_ACT_INIT)
|
||||
bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
|
||||
else {
|
||||
if (iocfc->action == BFA_IOCFC_ACT_INIT) {
|
||||
if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
|
||||
bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
|
||||
bfa_iocfc_init_cb, bfa);
|
||||
} else {
|
||||
if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
|
||||
bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
|
||||
bfa_iocfc_enable_cb, bfa);
|
||||
@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
|
||||
}
|
||||
|
||||
bfa_iocfc_send_cfg(bfa);
|
||||
bfa_dconf_modinit(bfa);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
|
||||
bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
|
||||
|
||||
bfa->queue_process = BFA_FALSE;
|
||||
bfa_ioc_disable(&bfa->ioc);
|
||||
bfa_dconf_modexit(bfa);
|
||||
if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
|
||||
bfa_ioc_disable(&bfa->ioc);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
|
||||
struct list_head *qe;
|
||||
struct list_head *qen;
|
||||
struct bfa_cb_qe_s *hcb_qe;
|
||||
bfa_cb_cbfn_status_t cbfn;
|
||||
|
||||
list_for_each_safe(qe, qen, comp_q) {
|
||||
hcb_qe = (struct bfa_cb_qe_s *) qe;
|
||||
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
|
||||
if (hcb_qe->pre_rmv) {
|
||||
/* qe is invalid after return, dequeue before cbfn() */
|
||||
list_del(qe);
|
||||
cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
|
||||
cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
|
||||
} else
|
||||
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
|
||||
while (!list_empty(comp_q)) {
|
||||
bfa_q_deq(comp_q, &qe);
|
||||
hcb_qe = (struct bfa_cb_qe_s *) qe;
|
||||
WARN_ON(hcb_qe->pre_rmv);
|
||||
hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
|
||||
{
|
||||
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
|
||||
if (bfa->iocfc.cfgdone == BFA_TRUE)
|
||||
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
|
||||
bfa_iocfc_init_cb, bfa);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the list of PCI vendor/device id lists supported by this
|
||||
|
@ -144,6 +144,7 @@ enum bfa_status {
|
||||
BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
|
||||
BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
|
||||
BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
|
||||
BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
|
||||
BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
|
||||
BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
|
||||
BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
|
||||
@ -164,6 +165,8 @@ enum bfa_status {
|
||||
BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
|
||||
BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
|
||||
* configuration */
|
||||
BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
|
||||
BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
|
||||
BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
|
||||
BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
|
||||
* this adapter */
|
||||
@ -172,11 +175,15 @@ enum bfa_status {
|
||||
BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
|
||||
BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
|
||||
BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
|
||||
BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
|
||||
BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
|
||||
BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
|
||||
BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
|
||||
BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
|
||||
BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
|
||||
BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
|
||||
BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
|
||||
BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
|
||||
BFA_STATUS_MAX_VAL /* Unknown error code */
|
||||
};
|
||||
#define bfa_status_t enum bfa_status
|
||||
@ -358,6 +365,139 @@ struct bfa_ioc_attr_s {
|
||||
u8 rsvd[4]; /* 64bit align */
|
||||
};
|
||||
|
||||
/*
|
||||
* AEN related definitions
|
||||
*/
|
||||
enum bfa_aen_category {
|
||||
BFA_AEN_CAT_ADAPTER = 1,
|
||||
BFA_AEN_CAT_PORT = 2,
|
||||
BFA_AEN_CAT_LPORT = 3,
|
||||
BFA_AEN_CAT_RPORT = 4,
|
||||
BFA_AEN_CAT_ITNIM = 5,
|
||||
BFA_AEN_CAT_AUDIT = 8,
|
||||
BFA_AEN_CAT_IOC = 9,
|
||||
};
|
||||
|
||||
/* BFA adapter level events */
|
||||
enum bfa_adapter_aen_event {
|
||||
BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
|
||||
BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
|
||||
};
|
||||
|
||||
struct bfa_adapter_aen_data_s {
|
||||
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
|
||||
u32 nports; /* Number of NPorts */
|
||||
wwn_t pwwn; /* WWN of one of its physical port */
|
||||
};
|
||||
|
||||
/* BFA physical port Level events */
|
||||
enum bfa_port_aen_event {
|
||||
BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
|
||||
BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
|
||||
BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
|
||||
BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
|
||||
BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
|
||||
BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
|
||||
BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
|
||||
BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
|
||||
BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
|
||||
BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
|
||||
BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
|
||||
BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
|
||||
BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
|
||||
BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
|
||||
BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
|
||||
};
|
||||
|
||||
enum bfa_port_aen_sfp_pom {
|
||||
BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
|
||||
BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
|
||||
BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
|
||||
BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
|
||||
};
|
||||
|
||||
struct bfa_port_aen_data_s {
|
||||
wwn_t pwwn; /* WWN of the physical port */
|
||||
wwn_t fwwn; /* WWN of the fabric port */
|
||||
u32 phy_port_num; /* For SFP related events */
|
||||
u16 ioc_type;
|
||||
u16 level; /* Only transitions will be informed */
|
||||
mac_t mac; /* MAC address of the ethernet port */
|
||||
u16 rsvd;
|
||||
};
|
||||
|
||||
/* BFA AEN logical port events */
|
||||
enum bfa_lport_aen_event {
|
||||
BFA_LPORT_AEN_NEW = 1, /* LPort created event */
|
||||
BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
|
||||
BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
|
||||
BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
|
||||
BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
|
||||
BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
|
||||
BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
|
||||
BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
|
||||
BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
|
||||
BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
|
||||
BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
|
||||
BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
|
||||
};
|
||||
|
||||
struct bfa_lport_aen_data_s {
|
||||
u16 vf_id; /* vf_id of this logical port */
|
||||
u16 roles; /* Logical port mode,IM/TM/IP etc */
|
||||
u32 rsvd;
|
||||
wwn_t ppwwn; /* WWN of its physical port */
|
||||
wwn_t lpwwn; /* WWN of this logical port */
|
||||
};
|
||||
|
||||
/* BFA ITNIM events */
|
||||
enum bfa_itnim_aen_event {
|
||||
BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
|
||||
BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
|
||||
BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
|
||||
};
|
||||
|
||||
struct bfa_itnim_aen_data_s {
|
||||
u16 vf_id; /* vf_id of the IT nexus */
|
||||
u16 rsvd[3];
|
||||
wwn_t ppwwn; /* WWN of its physical port */
|
||||
wwn_t lpwwn; /* WWN of logical port */
|
||||
wwn_t rpwwn; /* WWN of remote(target) port */
|
||||
};
|
||||
|
||||
/* BFA audit events */
|
||||
enum bfa_audit_aen_event {
|
||||
BFA_AUDIT_AEN_AUTH_ENABLE = 1,
|
||||
BFA_AUDIT_AEN_AUTH_DISABLE = 2,
|
||||
BFA_AUDIT_AEN_FLASH_ERASE = 3,
|
||||
BFA_AUDIT_AEN_FLASH_UPDATE = 4,
|
||||
};
|
||||
|
||||
struct bfa_audit_aen_data_s {
|
||||
wwn_t pwwn;
|
||||
int partition_inst;
|
||||
int partition_type;
|
||||
};
|
||||
|
||||
/* BFA IOC level events */
|
||||
enum bfa_ioc_aen_event {
|
||||
BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
|
||||
BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
|
||||
BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
|
||||
BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
|
||||
BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
|
||||
BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
|
||||
BFA_IOC_AEN_INVALID_VENDOR = 7,
|
||||
BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
|
||||
BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
|
||||
};
|
||||
|
||||
struct bfa_ioc_aen_data_s {
|
||||
wwn_t pwwn;
|
||||
u16 ioc_type;
|
||||
mac_t mac;
|
||||
};
|
||||
|
||||
/*
|
||||
* ---------------------- mfg definitions ------------
|
||||
*/
|
||||
@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
|
||||
/*
|
||||
* BOOT boot configuraton
|
||||
*/
|
||||
struct bfa_boot_cfg_s {
|
||||
u8 version;
|
||||
u8 rsvd1;
|
||||
u16 chksum;
|
||||
u8 enable; /* enable/disable SAN boot */
|
||||
u8 speed; /* boot speed settings */
|
||||
u8 topology; /* boot topology setting */
|
||||
u8 bootopt; /* bfa_boot_bootopt_t */
|
||||
u32 nbluns; /* number of boot luns */
|
||||
u32 rsvd2;
|
||||
struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
|
||||
struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
|
||||
};
|
||||
|
||||
struct bfa_boot_pbc_s {
|
||||
u8 enable; /* enable/disable SAN boot */
|
||||
u8 speed; /* boot speed settings */
|
||||
@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
|
||||
struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
|
||||
};
|
||||
|
||||
struct bfa_ethboot_cfg_s {
|
||||
u8 version;
|
||||
u8 rsvd1;
|
||||
u16 chksum;
|
||||
u8 enable; /* enable/disable Eth/PXE boot */
|
||||
u8 rsvd2;
|
||||
u16 vlan;
|
||||
};
|
||||
|
||||
/*
|
||||
* ASIC block configuration related structures
|
||||
*/
|
||||
@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
|
||||
*/
|
||||
#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
|
||||
|
||||
/* SFP state change notification event */
|
||||
#define BFA_SFP_SCN_REMOVED 0
|
||||
#define BFA_SFP_SCN_INSERTED 1
|
||||
#define BFA_SFP_SCN_POM 2
|
||||
#define BFA_SFP_SCN_FAILED 3
|
||||
#define BFA_SFP_SCN_UNSUPPORT 4
|
||||
#define BFA_SFP_SCN_VALID 5
|
||||
|
||||
enum bfa_defs_sfp_media_e {
|
||||
BFA_SFP_MEDIA_UNKNOWN = 0x00,
|
||||
BFA_SFP_MEDIA_CU = 0x01,
|
||||
|
@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
|
||||
u32 error_resets; /* error resets initiated by upsm */
|
||||
u32 sync_lost; /* Sync loss count */
|
||||
u32 sig_lost; /* Signal loss count */
|
||||
u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
|
||||
};
|
||||
|
||||
struct bfa_fw_port_physm_stats_s {
|
||||
@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
|
||||
* QoS states
|
||||
*/
|
||||
enum bfa_qos_state {
|
||||
BFA_QOS_DISABLED = 0, /* QoS is disabled */
|
||||
BFA_QOS_ONLINE = 1, /* QoS is online */
|
||||
BFA_QOS_OFFLINE = 2, /* QoS is offline */
|
||||
};
|
||||
@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
|
||||
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
|
||||
u32 tm_cleanups; /* TM cleanup requests */
|
||||
u32 tm_cleanup_comps; /* TM cleanup completions */
|
||||
u32 lm_lun_across_sg; /* LM lun is across sg data buf */
|
||||
u32 lm_lun_not_sup; /* LM lun not supported */
|
||||
u32 lm_rpl_data_changed; /* LM report-lun data changed */
|
||||
u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
|
||||
u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
|
||||
u32 lm_lun_not_rdy; /* LM lun not ready */
|
||||
};
|
||||
|
||||
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
|
||||
@ -785,7 +793,50 @@ enum bfa_port_linkstate_rsn {
|
||||
CEE_ISCSI_PRI_PFC_OFF = 42,
|
||||
CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
|
||||
};
|
||||
|
||||
#define MAX_LUN_MASK_CFG 16
|
||||
|
||||
/*
|
||||
* Initially flash content may be fff. On making LUN mask enable and disable
|
||||
* state chnage. when report lun command is being processed it goes from
|
||||
* BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
|
||||
* BFA_LUN_MASK_ACTIVE.
|
||||
*/
|
||||
enum bfa_ioim_lun_mask_state_s {
|
||||
BFA_IOIM_LUN_MASK_INACTIVE = 0,
|
||||
BFA_IOIM_LUN_MASK_ACTIVE = 1,
|
||||
BFA_IOIM_LUN_MASK_FETCHED = 2,
|
||||
};
|
||||
|
||||
enum bfa_lunmask_state_s {
|
||||
BFA_LUNMASK_DISABLED = 0x00,
|
||||
BFA_LUNMASK_ENABLED = 0x01,
|
||||
BFA_LUNMASK_MINCFG = 0x02,
|
||||
BFA_LUNMASK_UNINITIALIZED = 0xff,
|
||||
};
|
||||
|
||||
#pragma pack(1)
|
||||
/*
|
||||
* LUN mask configuration
|
||||
*/
|
||||
struct bfa_lun_mask_s {
|
||||
wwn_t lp_wwn;
|
||||
wwn_t rp_wwn;
|
||||
struct scsi_lun lun;
|
||||
u8 ua;
|
||||
u8 rsvd[3];
|
||||
u16 rp_tag;
|
||||
u8 lp_tag;
|
||||
u8 state;
|
||||
};
|
||||
|
||||
#define MAX_LUN_MASK_CFG 16
|
||||
struct bfa_lunmask_cfg_s {
|
||||
u32 status;
|
||||
u32 rsvd;
|
||||
struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
|
||||
};
|
||||
|
||||
/*
|
||||
* Physical port configuration
|
||||
*/
|
||||
@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
|
||||
|
||||
#pragma pack()
|
||||
|
||||
/*
|
||||
* AEN related definitions
|
||||
*/
|
||||
#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
|
||||
| BFA_PCI_VENDOR_ID_BROCADE)
|
||||
|
||||
/* BFA remote port events */
|
||||
enum bfa_rport_aen_event {
|
||||
BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
|
||||
BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
|
||||
BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
|
||||
BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
|
||||
BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
|
||||
};
|
||||
|
||||
struct bfa_rport_aen_data_s {
|
||||
u16 vf_id; /* vf_id of this logical port */
|
||||
u16 rsvd[3];
|
||||
wwn_t ppwwn; /* WWN of its physical port */
|
||||
wwn_t lpwwn; /* WWN of this logical port */
|
||||
wwn_t rpwwn; /* WWN of this remote port */
|
||||
union {
|
||||
struct bfa_rport_qos_attr_s qos;
|
||||
} priv;
|
||||
};
|
||||
|
||||
union bfa_aen_data_u {
|
||||
struct bfa_adapter_aen_data_s adapter;
|
||||
struct bfa_port_aen_data_s port;
|
||||
struct bfa_lport_aen_data_s lport;
|
||||
struct bfa_rport_aen_data_s rport;
|
||||
struct bfa_itnim_aen_data_s itnim;
|
||||
struct bfa_audit_aen_data_s audit;
|
||||
struct bfa_ioc_aen_data_s ioc;
|
||||
};
|
||||
|
||||
#define BFA_AEN_MAX_ENTRY 512
|
||||
|
||||
struct bfa_aen_entry_s {
|
||||
struct list_head qe;
|
||||
enum bfa_aen_category aen_category;
|
||||
u32 aen_type;
|
||||
union bfa_aen_data_u aen_data;
|
||||
struct timeval aen_tv;
|
||||
u32 seq_num;
|
||||
u32 bfad_num;
|
||||
};
|
||||
|
||||
#endif /* __BFA_DEFS_SVC_H__ */
|
||||
|
@ -56,6 +56,161 @@ struct scsi_cdb_s {
|
||||
|
||||
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
|
||||
|
||||
#define SCSI_SENSE_CUR_ERR 0x70
|
||||
#define SCSI_SENSE_DEF_ERR 0x71
|
||||
|
||||
/*
|
||||
* SCSI additional sense codes
|
||||
*/
|
||||
#define SCSI_ASC_LUN_NOT_READY 0x04
|
||||
#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
|
||||
#define SCSI_ASC_TOCC 0x3F
|
||||
|
||||
/*
|
||||
* SCSI additional sense code qualifiers
|
||||
*/
|
||||
#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
|
||||
#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
|
||||
|
||||
/*
|
||||
* Methods of reporting informational exceptions
|
||||
*/
|
||||
#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
|
||||
|
||||
struct scsi_report_luns_data_s {
|
||||
u32 lun_list_length; /* length of LUN list length */
|
||||
u32 reserved;
|
||||
struct scsi_lun lun[1]; /* first LUN in lun list */
|
||||
};
|
||||
|
||||
struct scsi_inquiry_vendor_s {
|
||||
u8 vendor_id[8];
|
||||
};
|
||||
|
||||
struct scsi_inquiry_prodid_s {
|
||||
u8 product_id[16];
|
||||
};
|
||||
|
||||
struct scsi_inquiry_prodrev_s {
|
||||
u8 product_rev[4];
|
||||
};
|
||||
|
||||
struct scsi_inquiry_data_s {
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 peripheral_qual:3; /* peripheral qualifier */
|
||||
u8 device_type:5; /* peripheral device type */
|
||||
u8 rmb:1; /* removable medium bit */
|
||||
u8 device_type_mod:7; /* device type modifier */
|
||||
u8 version;
|
||||
u8 aenc:1; /* async evt notification capability */
|
||||
u8 trm_iop:1; /* terminate I/O process */
|
||||
u8 norm_aca:1; /* normal ACA supported */
|
||||
u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
|
||||
u8 rsp_data_format:4;
|
||||
u8 additional_len;
|
||||
u8 sccs:1;
|
||||
u8 reserved1:7;
|
||||
u8 reserved2:1;
|
||||
u8 enc_serv:1; /* enclosure service component */
|
||||
u8 reserved3:1;
|
||||
u8 multi_port:1; /* multi-port device */
|
||||
u8 m_chngr:1; /* device in medium transport element */
|
||||
u8 ack_req_q:1; /* SIP specific bit */
|
||||
u8 addr32:1; /* SIP specific bit */
|
||||
u8 addr16:1; /* SIP specific bit */
|
||||
u8 rel_adr:1; /* relative address */
|
||||
u8 w_bus32:1;
|
||||
u8 w_bus16:1;
|
||||
u8 synchronous:1;
|
||||
u8 linked_commands:1;
|
||||
u8 trans_dis:1;
|
||||
u8 cmd_queue:1; /* command queueing supported */
|
||||
u8 soft_reset:1; /* soft reset alternative (VS) */
|
||||
#else
|
||||
u8 device_type:5; /* peripheral device type */
|
||||
u8 peripheral_qual:3; /* peripheral qualifier */
|
||||
u8 device_type_mod:7; /* device type modifier */
|
||||
u8 rmb:1; /* removable medium bit */
|
||||
u8 version;
|
||||
u8 rsp_data_format:4;
|
||||
u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
|
||||
u8 norm_aca:1; /* normal ACA supported */
|
||||
u8 terminate_iop:1;/* terminate I/O process */
|
||||
u8 aenc:1; /* async evt notification capability */
|
||||
u8 additional_len;
|
||||
u8 reserved1:7;
|
||||
u8 sccs:1;
|
||||
u8 addr16:1; /* SIP specific bit */
|
||||
u8 addr32:1; /* SIP specific bit */
|
||||
u8 ack_req_q:1; /* SIP specific bit */
|
||||
u8 m_chngr:1; /* device in medium transport element */
|
||||
u8 multi_port:1; /* multi-port device */
|
||||
u8 reserved3:1; /* TBD - Vendor Specific */
|
||||
u8 enc_serv:1; /* enclosure service component */
|
||||
u8 reserved2:1;
|
||||
u8 soft_seset:1; /* soft reset alternative (VS) */
|
||||
u8 cmd_queue:1; /* command queueing supported */
|
||||
u8 trans_dis:1;
|
||||
u8 linked_commands:1;
|
||||
u8 synchronous:1;
|
||||
u8 w_bus16:1;
|
||||
u8 w_bus32:1;
|
||||
u8 rel_adr:1; /* relative address */
|
||||
#endif
|
||||
struct scsi_inquiry_vendor_s vendor_id;
|
||||
struct scsi_inquiry_prodid_s product_id;
|
||||
struct scsi_inquiry_prodrev_s product_rev;
|
||||
u8 vendor_specific[20];
|
||||
u8 reserved4[40];
|
||||
};
|
||||
|
||||
/*
|
||||
* SCSI sense data format
|
||||
*/
|
||||
struct scsi_sense_s {
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 valid:1;
|
||||
u8 rsp_code:7;
|
||||
#else
|
||||
u8 rsp_code:7;
|
||||
u8 valid:1;
|
||||
#endif
|
||||
u8 seg_num;
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 file_mark:1;
|
||||
u8 eom:1; /* end of media */
|
||||
u8 ili:1; /* incorrect length indicator */
|
||||
u8 reserved:1;
|
||||
u8 sense_key:4;
|
||||
#else
|
||||
u8 sense_key:4;
|
||||
u8 reserved:1;
|
||||
u8 ili:1; /* incorrect length indicator */
|
||||
u8 eom:1; /* end of media */
|
||||
u8 file_mark:1;
|
||||
#endif
|
||||
u8 information[4]; /* device-type or cmd specific info */
|
||||
u8 add_sense_length; /* additional sense length */
|
||||
u8 command_info[4];/* command specific information */
|
||||
u8 asc; /* additional sense code */
|
||||
u8 ascq; /* additional sense code qualifier */
|
||||
u8 fru_code; /* field replaceable unit code */
|
||||
#ifdef __BIG_ENDIAN
|
||||
u8 sksv:1; /* sense key specific valid */
|
||||
u8 c_d:1; /* command/data bit */
|
||||
u8 res1:2;
|
||||
u8 bpv:1; /* bit pointer valid */
|
||||
u8 bpointer:3; /* bit pointer */
|
||||
#else
|
||||
u8 bpointer:3; /* bit pointer */
|
||||
u8 bpv:1; /* bit pointer valid */
|
||||
u8 res1:2;
|
||||
u8 c_d:1; /* command/data bit */
|
||||
u8 sksv:1; /* sense key specific valid */
|
||||
#endif
|
||||
u8 fpointer[2]; /* field pointer */
|
||||
};
|
||||
|
||||
/*
|
||||
* Fibre Channel Header Structure (FCHS) definition
|
||||
*/
|
||||
|
@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
|
||||
* BFA ITNIM Related definitions
|
||||
*/
|
||||
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
|
||||
static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
|
||||
static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
|
||||
static void bfa_ioim_lm_init(struct bfa_s *bfa);
|
||||
|
||||
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
|
||||
(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
|
||||
@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define bfa_ioim_rp_wwn(__ioim) \
|
||||
(((struct bfa_fcs_rport_s *) \
|
||||
(__ioim)->itnim->rport->rport_drv)->pwwn)
|
||||
|
||||
#define bfa_ioim_lp_wwn(__ioim) \
|
||||
((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
|
||||
(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
|
||||
|
||||
#define bfa_itnim_sler_cb(__itnim) do { \
|
||||
if ((__itnim)->bfa->fcs) \
|
||||
bfa_cb_itnim_sler((__itnim)->ditn); \
|
||||
@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
enum bfa_ioim_lm_status {
|
||||
BFA_IOIM_LM_PRESENT = 1,
|
||||
BFA_IOIM_LM_LUN_NOT_SUP = 2,
|
||||
BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
|
||||
BFA_IOIM_LM_LUN_NOT_RDY = 4,
|
||||
};
|
||||
|
||||
enum bfa_ioim_lm_ua_status {
|
||||
BFA_IOIM_LM_UA_RESET = 0,
|
||||
BFA_IOIM_LM_UA_SET = 1,
|
||||
};
|
||||
|
||||
/*
|
||||
* itnim state machine event
|
||||
*/
|
||||
@ -122,6 +145,9 @@ enum bfa_ioim_event {
|
||||
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
|
||||
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
|
||||
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
|
||||
BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
|
||||
BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
|
||||
BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
|
||||
};
|
||||
|
||||
|
||||
@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
|
||||
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
|
||||
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
|
||||
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
|
||||
static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
|
||||
static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
|
||||
static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
|
||||
|
||||
/*
|
||||
* forward declaration of BFA IO state machine
|
||||
@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
|
||||
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
|
||||
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
void
|
||||
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
struct bfa_itnim_latency_s *io_lat =
|
||||
&(ioim->itnim->ioprofile.io_latency);
|
||||
u32 val, idx;
|
||||
|
||||
val = (u32)(jiffies - ioim->start_time);
|
||||
idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
|
||||
bfa_itnim_ioprofile_update(ioim->itnim, idx);
|
||||
|
||||
io_lat->count[idx]++;
|
||||
io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
|
||||
io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
|
||||
io_lat->avg[idx] += val;
|
||||
}
|
||||
|
||||
void
|
||||
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
ioim->start_time = jiffies;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
|
||||
{
|
||||
struct bfa_itnim_s *itnim;
|
||||
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
|
||||
struct list_head *qe, *qen;
|
||||
|
||||
/* accumulate IO stats from itnim */
|
||||
list_for_each_safe(qe, qen, &fcpim->itnim_q) {
|
||||
itnim = (struct bfa_itnim_s *) qe;
|
||||
bfa_itnim_clear_stats(itnim);
|
||||
}
|
||||
fcpim->io_profile = BFA_TRUE;
|
||||
fcpim->io_profile_start_time = time;
|
||||
fcpim->profile_comp = bfa_ioim_profile_comp;
|
||||
fcpim->profile_start = bfa_ioim_profile_start;
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_profile_off(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
|
||||
fcpim->io_profile = BFA_FALSE;
|
||||
fcpim->io_profile_start_time = 0;
|
||||
fcpim->profile_comp = NULL;
|
||||
fcpim->profile_start = NULL;
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
u16
|
||||
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
|
||||
{
|
||||
@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
|
||||
bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
|
||||
}
|
||||
|
||||
#define bfa_io_lat_clock_res_div HZ
|
||||
#define bfa_io_lat_clock_res_mul 1000
|
||||
bfa_status_t
|
||||
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
|
||||
struct bfa_itnim_ioprofile_s *ioprofile)
|
||||
{
|
||||
struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
|
||||
if (!fcpim->io_profile)
|
||||
return BFA_STATUS_IOPROFILE_OFF;
|
||||
|
||||
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
|
||||
itnim->ioprofile.io_profile_start_time =
|
||||
bfa_io_profile_start_time(itnim->bfa);
|
||||
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
|
||||
itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
|
||||
*ioprofile = itnim->ioprofile;
|
||||
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
void
|
||||
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
|
||||
{
|
||||
@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_abort, ioim);
|
||||
__bfa_cb_ioim_abort, ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_LM_LUN_NOT_SUP:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_ioim_move_to_comp_q(ioim);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_lm_lun_not_sup, ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_LM_RPL_DC:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_ioim_move_to_comp_q(ioim);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_lm_rpl_dc, ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_LM_LUN_NOT_RDY:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_ioim_move_to_comp_q(ioim);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_lm_lun_not_rdy, ioim);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from bfa_fcpim_start after the bfa_init() with flash read
|
||||
* is complete by driver. now invalidate the stale content of lun mask
|
||||
* like unit attention, rp tag and lp tag.
|
||||
*/
|
||||
static void
|
||||
bfa_ioim_lm_init(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_lun_mask_s *lunm_list;
|
||||
int i;
|
||||
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return;
|
||||
|
||||
lunm_list = bfa_get_lun_mask_list(bfa);
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
|
||||
lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
|
||||
lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Validate LUN for LUN masking
|
||||
*/
|
||||
static enum bfa_ioim_lm_status
|
||||
bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
|
||||
struct bfa_rport_s *rp, struct scsi_lun lun)
|
||||
{
|
||||
u8 i;
|
||||
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
|
||||
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
|
||||
struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
|
||||
|
||||
if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
|
||||
(scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
|
||||
return BFA_IOIM_LM_PRESENT;
|
||||
}
|
||||
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
|
||||
if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
|
||||
continue;
|
||||
|
||||
if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
|
||||
scsilun_to_int((struct scsi_lun *)&lun))
|
||||
&& (rp->rport_tag == lun_list[i].rp_tag)
|
||||
&& ((u8)ioim->itnim->rport->rport_info.lp_tag ==
|
||||
lun_list[i].lp_tag)) {
|
||||
bfa_trc(ioim->bfa, lun_list[i].rp_tag);
|
||||
bfa_trc(ioim->bfa, lun_list[i].lp_tag);
|
||||
bfa_trc(ioim->bfa, scsilun_to_int(
|
||||
(struct scsi_lun *)&lun_list[i].lun));
|
||||
|
||||
if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
|
||||
((cdb->scsi_cdb[0] != INQUIRY) ||
|
||||
(cdb->scsi_cdb[0] != REPORT_LUNS))) {
|
||||
lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
|
||||
return BFA_IOIM_LM_RPL_DATA_CHANGED;
|
||||
}
|
||||
|
||||
if (cdb->scsi_cdb[0] == REPORT_LUNS)
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
|
||||
|
||||
return BFA_IOIM_LM_PRESENT;
|
||||
}
|
||||
}
|
||||
|
||||
if ((cdb->scsi_cdb[0] == INQUIRY) &&
|
||||
(scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
|
||||
return BFA_IOIM_LM_PRESENT;
|
||||
}
|
||||
|
||||
if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
|
||||
return BFA_IOIM_LM_LUN_NOT_RDY;
|
||||
|
||||
return BFA_IOIM_LM_LUN_NOT_SUP;
|
||||
}
|
||||
|
||||
static bfa_boolean_t
|
||||
bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
return BFA_TRUE;
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
|
||||
int buf_lun_cnt)
|
||||
{
|
||||
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
|
||||
struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
|
||||
struct scsi_lun lun;
|
||||
int i, j;
|
||||
|
||||
bfa_trc(ioim->bfa, buf_lun_cnt);
|
||||
for (j = 0; j < buf_lun_cnt; j++) {
|
||||
lun = *((struct scsi_lun *)(lun_data + j));
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
|
||||
continue;
|
||||
if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
|
||||
(lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
|
||||
(scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
|
||||
== scsilun_to_int((struct scsi_lun *)&lun))) {
|
||||
lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
|
||||
break;
|
||||
}
|
||||
} /* next lun in mask DB */
|
||||
} /* next lun in buf */
|
||||
}
|
||||
|
||||
static int
|
||||
bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
|
||||
struct scsi_report_luns_data_s *rl)
|
||||
{
|
||||
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
|
||||
struct scatterlist *sg = scsi_sglist(cmnd);
|
||||
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
|
||||
struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
|
||||
int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
|
||||
int lun_across_sg_bytes, bytes_from_next_buf;
|
||||
u64 last_lun, temp_last_lun;
|
||||
|
||||
/* fetch luns from the first sg element */
|
||||
bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
|
||||
(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
|
||||
|
||||
/* fetch luns from multiple sg elements */
|
||||
scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
|
||||
if (sgeid == 0) {
|
||||
prev_sg_len = sg_dma_len(sg);
|
||||
prev_rl_data = (struct scsi_lun *)
|
||||
phys_to_virt(sg_dma_address(sg));
|
||||
continue;
|
||||
}
|
||||
|
||||
/* if the buf is having more data */
|
||||
lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
|
||||
if (lun_across_sg_bytes) {
|
||||
bfa_trc(ioim->bfa, lun_across_sg_bytes);
|
||||
bfa_stats(ioim->itnim, lm_lun_across_sg);
|
||||
bytes_from_next_buf = sizeof(struct scsi_lun) -
|
||||
lun_across_sg_bytes;
|
||||
|
||||
/* from next buf take higher bytes */
|
||||
temp_last_lun = *((u64 *)
|
||||
phys_to_virt(sg_dma_address(sg)));
|
||||
last_lun |= temp_last_lun >>
|
||||
(lun_across_sg_bytes * BITS_PER_BYTE);
|
||||
|
||||
/* from prev buf take higher bytes */
|
||||
temp_last_lun = *((u64 *)(prev_rl_data +
|
||||
(prev_sg_len - lun_across_sg_bytes)));
|
||||
temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
|
||||
last_lun = last_lun | (temp_last_lun <<
|
||||
(bytes_from_next_buf * BITS_PER_BYTE));
|
||||
|
||||
bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
|
||||
} else
|
||||
bytes_from_next_buf = 0;
|
||||
|
||||
*pgdlen += sg_dma_len(sg);
|
||||
prev_sg_len = sg_dma_len(sg);
|
||||
prev_rl_data = (struct scsi_lun *)
|
||||
phys_to_virt(sg_dma_address(sg));
|
||||
bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
|
||||
bytes_from_next_buf,
|
||||
sg_dma_len(sg) / sizeof(struct scsi_lun));
|
||||
}
|
||||
|
||||
/* update the report luns data - based on fetched luns */
|
||||
sg = scsi_sglist(cmnd);
|
||||
base_rl_data = (struct scsi_lun *)rl->lun;
|
||||
base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
|
||||
for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
|
||||
base_rl_data[j] = lun_list[i].lun;
|
||||
lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
|
||||
j++;
|
||||
lun_fetched_cnt++;
|
||||
}
|
||||
|
||||
if (j > base_count) {
|
||||
j = 0;
|
||||
sg = sg_next(sg);
|
||||
base_rl_data = (struct scsi_lun *)
|
||||
phys_to_virt(sg_dma_address(sg));
|
||||
base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
|
||||
}
|
||||
}
|
||||
|
||||
bfa_trc(ioim->bfa, lun_fetched_cnt);
|
||||
return lun_fetched_cnt;
|
||||
}
|
||||
|
||||
static bfa_boolean_t
|
||||
bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
struct scsi_inquiry_data_s *inq;
|
||||
struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
|
||||
|
||||
bfa_trc(ioim->bfa, inq->device_type);
|
||||
inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bfa_boolean_t
|
||||
bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
|
||||
struct scatterlist *sg = scsi_sglist(cmnd);
|
||||
struct bfi_ioim_rsp_s *m;
|
||||
struct scsi_report_luns_data_s *rl = NULL;
|
||||
int lun_count = 0, lun_fetched_cnt = 0;
|
||||
u32 residue, pgdlen = 0;
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
|
||||
return BFA_TRUE;
|
||||
|
||||
m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
|
||||
if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
|
||||
return BFA_TRUE;
|
||||
|
||||
pgdlen = sg_dma_len(sg);
|
||||
bfa_trc(ioim->bfa, pgdlen);
|
||||
rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
|
||||
lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
|
||||
lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
|
||||
|
||||
if (lun_count == lun_fetched_cnt)
|
||||
return BFA_TRUE;
|
||||
|
||||
bfa_trc(ioim->bfa, lun_count);
|
||||
bfa_trc(ioim->bfa, lun_fetched_cnt);
|
||||
bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
|
||||
|
||||
if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
|
||||
rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
|
||||
sizeof(struct scsi_lun);
|
||||
else
|
||||
bfa_stats(ioim->itnim, lm_small_buf_addresidue);
|
||||
|
||||
bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
|
||||
bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
|
||||
|
||||
residue = be32_to_cpu(m->residue);
|
||||
residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
|
||||
bfa_stats(ioim->itnim, lm_wire_residue_changed);
|
||||
m->residue = be32_to_cpu(residue);
|
||||
bfa_trc(ioim->bfa, ioim->nsges);
|
||||
return BFA_FALSE;
|
||||
}
|
||||
|
||||
static void
|
||||
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
|
||||
@ -2067,6 +2454,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
|
||||
m->scsi_status, sns_len, snsinfo, residue);
|
||||
}
|
||||
|
||||
static void
|
||||
__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
struct bfa_ioim_s *ioim = cbarg;
|
||||
int sns_len = 0xD;
|
||||
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
|
||||
struct scsi_sense_s *snsinfo;
|
||||
|
||||
if (!complete) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
|
||||
return;
|
||||
}
|
||||
|
||||
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
|
||||
ioim->fcpim->fcp, ioim->iotag);
|
||||
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
|
||||
snsinfo->add_sense_length = 0xa;
|
||||
snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
|
||||
snsinfo->sense_key = ILLEGAL_REQUEST;
|
||||
bfa_trc(ioim->bfa, residue);
|
||||
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
|
||||
SCSI_STATUS_CHECK_CONDITION, sns_len,
|
||||
(u8 *)snsinfo, residue);
|
||||
}
|
||||
|
||||
static void
|
||||
__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
struct bfa_ioim_s *ioim = cbarg;
|
||||
int sns_len = 0xD;
|
||||
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
|
||||
struct scsi_sense_s *snsinfo;
|
||||
|
||||
if (!complete) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
|
||||
return;
|
||||
}
|
||||
|
||||
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
|
||||
ioim->iotag);
|
||||
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
|
||||
snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
|
||||
snsinfo->asc = SCSI_ASC_TOCC;
|
||||
snsinfo->add_sense_length = 0x6;
|
||||
snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
|
||||
bfa_trc(ioim->bfa, residue);
|
||||
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
|
||||
SCSI_STATUS_CHECK_CONDITION, sns_len,
|
||||
(u8 *)snsinfo, residue);
|
||||
}
|
||||
|
||||
static void
|
||||
__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
struct bfa_ioim_s *ioim = cbarg;
|
||||
int sns_len = 0xD;
|
||||
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
|
||||
struct scsi_sense_s *snsinfo;
|
||||
|
||||
if (!complete) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
|
||||
return;
|
||||
}
|
||||
|
||||
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
|
||||
ioim->fcpim->fcp, ioim->iotag);
|
||||
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
|
||||
snsinfo->add_sense_length = 0xa;
|
||||
snsinfo->sense_key = NOT_READY;
|
||||
snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
|
||||
snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
|
||||
bfa_trc(ioim->bfa, residue);
|
||||
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
|
||||
SCSI_STATUS_CHECK_CONDITION, sns_len,
|
||||
(u8 *)snsinfo, residue);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
|
||||
u16 rp_tag, u8 lp_tag)
|
||||
{
|
||||
struct bfa_lun_mask_s *lun_list;
|
||||
u8 i;
|
||||
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return;
|
||||
|
||||
lun_list = bfa_get_lun_mask_list(bfa);
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
|
||||
if ((lun_list[i].lp_wwn == lp_wwn) &&
|
||||
(lun_list[i].rp_wwn == rp_wwn)) {
|
||||
lun_list[i].rp_tag = rp_tag;
|
||||
lun_list[i].lp_tag = lp_tag;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* set UA for all active luns in LM DB
|
||||
*/
|
||||
static void
|
||||
bfa_ioim_lm_set_ua(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_lun_mask_s *lunm_list;
|
||||
int i;
|
||||
|
||||
lunm_list = bfa_get_lun_mask_list(bfa);
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
|
||||
continue;
|
||||
lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
|
||||
}
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
|
||||
{
|
||||
struct bfa_lunmask_cfg_s *lun_mask;
|
||||
|
||||
bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
if (bfa_get_lun_mask_status(bfa) == update)
|
||||
return BFA_STATUS_NO_CHANGE;
|
||||
|
||||
lun_mask = bfa_get_lun_mask(bfa);
|
||||
lun_mask->status = update;
|
||||
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
|
||||
bfa_ioim_lm_set_ua(bfa);
|
||||
|
||||
return bfa_dconf_update(bfa);
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
|
||||
{
|
||||
int i;
|
||||
struct bfa_lun_mask_s *lunm_list;
|
||||
|
||||
bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
lunm_list = bfa_get_lun_mask_list(bfa);
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
|
||||
if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
|
||||
bfa_rport_unset_lunmask(bfa,
|
||||
BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
|
||||
}
|
||||
}
|
||||
|
||||
memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
|
||||
return bfa_dconf_update(bfa);
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
|
||||
{
|
||||
struct bfa_lunmask_cfg_s *lun_mask;
|
||||
|
||||
bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
lun_mask = bfa_get_lun_mask(bfa);
|
||||
memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
|
||||
wwn_t rpwwn, struct scsi_lun lun)
|
||||
{
|
||||
struct bfa_lun_mask_s *lunm_list;
|
||||
struct bfa_rport_s *rp = NULL;
|
||||
int i, free_index = MAX_LUN_MASK_CFG + 1;
|
||||
struct bfa_fcs_lport_s *port = NULL;
|
||||
struct bfa_fcs_rport_s *rp_fcs;
|
||||
|
||||
bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
|
||||
vf_id, *pwwn);
|
||||
if (port) {
|
||||
*pwwn = port->port_cfg.pwwn;
|
||||
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
|
||||
rp = rp_fcs->bfa_rport;
|
||||
}
|
||||
|
||||
lunm_list = bfa_get_lun_mask_list(bfa);
|
||||
/* if entry exists */
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
|
||||
free_index = i;
|
||||
if ((lunm_list[i].lp_wwn == *pwwn) &&
|
||||
(lunm_list[i].rp_wwn == rpwwn) &&
|
||||
(scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
|
||||
scsilun_to_int((struct scsi_lun *)&lun)))
|
||||
return BFA_STATUS_ENTRY_EXISTS;
|
||||
}
|
||||
|
||||
if (free_index > MAX_LUN_MASK_CFG)
|
||||
return BFA_STATUS_MAX_ENTRY_REACHED;
|
||||
|
||||
if (rp) {
|
||||
lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
|
||||
rp->rport_info.local_pid);
|
||||
lunm_list[free_index].rp_tag = rp->rport_tag;
|
||||
} else {
|
||||
lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
|
||||
lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
|
||||
}
|
||||
|
||||
lunm_list[free_index].lp_wwn = *pwwn;
|
||||
lunm_list[free_index].rp_wwn = rpwwn;
|
||||
lunm_list[free_index].lun = lun;
|
||||
lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
|
||||
|
||||
/* set for all luns in this rp */
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if ((lunm_list[i].lp_wwn == *pwwn) &&
|
||||
(lunm_list[i].rp_wwn == rpwwn))
|
||||
lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
|
||||
}
|
||||
|
||||
return bfa_dconf_update(bfa);
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
|
||||
wwn_t rpwwn, struct scsi_lun lun)
|
||||
{
|
||||
struct bfa_lun_mask_s *lunm_list;
|
||||
struct bfa_rport_s *rp = NULL;
|
||||
struct bfa_fcs_lport_s *port = NULL;
|
||||
struct bfa_fcs_rport_s *rp_fcs;
|
||||
int i;
|
||||
|
||||
/* in min cfg lunm_list could be NULL but no commands should run. */
|
||||
if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
|
||||
bfa_trc(bfa, *pwwn);
|
||||
bfa_trc(bfa, rpwwn);
|
||||
bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
|
||||
|
||||
if (*pwwn == 0) {
|
||||
port = bfa_fcs_lookup_port(
|
||||
&((struct bfad_s *)bfa->bfad)->bfa_fcs,
|
||||
vf_id, *pwwn);
|
||||
if (port) {
|
||||
*pwwn = port->port_cfg.pwwn;
|
||||
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
|
||||
rp = rp_fcs->bfa_rport;
|
||||
}
|
||||
}
|
||||
|
||||
lunm_list = bfa_get_lun_mask_list(bfa);
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if ((lunm_list[i].lp_wwn == *pwwn) &&
|
||||
(lunm_list[i].rp_wwn == rpwwn) &&
|
||||
(scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
|
||||
scsilun_to_int((struct scsi_lun *)&lun))) {
|
||||
lunm_list[i].lp_wwn = 0;
|
||||
lunm_list[i].rp_wwn = 0;
|
||||
int_to_scsilun(0, &lunm_list[i].lun);
|
||||
lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
|
||||
if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
|
||||
lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
|
||||
lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
|
||||
}
|
||||
return bfa_dconf_update(bfa);
|
||||
}
|
||||
}
|
||||
|
||||
/* set for all luns in this rp */
|
||||
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
|
||||
if ((lunm_list[i].lp_wwn == *pwwn) &&
|
||||
(lunm_list[i].rp_wwn == rpwwn))
|
||||
lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
|
||||
}
|
||||
|
||||
return BFA_STATUS_ENTRY_NOT_EXISTS;
|
||||
}
|
||||
|
||||
static void
|
||||
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
|
||||
return;
|
||||
}
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
|
||||
0, 0, NULL, 0);
|
||||
}
|
||||
@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
|
||||
return;
|
||||
}
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
|
||||
0, 0, NULL, 0);
|
||||
}
|
||||
@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
|
||||
return;
|
||||
}
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
|
||||
}
|
||||
|
||||
@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
|
||||
ioim->bfa = fcpim->bfa;
|
||||
ioim->fcpim = fcpim;
|
||||
ioim->iosp = iosp;
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
INIT_LIST_HEAD(&ioim->sgpg_q);
|
||||
bfa_reqq_winit(&ioim->iosp->reqq_wait,
|
||||
bfa_ioim_qresume, ioim);
|
||||
@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
evt = BFA_IOIM_SM_DONE;
|
||||
else
|
||||
evt = BFA_IOIM_SM_COMP;
|
||||
ioim->proc_rsp_data(ioim);
|
||||
break;
|
||||
|
||||
case BFI_IOIM_STS_TIMEDOUT:
|
||||
@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
if (rsp->abort_tag != ioim->abort_tag) {
|
||||
bfa_trc(ioim->bfa, rsp->abort_tag);
|
||||
bfa_trc(ioim->bfa, ioim->abort_tag);
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
|
||||
bfa_sm_send_event(ioim, evt);
|
||||
}
|
||||
|
||||
@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
|
||||
|
||||
bfa_ioim_cb_profile_comp(fcpim, ioim);
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
|
||||
|
||||
if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
|
||||
else
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
|
||||
void
|
||||
bfa_ioim_start(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
|
||||
struct bfa_lps_s *lps;
|
||||
enum bfa_ioim_lm_status status;
|
||||
struct scsi_lun scsilun;
|
||||
|
||||
if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
|
||||
lps = BFA_IOIM_TO_LPS(ioim);
|
||||
int_to_scsilun(cmnd->device->lun, &scsilun);
|
||||
status = bfa_ioim_lm_check(ioim, lps,
|
||||
ioim->itnim->rport, scsilun);
|
||||
if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
|
||||
bfa_stats(ioim->itnim, lm_lun_not_rdy);
|
||||
return;
|
||||
}
|
||||
|
||||
if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
|
||||
bfa_stats(ioim->itnim, lm_lun_not_sup);
|
||||
return;
|
||||
}
|
||||
|
||||
if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
|
||||
bfa_stats(ioim->itnim, lm_rpl_data_changed);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
|
||||
|
||||
/*
|
||||
@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
|
||||
static void
|
||||
bfa_fcp_start(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
|
||||
|
||||
/*
|
||||
* bfa_init() with flash read is complete. now invalidate the stale
|
||||
* content of lun mask like unit attention, rp tag and lp tag.
|
||||
*/
|
||||
bfa_ioim_lm_init(fcp->bfa);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
|
||||
if (n >= (1UL)<<22)
|
||||
return BFA_IOBUCKET_MAX - 1;
|
||||
n >>= 8;
|
||||
if (n >= (1UL)<<16)
|
||||
n >>= 16; pos += 16;
|
||||
if (n >= 1 << 8)
|
||||
n >>= 8; pos += 8;
|
||||
if (n >= 1 << 4)
|
||||
n >>= 4; pos += 4;
|
||||
if (n >= 1 << 2)
|
||||
n >>= 2; pos += 2;
|
||||
if (n >= (1UL)<<16) {
|
||||
n >>= 16;
|
||||
pos += 16;
|
||||
}
|
||||
if (n >= 1 << 8) {
|
||||
n >>= 8;
|
||||
pos += 8;
|
||||
}
|
||||
if (n >= 1 << 4) {
|
||||
n >>= 4;
|
||||
pos += 4;
|
||||
}
|
||||
if (n >= 1 << 2) {
|
||||
n >>= 2;
|
||||
pos += 2;
|
||||
}
|
||||
if (n >= 1 << 1)
|
||||
pos += 1;
|
||||
|
||||
@ -102,6 +110,7 @@ struct bfad_ioim_s;
|
||||
struct bfad_tskim_s;
|
||||
|
||||
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
|
||||
typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
|
||||
|
||||
struct bfa_fcpim_s {
|
||||
struct bfa_s *bfa;
|
||||
@ -115,7 +124,7 @@ struct bfa_fcpim_s {
|
||||
u32 path_tov;
|
||||
u16 q_depth;
|
||||
u8 reqq; /* Request queue to be used */
|
||||
u8 rsvd;
|
||||
u8 lun_masking_pending;
|
||||
struct list_head itnim_q; /* queue of active itnim */
|
||||
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
|
||||
struct list_head ioim_comp_q; /* IO global comp Q */
|
||||
@ -170,7 +179,9 @@ struct bfa_ioim_s {
|
||||
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
|
||||
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
|
||||
u8 reqq; /* Request queue for I/O */
|
||||
u8 mode; /* IO is passthrough or not */
|
||||
u64 start_time; /* IO's Profile start val */
|
||||
bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
|
||||
};
|
||||
|
||||
struct bfa_ioim_sp_s {
|
||||
@ -250,6 +261,10 @@ struct bfa_itnim_s {
|
||||
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
|
||||
} while (0)
|
||||
|
||||
#define BFA_IOIM_TO_LPS(__ioim) \
|
||||
BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
|
||||
__ioim->itnim->rport->rport_info.lp_tag)
|
||||
|
||||
static inline bfa_boolean_t
|
||||
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
|
||||
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
|
||||
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
|
||||
struct bfa_itnim_iostats_s *itnim_stats);
|
||||
bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
|
||||
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
|
||||
|
||||
#define bfa_fcpim_ioredirect_enabled(__bfa) \
|
||||
(((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
|
||||
@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
|
||||
void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
|
||||
enum bfi_tskim_status tsk_status);
|
||||
|
||||
void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
|
||||
wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
|
||||
bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
|
||||
bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
|
||||
bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
|
||||
wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
|
||||
bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
|
||||
wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
|
||||
bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
|
||||
|
||||
#endif /* __BFA_FCPIM_H__ */
|
||||
|
@ -20,6 +20,7 @@
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
#include "bfa_fcs.h"
|
||||
#include "bfa_fcbuild.h"
|
||||
|
||||
@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
bfa_trc(fabric->fcs, status);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Send AEN notification
|
||||
*/
|
||||
static void
|
||||
bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
|
||||
enum bfa_port_aen_event event)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
|
||||
aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
|
||||
BFA_AEN_CAT_PORT, event);
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* @param[in] fabric - fabric
|
||||
@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
|
||||
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
|
||||
"Base port WWN = %s Fabric WWN = %s\n",
|
||||
pwwn_ptr, fwwn_ptr);
|
||||
bfa_fcs_fabric_aen_post(&fabric->bport,
|
||||
BFA_PORT_AEN_FABRIC_NAME_CHANGE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -675,6 +675,7 @@ struct bfa_fcs_s {
|
||||
struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
|
||||
struct bfa_fcs_stats_s stats; /* FCS statistics */
|
||||
struct bfa_wc_s wc; /* waiting counter */
|
||||
int fcs_aen_seq;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
|
||||
struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
bfa_status_t req_status, u32 rsp_len,
|
||||
u32 resid_len, struct fchs_s *rsp_fchs);
|
||||
static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
|
||||
enum bfa_itnim_aen_event event);
|
||||
|
||||
/*
|
||||
* fcs_itnim_sm FCS itnim state machine events
|
||||
@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Target (WWN = %s) is online for initiator (WWN = %s)\n",
|
||||
rpwwn_buf, lpwwn_buf);
|
||||
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
|
||||
break;
|
||||
|
||||
case BFA_FCS_ITNIM_SM_OFFLINE:
|
||||
@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
|
||||
bfa_itnim_offline(itnim->bfa_itnim);
|
||||
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
|
||||
wwn2str(rpwwn_buf, itnim->rport->pwwn);
|
||||
if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
|
||||
if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Target (WWN = %s) connectivity lost for "
|
||||
"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
|
||||
else
|
||||
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
|
||||
} else {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
|
||||
rpwwn_buf, lpwwn_buf);
|
||||
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
|
||||
}
|
||||
break;
|
||||
|
||||
case BFA_FCS_ITNIM_SM_DELETE:
|
||||
@ -381,6 +387,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
|
||||
enum bfa_itnim_aen_event event)
|
||||
{
|
||||
struct bfa_fcs_rport_s *rport = itnim->rport;
|
||||
struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
/* Don't post events for well known addresses */
|
||||
if (BFA_FCS_PID_IS_WKA(rport->pid))
|
||||
return;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
|
||||
aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
|
||||
bfa_fcs_get_base_port(itnim->fcs));
|
||||
aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
|
||||
aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
|
||||
BFA_AEN_CAT_ITNIM, event);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
|
||||
{
|
||||
|
@ -16,6 +16,7 @@
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
#include "bfa_fcs.h"
|
||||
#include "bfa_fcbuild.h"
|
||||
#include "bfa_fc.h"
|
||||
@ -299,6 +300,31 @@ bfa_fcs_lport_sm_deleting(
|
||||
* fcs_port_pvt
|
||||
*/
|
||||
|
||||
/*
|
||||
* Send AEN notification
|
||||
*/
|
||||
static void
|
||||
bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
|
||||
enum bfa_lport_aen_event event)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
|
||||
aen_entry->aen_data.lport.roles = port->port_cfg.roles;
|
||||
aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
|
||||
bfa_fcs_get_base_port(port->fcs));
|
||||
aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
|
||||
BFA_AEN_CAT_LPORT, event);
|
||||
}
|
||||
|
||||
/*
|
||||
* Send a LS reject
|
||||
*/
|
||||
@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Logical port online: WWN = %s Role = %s\n",
|
||||
lpwwn_buf, "Initiator");
|
||||
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
|
||||
|
||||
bfad->bfad_flags |= BFAD_PORT_ONLINE;
|
||||
}
|
||||
@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
|
||||
|
||||
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
|
||||
if (bfa_sm_cmp_state(port->fabric,
|
||||
bfa_fcs_fabric_sm_online) == BFA_TRUE)
|
||||
bfa_fcs_fabric_sm_online) == BFA_TRUE) {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
|
||||
lpwwn_buf, "Initiator");
|
||||
else
|
||||
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
|
||||
} else {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Logical port taken offline: WWN = %s Role = %s\n",
|
||||
lpwwn_buf, "Initiator");
|
||||
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
|
||||
}
|
||||
|
||||
list_for_each_safe(qe, qen, &port->rport_q) {
|
||||
rport = (struct bfa_fcs_rport_s *) qe;
|
||||
@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Logical port deleted: WWN = %s Role = %s\n",
|
||||
lpwwn_buf, "Initiator");
|
||||
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
|
||||
|
||||
/* Base port will be deleted by the OS driver */
|
||||
if (port->vport) {
|
||||
@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"New logical port created: WWN = %s Role = %s\n",
|
||||
lpwwn_buf, "Initiator");
|
||||
bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
|
||||
|
||||
bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
|
||||
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
|
||||
@ -5558,6 +5590,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
|
||||
/*
|
||||
* fcs_vport_private FCS virtual port private functions
|
||||
*/
|
||||
/*
|
||||
* Send AEN notification
|
||||
*/
|
||||
static void
|
||||
bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
|
||||
enum bfa_lport_aen_event event)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
|
||||
aen_entry->aen_data.lport.roles = port->port_cfg.roles;
|
||||
aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
|
||||
bfa_fcs_get_base_port(port->fcs));
|
||||
aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
|
||||
BFA_AEN_CAT_LPORT, event);
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine will be called to send a FDISC command.
|
||||
*/
|
||||
@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
|
||||
case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
|
||||
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
|
||||
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
|
||||
else
|
||||
else {
|
||||
bfa_fcs_vport_aen_post(&vport->lport,
|
||||
BFA_LPORT_AEN_NPIV_DUP_WWN);
|
||||
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
|
||||
}
|
||||
break;
|
||||
|
||||
case FC_LS_RJT_EXP_INSUFF_RES:
|
||||
@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
|
||||
*/
|
||||
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
|
||||
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
|
||||
else
|
||||
else {
|
||||
bfa_fcs_vport_aen_post(&vport->lport,
|
||||
BFA_LPORT_AEN_NPIV_FABRIC_MAX);
|
||||
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
if (vport->fdisc_retries == 0)
|
||||
bfa_fcs_vport_aen_post(&vport->lport,
|
||||
BFA_LPORT_AEN_NPIV_UNKNOWN);
|
||||
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
|
||||
}
|
||||
}
|
||||
|
@ -20,6 +20,7 @@
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
#include "bfa_fcs.h"
|
||||
#include "bfa_fcbuild.h"
|
||||
|
||||
@ -2040,6 +2041,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
|
||||
kfree(rport->rp_drv);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
|
||||
enum bfa_rport_aen_event event,
|
||||
struct bfa_rport_aen_data_s *data)
|
||||
{
|
||||
struct bfa_fcs_lport_s *port = rport->port;
|
||||
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
if (event == BFA_RPORT_AEN_QOS_PRIO)
|
||||
aen_entry->aen_data.rport.priv.qos = data->priv.qos;
|
||||
else if (event == BFA_RPORT_AEN_QOS_FLOWID)
|
||||
aen_entry->aen_data.rport.priv.qos = data->priv.qos;
|
||||
|
||||
aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
|
||||
aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
|
||||
bfa_fcs_get_base_port(rport->fcs));
|
||||
aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
|
||||
aen_entry->aen_data.rport.rpwwn = rport->pwwn;
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
|
||||
BFA_AEN_CAT_RPORT, event);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
|
||||
{
|
||||
@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
|
||||
|
||||
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
|
||||
wwn2str(rpwwn_buf, rport->pwwn);
|
||||
if (!BFA_FCS_PID_IS_WKA(rport->pid))
|
||||
if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
|
||||
rpwwn_buf, lpwwn_buf);
|
||||
bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
|
||||
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
|
||||
wwn2str(rpwwn_buf, rport->pwwn);
|
||||
if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
|
||||
if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
|
||||
if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Remote port (WWN = %s) connectivity lost for "
|
||||
"logical port (WWN = %s)\n",
|
||||
rpwwn_buf, lpwwn_buf);
|
||||
else
|
||||
bfa_fcs_rport_aen_post(rport,
|
||||
BFA_RPORT_AEN_DISCONNECT, NULL);
|
||||
} else {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Remote port (WWN = %s) offlined by "
|
||||
"logical port (WWN = %s)\n",
|
||||
rpwwn_buf, lpwwn_buf);
|
||||
bfa_fcs_rport_aen_post(rport,
|
||||
BFA_RPORT_AEN_OFFLINE, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
if (bfa_fcs_lport_is_initiator(port)) {
|
||||
@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
|
||||
struct bfa_rport_qos_attr_s new_qos_attr)
|
||||
{
|
||||
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
|
||||
struct bfa_rport_aen_data_s aen_data;
|
||||
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
aen_data.priv.qos = new_qos_attr;
|
||||
bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
|
||||
struct bfa_rport_qos_attr_s new_qos_attr)
|
||||
{
|
||||
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
|
||||
struct bfa_rport_aen_data_s aen_data;
|
||||
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
aen_data.priv.qos = new_qos_attr;
|
||||
bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
|
||||
bfa->iocfc.bfa_regs.intr_status);
|
||||
}
|
||||
|
||||
/*
|
||||
* Actions to respond RME Interrupt for Crossbow ASIC:
|
||||
* - Write 1 to Interrupt Status register
|
||||
* INTX - done in bfa_intx()
|
||||
* MSIX - done in bfa_hwcb_rspq_ack_msix()
|
||||
* - Update CI (only if new CI)
|
||||
*/
|
||||
static void
|
||||
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
|
||||
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
|
||||
{
|
||||
writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
|
||||
bfa->iocfc.bfa_regs.intr_status);
|
||||
bfa->iocfc.bfa_regs.intr_status);
|
||||
|
||||
if (bfa_rspq_ci(bfa, rspq) == ci)
|
||||
return;
|
||||
|
||||
bfa_rspq_ci(bfa, rspq) = ci;
|
||||
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
void
|
||||
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
|
||||
{
|
||||
if (bfa_rspq_ci(bfa, rspq) == ci)
|
||||
return;
|
||||
|
||||
bfa_rspq_ci(bfa, rspq) = ci;
|
||||
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
void
|
||||
@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
|
||||
void
|
||||
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
|
||||
{
|
||||
bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
|
||||
bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
|
||||
if (msix) {
|
||||
bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
|
||||
bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
|
||||
} else {
|
||||
bfa->iocfc.hwif.hw_reqq_ack = NULL;
|
||||
bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
|
||||
writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Actions to respond RME Interrupt for Catapult ASIC:
|
||||
* - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
|
||||
* - Acknowledge by writing to RME Queue Control register
|
||||
* - Update CI
|
||||
*/
|
||||
void
|
||||
bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
|
||||
bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
|
||||
{
|
||||
u32 r32;
|
||||
|
||||
r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
|
||||
writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
|
||||
|
||||
bfa_rspq_ci(bfa, rspq) = ci;
|
||||
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
/*
|
||||
* Actions to respond RME Interrupt for Catapult2 ASIC:
|
||||
* - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
|
||||
* - Update CI
|
||||
*/
|
||||
void
|
||||
bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
|
||||
{
|
||||
bfa_rspq_ci(bfa, rspq) = ci;
|
||||
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -16,6 +16,7 @@
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
#include "bfa_ioc.h"
|
||||
#include "bfi_reg.h"
|
||||
#include "bfa_defs.h"
|
||||
@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
|
||||
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
|
||||
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
|
||||
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
|
||||
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
|
||||
|
||||
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
|
||||
"Heart Beat of IOC has failed\n");
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
|
||||
|
||||
}
|
||||
|
||||
@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
|
||||
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
|
||||
"Running firmware version is incompatible "
|
||||
"with the driver version\n");
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
@ -2678,6 +2683,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
|
||||
return m;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send AEN notification
|
||||
*/
|
||||
void
|
||||
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
enum bfa_ioc_type_e ioc_type;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
ioc_type = bfa_ioc_get_type(ioc);
|
||||
switch (ioc_type) {
|
||||
case BFA_IOC_TYPE_FC:
|
||||
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
|
||||
break;
|
||||
case BFA_IOC_TYPE_FCoE:
|
||||
aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
|
||||
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
|
||||
break;
|
||||
case BFA_IOC_TYPE_LL:
|
||||
aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Send the AEN notification */
|
||||
aen_entry->aen_data.ioc.ioc_type = ioc_type;
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
|
||||
BFA_AEN_CAT_IOC, event);
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieve saved firmware trace from a prior IOC failure.
|
||||
*/
|
||||
@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
|
||||
return;
|
||||
if (ioc->attr->nwwn == 0)
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
|
||||
if (ioc->attr->pwwn == 0)
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3442,6 +3488,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* SFP's State Change Notification post to AEN
|
||||
*/
|
||||
static void
|
||||
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
enum bfa_port_aen_event aen_evt = 0;
|
||||
|
||||
bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
|
||||
((u64)rsp->event));
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
|
||||
aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
|
||||
aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
|
||||
|
||||
switch (rsp->event) {
|
||||
case BFA_SFP_SCN_INSERTED:
|
||||
aen_evt = BFA_PORT_AEN_SFP_INSERT;
|
||||
break;
|
||||
case BFA_SFP_SCN_REMOVED:
|
||||
aen_evt = BFA_PORT_AEN_SFP_REMOVE;
|
||||
break;
|
||||
case BFA_SFP_SCN_FAILED:
|
||||
aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
|
||||
break;
|
||||
case BFA_SFP_SCN_UNSUPPORT:
|
||||
aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
|
||||
break;
|
||||
case BFA_SFP_SCN_POM:
|
||||
aen_evt = BFA_PORT_AEN_SFP_POM;
|
||||
aen_entry->aen_data.port.level = rsp->pomlvl;
|
||||
break;
|
||||
default:
|
||||
bfa_trc(sfp, rsp->event);
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
|
||||
BFA_AEN_CAT_PORT, aen_evt);
|
||||
}
|
||||
|
||||
/*
|
||||
* SFP get data send
|
||||
*/
|
||||
@ -3481,6 +3575,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
|
||||
bfa_sfp_getdata_send(sfp);
|
||||
}
|
||||
|
||||
/*
|
||||
* SFP scn handler
|
||||
*/
|
||||
static void
|
||||
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
|
||||
{
|
||||
struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
|
||||
|
||||
switch (rsp->event) {
|
||||
case BFA_SFP_SCN_INSERTED:
|
||||
sfp->state = BFA_SFP_STATE_INSERTED;
|
||||
sfp->data_valid = 0;
|
||||
bfa_sfp_scn_aen_post(sfp, rsp);
|
||||
break;
|
||||
case BFA_SFP_SCN_REMOVED:
|
||||
sfp->state = BFA_SFP_STATE_REMOVED;
|
||||
sfp->data_valid = 0;
|
||||
bfa_sfp_scn_aen_post(sfp, rsp);
|
||||
break;
|
||||
case BFA_SFP_SCN_FAILED:
|
||||
sfp->state = BFA_SFP_STATE_FAILED;
|
||||
sfp->data_valid = 0;
|
||||
bfa_sfp_scn_aen_post(sfp, rsp);
|
||||
break;
|
||||
case BFA_SFP_SCN_UNSUPPORT:
|
||||
sfp->state = BFA_SFP_STATE_UNSUPPORT;
|
||||
bfa_sfp_scn_aen_post(sfp, rsp);
|
||||
if (!sfp->lock)
|
||||
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
|
||||
break;
|
||||
case BFA_SFP_SCN_POM:
|
||||
bfa_sfp_scn_aen_post(sfp, rsp);
|
||||
break;
|
||||
case BFA_SFP_SCN_VALID:
|
||||
sfp->state = BFA_SFP_STATE_VALID;
|
||||
if (!sfp->lock)
|
||||
bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
|
||||
break;
|
||||
default:
|
||||
bfa_trc(sfp, rsp->event);
|
||||
WARN_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* SFP show complete
|
||||
*/
|
||||
@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
|
||||
break;
|
||||
|
||||
case BFI_SFP_I2H_SCN:
|
||||
bfa_trc(sfp, msg->mh.msg_id);
|
||||
bfa_sfp_scn(sfp, msg);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -3837,6 +3975,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
|
||||
#define BFA_FLASH_DMA_BUF_SZ \
|
||||
BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
|
||||
|
||||
static void
|
||||
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
|
||||
int inst, int type)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
|
||||
aen_entry->aen_data.audit.partition_inst = inst;
|
||||
aen_entry->aen_data.audit.partition_type = type;
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
|
||||
BFA_AEN_CAT_AUDIT, event);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_flash_cb(struct bfa_flash_s *flash)
|
||||
{
|
||||
@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
|
||||
struct bfi_flash_erase_rsp_s *erase;
|
||||
struct bfi_flash_write_rsp_s *write;
|
||||
struct bfi_flash_read_rsp_s *read;
|
||||
struct bfi_flash_event_s *event;
|
||||
struct bfi_mbmsg_s *msg;
|
||||
} m;
|
||||
|
||||
@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
|
||||
}
|
||||
break;
|
||||
case BFI_FLASH_I2H_BOOT_VER_RSP:
|
||||
break;
|
||||
case BFI_FLASH_I2H_EVENT:
|
||||
bfa_trc(flash, msg->mh.msg_id);
|
||||
status = be32_to_cpu(m.event->status);
|
||||
bfa_trc(flash, status);
|
||||
if (status == BFA_STATUS_BAD_FWCFG)
|
||||
bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
|
||||
else if (status == BFA_STATUS_INVALID_VENDOR) {
|
||||
u32 param;
|
||||
param = be32_to_cpu(m.event->param);
|
||||
bfa_trc(flash, param);
|
||||
bfa_ioc_aen_post(flash->ioc,
|
||||
BFA_IOC_AEN_INVALID_VENDOR);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
|
||||
flash->instance = instance;
|
||||
|
||||
bfa_flash_erase_send(flash);
|
||||
bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
|
||||
instance, type);
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
|
||||
WARN_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* DCONF module specific
|
||||
*/
|
||||
|
||||
BFA_MODULE(dconf);
|
||||
|
||||
/*
|
||||
* DCONF state machine events
|
||||
*/
|
||||
enum bfa_dconf_event {
|
||||
BFA_DCONF_SM_INIT = 1, /* dconf Init */
|
||||
BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
|
||||
BFA_DCONF_SM_WR = 3, /* binding change, map */
|
||||
BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
|
||||
BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
|
||||
BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
|
||||
};
|
||||
|
||||
/* forward declaration of DCONF state machine */
|
||||
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event);
|
||||
|
||||
static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
|
||||
static void bfa_dconf_timer(void *cbarg);
|
||||
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
|
||||
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
|
||||
|
||||
/*
|
||||
* Begining state of dconf module. Waiting for an event to start.
|
||||
*/
|
||||
static void
|
||||
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_status_t bfa_status;
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_INIT:
|
||||
if (dconf->min_cfg) {
|
||||
bfa_trc(dconf->bfa, dconf->min_cfg);
|
||||
return;
|
||||
}
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
|
||||
dconf->flashdone = BFA_FALSE;
|
||||
bfa_trc(dconf->bfa, dconf->flashdone);
|
||||
bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
|
||||
BFA_FLASH_PART_DRV, dconf->instance,
|
||||
dconf->dconf,
|
||||
sizeof(struct bfa_dconf_s), 0,
|
||||
bfa_dconf_init_cb, dconf->bfa);
|
||||
if (bfa_status != BFA_STATUS_OK) {
|
||||
bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
case BFA_DCONF_SM_WR:
|
||||
case BFA_DCONF_SM_FLASH_COMP:
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Read flash for dconf entries and make a call back to the driver once done.
|
||||
*/
|
||||
static void
|
||||
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_FLASH_COMP:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
|
||||
break;
|
||||
case BFA_DCONF_SM_TIMEOUT:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
bfa_trc(dconf->bfa, dconf->flashdone);
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* DCONF Module is in ready state. Has completed the initialization.
|
||||
*/
|
||||
static void
|
||||
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_WR:
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
bfa_trc(dconf->bfa, dconf->flashdone);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
break;
|
||||
case BFA_DCONF_SM_INIT:
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* entries are dirty, write back to the flash.
|
||||
*/
|
||||
|
||||
static void
|
||||
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_TIMEOUT:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
|
||||
bfa_dconf_flash_write(dconf);
|
||||
break;
|
||||
case BFA_DCONF_SM_WR:
|
||||
bfa_timer_stop(&dconf->timer);
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
bfa_timer_stop(&dconf->timer);
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
|
||||
bfa_dconf_flash_write(dconf);
|
||||
break;
|
||||
case BFA_DCONF_SM_FLASH_COMP:
|
||||
break;
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
bfa_timer_stop(&dconf->timer);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Sync the dconf entries to the flash.
|
||||
*/
|
||||
static void
|
||||
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
case BFA_DCONF_SM_FLASH_COMP:
|
||||
bfa_timer_stop(&dconf->timer);
|
||||
case BFA_DCONF_SM_TIMEOUT:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
bfa_trc(dconf->bfa, dconf->flashdone);
|
||||
bfa_ioc_disable(&dconf->bfa->ioc);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_FLASH_COMP:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
|
||||
break;
|
||||
case BFA_DCONF_SM_WR:
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
|
||||
break;
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
|
||||
enum bfa_dconf_event event)
|
||||
{
|
||||
bfa_trc(dconf->bfa, event);
|
||||
|
||||
switch (event) {
|
||||
case BFA_DCONF_SM_INIT:
|
||||
bfa_timer_start(dconf->bfa, &dconf->timer,
|
||||
bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
|
||||
break;
|
||||
case BFA_DCONF_SM_EXIT:
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
break;
|
||||
case BFA_DCONF_SM_IOCDISABLE:
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(dconf->bfa, event);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute and return memory needed by DRV_CFG module.
|
||||
*/
|
||||
static void
|
||||
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
|
||||
struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
|
||||
|
||||
if (cfg->drvcfg.min_cfg)
|
||||
bfa_mem_kva_setup(meminfo, dconf_kva,
|
||||
sizeof(struct bfa_dconf_hdr_s));
|
||||
else
|
||||
bfa_mem_kva_setup(meminfo, dconf_kva,
|
||||
sizeof(struct bfa_dconf_s));
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
struct bfa_pcidev_s *pcidev)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
|
||||
dconf->bfad = bfad;
|
||||
dconf->bfa = bfa;
|
||||
dconf->instance = bfa->ioc.port_id;
|
||||
bfa_trc(bfa, dconf->instance);
|
||||
|
||||
dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
|
||||
if (cfg->drvcfg.min_cfg) {
|
||||
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
|
||||
dconf->min_cfg = BFA_TRUE;
|
||||
/*
|
||||
* Set the flashdone flag to TRUE explicitly as no flash
|
||||
* write will happen in min_cfg mode.
|
||||
*/
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
} else {
|
||||
dconf->min_cfg = BFA_FALSE;
|
||||
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
|
||||
}
|
||||
|
||||
bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
|
||||
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_dconf_init_cb(void *arg, bfa_status_t status)
|
||||
{
|
||||
struct bfa_s *bfa = arg;
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
|
||||
dconf->flashdone = BFA_TRUE;
|
||||
bfa_trc(bfa, dconf->flashdone);
|
||||
bfa_iocfc_cb_dconf_modinit(bfa, status);
|
||||
if (status == BFA_STATUS_OK) {
|
||||
bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
|
||||
if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
|
||||
dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
|
||||
if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
|
||||
dconf->dconf->hdr.version = BFI_DCONF_VERSION;
|
||||
}
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_dconf_modinit(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
|
||||
}
|
||||
/* No start-time work for the dconf module. */
static void
bfa_dconf_start(struct bfa_s *bfa)
{
}
|
||||
|
||||
/* No stop-time work for the dconf module. */
static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}
|
||||
|
||||
static void bfa_dconf_timer(void *cbarg)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = cbarg;
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
|
||||
}
|
||||
static void
|
||||
bfa_dconf_iocdisable(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
|
||||
}
|
||||
|
||||
/* No detach-time work for the dconf module. */
static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
|
||||
|
||||
static bfa_status_t
|
||||
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
|
||||
{
|
||||
bfa_status_t bfa_status;
|
||||
bfa_trc(dconf->bfa, 0);
|
||||
|
||||
bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
|
||||
BFA_FLASH_PART_DRV, dconf->instance,
|
||||
dconf->dconf, sizeof(struct bfa_dconf_s), 0,
|
||||
bfa_dconf_cbfn, dconf);
|
||||
if (bfa_status != BFA_STATUS_OK)
|
||||
WARN_ON(bfa_status);
|
||||
bfa_trc(dconf->bfa, bfa_status);
|
||||
|
||||
return bfa_status;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_dconf_update(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
bfa_trc(dconf->bfa, 0);
|
||||
if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
|
||||
return BFA_STATUS_FAILED;
|
||||
|
||||
if (dconf->min_cfg) {
|
||||
bfa_trc(dconf->bfa, dconf->min_cfg);
|
||||
return BFA_STATUS_FAILED;
|
||||
}
|
||||
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_dconf_cbfn(void *arg, bfa_status_t status)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = arg;
|
||||
WARN_ON(status);
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_dconf_modexit(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
|
||||
BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
|
||||
bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
|
||||
bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
|
||||
}
|
||||
|
@ -327,6 +327,7 @@ struct bfa_ioc_s {
|
||||
enum bfa_mode_s port_mode;
|
||||
u8 ad_cap_bm; /* adapter cap bit mask */
|
||||
u8 port_mode_cfg; /* config port mode */
|
||||
int ioc_aen_seq;
|
||||
};
|
||||
|
||||
struct bfa_ioc_hwif_s {
|
||||
@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
|
||||
struct list_head qe;
|
||||
bfa_cb_cbfn_t cbfn;
|
||||
bfa_boolean_t once;
|
||||
bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
|
||||
bfa_status_t fw_status; /* to access fw status in comp proc */
|
||||
void *cbarg;
|
||||
};
|
||||
|
||||
@ -658,7 +661,6 @@ struct bfa_phy_s {
|
||||
struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
|
||||
struct bfa_mem_dma_s phy_dma;
|
||||
};
|
||||
|
||||
#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
|
||||
#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
|
||||
|
||||
@ -683,6 +685,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
|
||||
u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
|
||||
void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
|
||||
|
||||
/*
|
||||
* Driver Config( dconf) specific
|
||||
*/
|
||||
#define BFI_DCONF_SIGNATURE 0xabcdabcd
|
||||
#define BFI_DCONF_VERSION 1
|
||||
|
||||
#pragma pack(1)
|
||||
struct bfa_dconf_hdr_s {
|
||||
u32 signature;
|
||||
u32 version;
|
||||
};
|
||||
|
||||
struct bfa_dconf_s {
|
||||
struct bfa_dconf_hdr_s hdr;
|
||||
struct bfa_lunmask_cfg_s lun_mask;
|
||||
};
|
||||
#pragma pack()
|
||||
|
||||
struct bfa_dconf_mod_s {
|
||||
bfa_sm_t sm;
|
||||
u8 instance;
|
||||
bfa_boolean_t flashdone;
|
||||
bfa_boolean_t read_data_valid;
|
||||
bfa_boolean_t min_cfg;
|
||||
struct bfa_timer_s timer;
|
||||
struct bfa_s *bfa;
|
||||
void *bfad;
|
||||
void *trcmod;
|
||||
struct bfa_dconf_s *dconf;
|
||||
struct bfa_mem_kva_s kva_seg;
|
||||
};
|
||||
|
||||
#define BFA_DCONF_MOD(__bfa) \
|
||||
(&(__bfa)->modules.dconf_mod)
|
||||
#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
|
||||
#define bfa_dconf_read_data_valid(__bfa) \
|
||||
(BFA_DCONF_MOD(__bfa)->read_data_valid)
|
||||
#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
|
||||
|
||||
void bfa_dconf_modinit(struct bfa_s *bfa);
|
||||
void bfa_dconf_modexit(struct bfa_s *bfa);
|
||||
bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
|
||||
|
||||
/*
|
||||
* IOC specfic macros
|
||||
*/
|
||||
@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
|
||||
struct bfi_ioc_image_hdr_s *fwhdr);
|
||||
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
|
||||
struct bfi_ioc_image_hdr_s *fwhdr);
|
||||
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
|
||||
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
|
||||
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
|
||||
|
||||
|
@ -44,6 +44,7 @@ struct bfa_modules_s {
|
||||
struct bfa_flash_s flash; /* flash module */
|
||||
struct bfa_diag_s diag_mod; /* diagnostics module */
|
||||
struct bfa_phy_s phy; /* phy module */
|
||||
struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
|
||||
};
|
||||
|
||||
/*
|
||||
@ -119,6 +120,7 @@ struct bfa_s {
|
||||
struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
|
||||
bfa_boolean_t fcs; /* FCS is attached to BFA */
|
||||
struct bfa_msix_s msix;
|
||||
int bfa_aen_seq;
|
||||
};
|
||||
|
||||
extern bfa_boolean_t bfa_auto_recover;
|
||||
@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
|
||||
extern struct bfa_module_s hal_mod_uf;
|
||||
extern struct bfa_module_s hal_mod_rport;
|
||||
extern struct bfa_module_s hal_mod_fcp;
|
||||
extern struct bfa_module_s hal_mod_dconf;
|
||||
|
||||
#endif /* __BFA_MODULES_H__ */
|
||||
|
@ -16,6 +16,7 @@
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
#include "bfa_plog.h"
|
||||
#include "bfa_cs.h"
|
||||
#include "bfa_modules.h"
|
||||
@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
|
||||
{
|
||||
struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
|
||||
bfad_get_aen_entry(bfad, aen_entry);
|
||||
if (!aen_entry)
|
||||
return;
|
||||
|
||||
aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
|
||||
aen_entry->aen_data.port.pwwn = fcport->pwwn;
|
||||
|
||||
/* Send the AEN notification */
|
||||
bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
|
||||
BFA_AEN_CAT_PORT, event);
|
||||
}
|
||||
|
||||
/*
|
||||
* FC PORT state machine functions
|
||||
*/
|
||||
@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port disabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_LINKUP:
|
||||
@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port disabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_STOP:
|
||||
@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port online: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
|
||||
|
||||
/* If QoS is enabled and it is not online, send AEN */
|
||||
if (fcport->cfg.qos_enabled &&
|
||||
fcport->qos_attr.state != BFA_QOS_ONLINE)
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_LINKDOWN:
|
||||
@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port disabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_STOP:
|
||||
@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port offline: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port disabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_LINKDOWN:
|
||||
@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
|
||||
bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
|
||||
BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa))
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port offline: WWN = %s\n", pwwn_buf);
|
||||
else
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
|
||||
} else {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Base port (WWN = %s) "
|
||||
"lost fabric connectivity\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
|
||||
}
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_STOP:
|
||||
bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
|
||||
bfa_fcport_reset_linkinfo(fcport);
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa))
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port offline: WWN = %s\n", pwwn_buf);
|
||||
else
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
|
||||
} else {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Base port (WWN = %s) "
|
||||
"lost fabric connectivity\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
|
||||
}
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_HWFAIL:
|
||||
@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
|
||||
bfa_fcport_reset_linkinfo(fcport);
|
||||
bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa))
|
||||
if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port offline: WWN = %s\n", pwwn_buf);
|
||||
else
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
|
||||
} else {
|
||||
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
|
||||
"Base port (WWN = %s) "
|
||||
"lost fabric connectivity\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port enabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_STOP:
|
||||
@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
|
||||
wwn2str(pwwn_buf, fcport->pwwn);
|
||||
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
|
||||
"Base port enabled: WWN = %s\n", pwwn_buf);
|
||||
bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
|
||||
break;
|
||||
|
||||
case BFA_FCPORT_SM_DISABLE:
|
||||
@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
|
||||
port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
|
||||
|
||||
INIT_LIST_HEAD(&fcport->stats_pending_q);
|
||||
INIT_LIST_HEAD(&fcport->statsclr_pending_q);
|
||||
|
||||
bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
|
||||
}
|
||||
|
||||
@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
|
||||
static void
|
||||
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = cbarg;
|
||||
struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
|
||||
struct bfa_cb_pending_q_s *cb;
|
||||
struct list_head *qe, *qen;
|
||||
union bfa_fcport_stats_u *ret;
|
||||
|
||||
if (complete) {
|
||||
if (fcport->stats_status == BFA_STATUS_OK) {
|
||||
struct timeval tv;
|
||||
struct timeval tv;
|
||||
if (fcport->stats_status == BFA_STATUS_OK)
|
||||
do_gettimeofday(&tv);
|
||||
|
||||
/* Swap FC QoS or FCoE stats */
|
||||
if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
|
||||
bfa_fcport_qos_stats_swap(
|
||||
&fcport->stats_ret->fcqos,
|
||||
&fcport->stats->fcqos);
|
||||
} else {
|
||||
bfa_fcport_fcoe_stats_swap(
|
||||
&fcport->stats_ret->fcoe,
|
||||
&fcport->stats->fcoe);
|
||||
|
||||
do_gettimeofday(&tv);
|
||||
fcport->stats_ret->fcoe.secs_reset =
|
||||
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
|
||||
bfa_q_deq(&fcport->stats_pending_q, &qe);
|
||||
cb = (struct bfa_cb_pending_q_s *)qe;
|
||||
if (fcport->stats_status == BFA_STATUS_OK) {
|
||||
ret = (union bfa_fcport_stats_u *)cb->data;
|
||||
/* Swap FC QoS or FCoE stats */
|
||||
if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
|
||||
bfa_fcport_qos_stats_swap(&ret->fcqos,
|
||||
&fcport->stats->fcqos);
|
||||
else {
|
||||
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
|
||||
&fcport->stats->fcoe);
|
||||
ret->fcoe.secs_reset =
|
||||
tv.tv_sec - fcport->stats_reset_time;
|
||||
}
|
||||
}
|
||||
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
|
||||
fcport->stats_status);
|
||||
}
|
||||
fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
|
||||
fcport->stats_status = BFA_STATUS_OK;
|
||||
} else {
|
||||
fcport->stats_busy = BFA_FALSE;
|
||||
INIT_LIST_HEAD(&fcport->stats_pending_q);
|
||||
fcport->stats_status = BFA_STATUS_OK;
|
||||
}
|
||||
}
|
||||
@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
|
||||
}
|
||||
|
||||
fcport->stats_status = BFA_STATUS_ETIMER;
|
||||
bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
|
||||
fcport);
|
||||
__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
|
||||
static void
|
||||
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = cbarg;
|
||||
struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
|
||||
struct bfa_cb_pending_q_s *cb;
|
||||
struct list_head *qe, *qen;
|
||||
|
||||
if (complete) {
|
||||
struct timeval tv;
|
||||
@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
|
||||
*/
|
||||
do_gettimeofday(&tv);
|
||||
fcport->stats_reset_time = tv.tv_sec;
|
||||
|
||||
fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
|
||||
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
|
||||
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
|
||||
cb = (struct bfa_cb_pending_q_s *)qe;
|
||||
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
|
||||
fcport->stats_status);
|
||||
}
|
||||
fcport->stats_status = BFA_STATUS_OK;
|
||||
} else {
|
||||
fcport->stats_busy = BFA_FALSE;
|
||||
INIT_LIST_HEAD(&fcport->statsclr_pending_q);
|
||||
fcport->stats_status = BFA_STATUS_OK;
|
||||
}
|
||||
}
|
||||
@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
|
||||
}
|
||||
|
||||
fcport->stats_status = BFA_STATUS_ETIMER;
|
||||
bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
|
||||
__bfa_cb_fcport_stats_clr, fcport);
|
||||
__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
|
||||
fcport->use_flash_cfg = BFA_FALSE;
|
||||
}
|
||||
|
||||
if (fcport->cfg.qos_enabled)
|
||||
fcport->qos_attr.state = BFA_QOS_OFFLINE;
|
||||
else
|
||||
fcport->qos_attr.state = BFA_QOS_DISABLED;
|
||||
|
||||
bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
|
||||
}
|
||||
break;
|
||||
@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
|
||||
/*
|
||||
* check for timer pop before processing the rsp
|
||||
*/
|
||||
if (fcport->stats_busy == BFA_FALSE ||
|
||||
fcport->stats_status == BFA_STATUS_ETIMER)
|
||||
if (list_empty(&fcport->stats_pending_q) ||
|
||||
(fcport->stats_status == BFA_STATUS_ETIMER))
|
||||
break;
|
||||
|
||||
bfa_timer_stop(&fcport->timer);
|
||||
fcport->stats_status = i2hmsg.pstatsget_rsp->status;
|
||||
bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
|
||||
__bfa_cb_fcport_stats_get, fcport);
|
||||
__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
|
||||
break;
|
||||
|
||||
case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
|
||||
/*
|
||||
* check for timer pop before processing the rsp
|
||||
*/
|
||||
if (fcport->stats_busy == BFA_FALSE ||
|
||||
fcport->stats_status == BFA_STATUS_ETIMER)
|
||||
if (list_empty(&fcport->statsclr_pending_q) ||
|
||||
(fcport->stats_status == BFA_STATUS_ETIMER))
|
||||
break;
|
||||
|
||||
bfa_timer_stop(&fcport->timer);
|
||||
fcport->stats_status = BFA_STATUS_OK;
|
||||
bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
|
||||
__bfa_cb_fcport_stats_clr, fcport);
|
||||
__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
|
||||
break;
|
||||
|
||||
case BFI_FCPORT_I2H_ENABLE_AEN:
|
||||
@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
|
||||
* Fetch port statistics (FCQoS or FCoE).
|
||||
*/
|
||||
bfa_status_t
|
||||
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
|
||||
bfa_cb_port_t cbfn, void *cbarg)
|
||||
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
|
||||
if (fcport->stats_busy) {
|
||||
bfa_trc(bfa, fcport->stats_busy);
|
||||
if (bfa_ioc_is_disabled(&bfa->ioc))
|
||||
return BFA_STATUS_IOC_DISABLED;
|
||||
|
||||
if (!list_empty(&fcport->statsclr_pending_q))
|
||||
return BFA_STATUS_DEVBUSY;
|
||||
}
|
||||
|
||||
fcport->stats_busy = BFA_TRUE;
|
||||
fcport->stats_ret = stats;
|
||||
fcport->stats_cbfn = cbfn;
|
||||
fcport->stats_cbarg = cbarg;
|
||||
if (list_empty(&fcport->stats_pending_q)) {
|
||||
list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
|
||||
bfa_fcport_send_stats_get(fcport);
|
||||
bfa_timer_start(bfa, &fcport->timer,
|
||||
bfa_fcport_stats_get_timeout,
|
||||
fcport, BFA_FCPORT_STATS_TOV);
|
||||
} else
|
||||
list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
|
||||
|
||||
bfa_fcport_send_stats_get(fcport);
|
||||
|
||||
bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
|
||||
fcport, BFA_FCPORT_STATS_TOV);
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
|
||||
* Reset port statistics (FCQoS or FCoE).
|
||||
*/
|
||||
bfa_status_t
|
||||
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
|
||||
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
|
||||
if (fcport->stats_busy) {
|
||||
bfa_trc(bfa, fcport->stats_busy);
|
||||
if (!list_empty(&fcport->stats_pending_q))
|
||||
return BFA_STATUS_DEVBUSY;
|
||||
}
|
||||
|
||||
fcport->stats_busy = BFA_TRUE;
|
||||
fcport->stats_cbfn = cbfn;
|
||||
fcport->stats_cbarg = cbarg;
|
||||
if (list_empty(&fcport->statsclr_pending_q)) {
|
||||
list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
|
||||
bfa_fcport_send_stats_clear(fcport);
|
||||
bfa_timer_start(bfa, &fcport->timer,
|
||||
bfa_fcport_stats_clr_timeout,
|
||||
fcport, BFA_FCPORT_STATS_TOV);
|
||||
} else
|
||||
list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
|
||||
|
||||
bfa_fcport_send_stats_clear(fcport);
|
||||
|
||||
bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
|
||||
fcport, BFA_FCPORT_STATS_TOV);
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Fetch port attributes.
|
||||
*/
|
||||
@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
|
||||
rp->fw_handle = msg.create_rsp->fw_handle;
|
||||
rp->qos_attr = msg.create_rsp->qos_attr;
|
||||
bfa_rport_set_lunmask(bfa, rp);
|
||||
WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
|
||||
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
|
||||
break;
|
||||
@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
|
||||
case BFI_RPORT_I2H_DELETE_RSP:
|
||||
rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
|
||||
WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
|
||||
bfa_rport_unset_lunmask(bfa, rp);
|
||||
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
|
||||
break;
|
||||
|
||||
@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
|
||||
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
|
||||
}
|
||||
|
||||
/* Set Rport LUN Mask */
|
||||
void
|
||||
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
|
||||
{
|
||||
struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
|
||||
wwn_t lp_wwn, rp_wwn;
|
||||
u8 lp_tag = (u8)rp->rport_info.lp_tag;
|
||||
|
||||
rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
|
||||
lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
|
||||
|
||||
BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
|
||||
rp->lun_mask = BFA_TRUE;
|
||||
bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
|
||||
}
|
||||
|
||||
/* Unset Rport LUN mask */
|
||||
void
|
||||
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
|
||||
{
|
||||
struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
|
||||
wwn_t lp_wwn, rp_wwn;
|
||||
|
||||
rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
|
||||
lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
|
||||
|
||||
BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
|
||||
rp->lun_mask = BFA_FALSE;
|
||||
bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
|
||||
BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
|
||||
}
|
||||
|
||||
/*
|
||||
* SGPG related functions
|
||||
@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
|
||||
return BFA_STATUS_PORT_NOT_DISABLED;
|
||||
}
|
||||
|
||||
/* Check if the speed is supported */
|
||||
bfa_fcport_get_attr(bfa, &attr);
|
||||
bfa_trc(fcdiag, attr.speed_supported);
|
||||
if (speed > attr.speed_supported)
|
||||
return BFA_STATUS_UNSUPP_SPEED;
|
||||
/*
|
||||
* Check if input speed is supported by the port mode
|
||||
*/
|
||||
if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
|
||||
if (!(speed == BFA_PORT_SPEED_1GBPS ||
|
||||
speed == BFA_PORT_SPEED_2GBPS ||
|
||||
speed == BFA_PORT_SPEED_4GBPS ||
|
||||
speed == BFA_PORT_SPEED_8GBPS ||
|
||||
speed == BFA_PORT_SPEED_16GBPS ||
|
||||
speed == BFA_PORT_SPEED_AUTO)) {
|
||||
bfa_trc(fcdiag, speed);
|
||||
return BFA_STATUS_UNSUPP_SPEED;
|
||||
}
|
||||
bfa_fcport_get_attr(bfa, &attr);
|
||||
bfa_trc(fcdiag, attr.speed_supported);
|
||||
if (speed > attr.speed_supported)
|
||||
return BFA_STATUS_UNSUPP_SPEED;
|
||||
} else {
|
||||
if (speed != BFA_PORT_SPEED_10GBPS) {
|
||||
bfa_trc(fcdiag, speed);
|
||||
return BFA_STATUS_UNSUPP_SPEED;
|
||||
}
|
||||
}
|
||||
|
||||
/* For Mezz card, port speed entered needs to be checked */
|
||||
if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
|
||||
|
@ -297,6 +297,7 @@ struct bfa_rport_s {
|
||||
void *rport_drv; /* fcs/driver rport object */
|
||||
u16 fw_handle; /* firmware rport handle */
|
||||
u16 rport_tag; /* BFA rport tag */
|
||||
u8 lun_mask; /* LUN mask flag */
|
||||
struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
|
||||
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
|
||||
struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
|
||||
@ -404,6 +405,7 @@ struct bfa_lps_s {
|
||||
u8 bb_scn; /* local BB_SCN */
|
||||
u8 lsrjt_rsn; /* LSRJT reason */
|
||||
u8 lsrjt_expl; /* LSRJT explanation */
|
||||
u8 lun_mask; /* LUN mask flag */
|
||||
wwn_t pwwn; /* port wwn of lport */
|
||||
wwn_t nwwn; /* node wwn of lport */
|
||||
wwn_t pr_pwwn; /* port wwn of lport peer */
|
||||
@ -441,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
*/
|
||||
|
||||
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
|
||||
typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
|
||||
|
||||
/*
|
||||
* Link notification data structure
|
||||
@ -495,13 +496,11 @@ struct bfa_fcport_s {
|
||||
u8 *stats_kva;
|
||||
u64 stats_pa;
|
||||
union bfa_fcport_stats_u *stats;
|
||||
union bfa_fcport_stats_u *stats_ret; /* driver stats location */
|
||||
bfa_status_t stats_status; /* stats/statsclr status */
|
||||
bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
|
||||
struct list_head stats_pending_q;
|
||||
struct list_head statsclr_pending_q;
|
||||
bfa_boolean_t stats_qfull;
|
||||
u32 stats_reset_time; /* stats reset time stamp */
|
||||
bfa_cb_port_t stats_cbfn; /* driver callback function */
|
||||
void *stats_cbarg; /* *!< user callback arg */
|
||||
bfa_boolean_t diag_busy; /* diag busy status */
|
||||
bfa_boolean_t beacon; /* port beacon status */
|
||||
bfa_boolean_t link_e2e_beacon; /* link beacon status */
|
||||
@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
|
||||
bfa_boolean_t link_e2e_beacon);
|
||||
bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
|
||||
bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
|
||||
union bfa_fcport_stats_u *stats,
|
||||
bfa_cb_port_t cbfn, void *cbarg);
|
||||
bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
|
||||
void *cbarg);
|
||||
struct bfa_cb_pending_q_s *cb);
|
||||
bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
|
||||
struct bfa_cb_pending_q_s *cb);
|
||||
bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
|
||||
bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
|
||||
bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
|
||||
@ -577,6 +575,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
|
||||
struct bfa_rport_qos_attr_s old_qos_attr,
|
||||
struct bfa_rport_qos_attr_s new_qos_attr);
|
||||
|
||||
/*
|
||||
* Rport LUN masking related
|
||||
*/
|
||||
#define BFA_RPORT_TAG_INVALID 0xffff
|
||||
#define BFA_LP_TAG_INVALID 0xff
|
||||
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
|
||||
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
|
||||
bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
|
||||
wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
|
||||
struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
|
||||
wwn_t *lpwwn, wwn_t rpwwn);
|
||||
void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
|
||||
|
||||
/*
|
||||
* bfa fcxp API functions
|
||||
*/
|
||||
|
@ -1348,7 +1348,7 @@ int
|
||||
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
{
|
||||
struct bfad_s *bfad;
|
||||
int error = -ENODEV, retval;
|
||||
int error = -ENODEV, retval, i;
|
||||
|
||||
/* For single port cards - only claim function 0 */
|
||||
if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
|
||||
@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
bfa_trc_init(bfad->trcmod);
|
||||
bfa_trc(bfad, bfad_inst);
|
||||
|
||||
/* AEN INIT */
|
||||
INIT_LIST_HEAD(&bfad->free_aen_q);
|
||||
INIT_LIST_HEAD(&bfad->active_aen_q);
|
||||
for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
|
||||
list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
|
||||
|
||||
if (!(bfad_load_fwimg(pdev))) {
|
||||
kfree(bfad->trcmod);
|
||||
goto out_alloc_trace_failure;
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -30,24 +30,48 @@ enum {
|
||||
IOCMD_IOC_GET_INFO,
|
||||
IOCMD_IOC_GET_STATS,
|
||||
IOCMD_IOC_GET_FWSTATS,
|
||||
IOCMD_IOC_RESET_STATS,
|
||||
IOCMD_IOC_RESET_FWSTATS,
|
||||
IOCMD_IOC_SET_ADAPTER_NAME,
|
||||
IOCMD_IOC_SET_PORT_NAME,
|
||||
IOCMD_IOCFC_GET_ATTR,
|
||||
IOCMD_IOCFC_SET_INTR,
|
||||
IOCMD_PORT_ENABLE,
|
||||
IOCMD_PORT_DISABLE,
|
||||
IOCMD_PORT_GET_ATTR,
|
||||
IOCMD_PORT_GET_STATS,
|
||||
IOCMD_PORT_RESET_STATS,
|
||||
IOCMD_PORT_CFG_TOPO,
|
||||
IOCMD_PORT_CFG_SPEED,
|
||||
IOCMD_PORT_CFG_ALPA,
|
||||
IOCMD_PORT_CFG_MAXFRSZ,
|
||||
IOCMD_PORT_CLR_ALPA,
|
||||
IOCMD_PORT_BBSC_ENABLE,
|
||||
IOCMD_PORT_BBSC_DISABLE,
|
||||
IOCMD_LPORT_GET_ATTR,
|
||||
IOCMD_LPORT_GET_RPORTS,
|
||||
IOCMD_LPORT_GET_STATS,
|
||||
IOCMD_LPORT_RESET_STATS,
|
||||
IOCMD_LPORT_GET_IOSTATS,
|
||||
IOCMD_RPORT_GET_ATTR,
|
||||
IOCMD_RPORT_GET_ADDR,
|
||||
IOCMD_RPORT_GET_STATS,
|
||||
IOCMD_RPORT_RESET_STATS,
|
||||
IOCMD_RPORT_SET_SPEED,
|
||||
IOCMD_VPORT_GET_ATTR,
|
||||
IOCMD_VPORT_GET_STATS,
|
||||
IOCMD_VPORT_RESET_STATS,
|
||||
IOCMD_FABRIC_GET_LPORTS,
|
||||
IOCMD_RATELIM_ENABLE,
|
||||
IOCMD_RATELIM_DISABLE,
|
||||
IOCMD_RATELIM_DEF_SPEED,
|
||||
IOCMD_FCPIM_FAILOVER,
|
||||
IOCMD_FCPIM_MODSTATS,
|
||||
IOCMD_FCPIM_MODSTATSCLR,
|
||||
IOCMD_FCPIM_DEL_ITN_STATS,
|
||||
IOCMD_ITNIM_GET_ATTR,
|
||||
IOCMD_ITNIM_GET_IOSTATS,
|
||||
IOCMD_ITNIM_RESET_STATS,
|
||||
IOCMD_ITNIM_GET_ITNSTATS,
|
||||
IOCMD_IOC_PCIFN_CFG,
|
||||
IOCMD_FCPORT_ENABLE,
|
||||
@ -86,6 +110,39 @@ enum {
|
||||
IOCMD_PHY_READ_FW,
|
||||
IOCMD_VHBA_QUERY,
|
||||
IOCMD_DEBUG_PORTLOG,
|
||||
IOCMD_DEBUG_FW_CORE,
|
||||
IOCMD_DEBUG_FW_STATE_CLR,
|
||||
IOCMD_DEBUG_PORTLOG_CLR,
|
||||
IOCMD_DEBUG_START_DTRC,
|
||||
IOCMD_DEBUG_STOP_DTRC,
|
||||
IOCMD_DEBUG_PORTLOG_CTL,
|
||||
IOCMD_FCPIM_PROFILE_ON,
|
||||
IOCMD_FCPIM_PROFILE_OFF,
|
||||
IOCMD_ITNIM_GET_IOPROFILE,
|
||||
IOCMD_FCPORT_GET_STATS,
|
||||
IOCMD_FCPORT_RESET_STATS,
|
||||
IOCMD_BOOT_CFG,
|
||||
IOCMD_BOOT_QUERY,
|
||||
IOCMD_PREBOOT_QUERY,
|
||||
IOCMD_ETHBOOT_CFG,
|
||||
IOCMD_ETHBOOT_QUERY,
|
||||
IOCMD_TRUNK_ENABLE,
|
||||
IOCMD_TRUNK_DISABLE,
|
||||
IOCMD_TRUNK_GET_ATTR,
|
||||
IOCMD_QOS_ENABLE,
|
||||
IOCMD_QOS_DISABLE,
|
||||
IOCMD_QOS_GET_ATTR,
|
||||
IOCMD_QOS_GET_VC_ATTR,
|
||||
IOCMD_QOS_GET_STATS,
|
||||
IOCMD_QOS_RESET_STATS,
|
||||
IOCMD_VF_GET_STATS,
|
||||
IOCMD_VF_RESET_STATS,
|
||||
IOCMD_FCPIM_LUNMASK_ENABLE,
|
||||
IOCMD_FCPIM_LUNMASK_DISABLE,
|
||||
IOCMD_FCPIM_LUNMASK_CLEAR,
|
||||
IOCMD_FCPIM_LUNMASK_QUERY,
|
||||
IOCMD_FCPIM_LUNMASK_ADD,
|
||||
IOCMD_FCPIM_LUNMASK_DELETE,
|
||||
};
|
||||
|
||||
struct bfa_bsg_gen_s {
|
||||
@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
|
||||
u16 rsvd;
|
||||
};
|
||||
|
||||
struct bfa_bsg_portlogctl_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
bfa_boolean_t ctl;
|
||||
int inst_no;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_profile_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
};
|
||||
|
||||
struct bfa_bsg_itnim_ioprofile_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t lpwwn;
|
||||
wwn_t rpwwn;
|
||||
struct bfa_itnim_ioprofile_s ioprofile;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcport_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
union bfa_fcport_stats_u stats;
|
||||
};
|
||||
|
||||
struct bfa_bsg_ioc_name_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
char name[BFA_ADAPTER_SYM_NAME_LEN];
|
||||
};
|
||||
|
||||
struct bfa_bsg_ioc_info_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
|
||||
struct bfa_port_attr_s attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_port_cfg_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
u32 param;
|
||||
u32 rsvd1;
|
||||
};
|
||||
|
||||
struct bfa_bsg_port_cfg_maxfrsize_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 maxfrsize;
|
||||
};
|
||||
|
||||
struct bfa_bsg_port_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
|
||||
u32 lun;
|
||||
};
|
||||
|
||||
struct bfa_bsg_rport_reset_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t pwwn;
|
||||
wwn_t rpwwn;
|
||||
};
|
||||
|
||||
struct bfa_bsg_rport_set_speed_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
enum bfa_port_speed speed;
|
||||
u32 rsvd;
|
||||
wwn_t pwwn;
|
||||
wwn_t rpwwn;
|
||||
};
|
||||
|
||||
struct bfa_bsg_vport_attr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t vpwwn;
|
||||
struct bfa_vport_attr_s vport_attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_vport_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t vpwwn;
|
||||
struct bfa_vport_stats_s vport_stats;
|
||||
};
|
||||
|
||||
struct bfa_bsg_reset_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t vpwwn;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fabric_get_lports_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
|
||||
struct bfa_bsg_trl_speed_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
enum bfa_port_speed speed;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 param;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_modstats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
|
||||
struct bfa_fcpim_del_itn_stats_s modstats;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_modstatsclr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
};
|
||||
|
||||
struct bfa_bsg_itnim_attr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
|
||||
struct bfa_vhba_attr_s attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_boot_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_boot_cfg_s cfg;
|
||||
};
|
||||
|
||||
struct bfa_bsg_preboot_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_boot_pbc_s cfg;
|
||||
};
|
||||
|
||||
struct bfa_bsg_ethboot_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_ethboot_cfg_s cfg;
|
||||
};
|
||||
|
||||
struct bfa_bsg_trunk_attr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_trunk_attr_s attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_qos_attr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_qos_attr_s attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_qos_vc_attr_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 rsvd;
|
||||
struct bfa_qos_vc_attr_s attr;
|
||||
};
|
||||
|
||||
struct bfa_bsg_vf_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
struct bfa_vf_stats_s stats;
|
||||
};
|
||||
|
||||
struct bfa_bsg_vf_reset_stats_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_lunmask_query_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
struct bfa_lunmask_cfg_s lun_mask;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpim_lunmask_s {
|
||||
bfa_status_t status;
|
||||
u16 bfad_num;
|
||||
u16 vf_id;
|
||||
wwn_t pwwn;
|
||||
wwn_t rpwwn;
|
||||
struct scsi_lun lun;
|
||||
};
|
||||
|
||||
struct bfa_bsg_fcpt_s {
|
||||
bfa_status_t status;
|
||||
u16 vf_id;
|
||||
|
@ -56,7 +56,7 @@
|
||||
#ifdef BFA_DRIVER_VERSION
|
||||
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
|
||||
#else
|
||||
#define BFAD_DRIVER_VERSION "3.0.2.1"
|
||||
#define BFAD_DRIVER_VERSION "3.0.2.2"
|
||||
#endif
|
||||
|
||||
#define BFAD_PROTO_NAME FCPI_NAME
|
||||
@ -224,6 +224,10 @@ struct bfad_s {
|
||||
char *regdata;
|
||||
u32 reglen;
|
||||
struct dentry *bfad_dentry_files[5];
|
||||
struct list_head free_aen_q;
|
||||
struct list_head active_aen_q;
|
||||
struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
|
||||
spinlock_t bfad_aen_spinlock;
|
||||
};
|
||||
|
||||
/* BFAD state machine events */
|
||||
|
@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
static void bfad_aen_im_notify_handler(struct work_struct *work)
|
||||
{
|
||||
struct bfad_im_s *im =
|
||||
container_of(work, struct bfad_im_s, aen_im_notify_work);
|
||||
struct bfa_aen_entry_s *aen_entry;
|
||||
struct bfad_s *bfad = im->bfad;
|
||||
struct Scsi_Host *shost = bfad->pport.im_port->shost;
|
||||
void *event_data;
|
||||
unsigned long flags;
|
||||
|
||||
while (!list_empty(&bfad->active_aen_q)) {
|
||||
spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
|
||||
bfa_q_deq(&bfad->active_aen_q, &aen_entry);
|
||||
spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
|
||||
event_data = (char *)aen_entry + sizeof(struct list_head);
|
||||
fc_host_post_vendor_event(shost, fc_get_event_number(),
|
||||
sizeof(struct bfa_aen_entry_s) -
|
||||
sizeof(struct list_head),
|
||||
(char *)event_data, BFAD_NL_VENDOR_ID);
|
||||
spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
|
||||
list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
|
||||
spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfad_im_probe(struct bfad_s *bfad)
|
||||
{
|
||||
@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
|
||||
rc = BFA_STATUS_FAILED;
|
||||
}
|
||||
|
||||
INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
|
||||
ext:
|
||||
return rc;
|
||||
}
|
||||
|
@ -115,8 +115,30 @@ struct bfad_im_s {
|
||||
struct bfad_s *bfad;
|
||||
struct workqueue_struct *drv_workq;
|
||||
char drv_workq_name[KOBJ_NAME_LEN];
|
||||
struct work_struct aen_im_notify_work;
|
||||
};
|
||||
|
||||
#define bfad_get_aen_entry(_drv, _entry) do { \
|
||||
unsigned long _flags; \
|
||||
spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
|
||||
bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
|
||||
if (_entry) \
|
||||
list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
|
||||
spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
|
||||
} while (0)
|
||||
|
||||
/* post fc_host vendor event */
|
||||
#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
|
||||
do_gettimeofday(&(_entry)->aen_tv); \
|
||||
(_entry)->bfad_num = (_drv)->inst_no; \
|
||||
(_entry)->seq_num = (_cnt); \
|
||||
(_entry)->aen_category = (_cat); \
|
||||
(_entry)->aen_type = (_evt); \
|
||||
if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
|
||||
queue_work((_drv)->im->drv_workq, \
|
||||
&(_drv)->im->aen_im_notify_work); \
|
||||
} while (0)
|
||||
|
||||
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
|
||||
struct bfad_s *);
|
||||
bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
|
||||
|
@ -783,6 +783,17 @@ enum bfi_sfp_i2h_e {
|
||||
BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN),
|
||||
};
|
||||
|
||||
/*
|
||||
* SFP state change notification
|
||||
*/
|
||||
struct bfi_sfp_scn_s {
|
||||
struct bfi_mhdr_s mhr; /* host msg header */
|
||||
u8 event;
|
||||
u8 sfpid;
|
||||
u8 pomlvl; /* pom level: normal/warning/alarm */
|
||||
u8 is_elb; /* e-loopback */
|
||||
};
|
||||
|
||||
/*
|
||||
* SFP state
|
||||
*/
|
||||
@ -925,6 +936,15 @@ struct bfi_flash_erase_rsp_s {
|
||||
u32 status;
|
||||
};
|
||||
|
||||
/*
|
||||
* Flash event notification
|
||||
*/
|
||||
struct bfi_flash_event_s {
|
||||
struct bfi_mhdr_s mh; /* Common msg header */
|
||||
bfa_status_t status;
|
||||
u32 param;
|
||||
};
|
||||
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
* DIAG
|
||||
|
@ -2,7 +2,7 @@
|
||||
#define _BNX2FC_H_
|
||||
/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
* Copyright (c) 2008 - 2011 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -62,7 +62,7 @@
|
||||
#include "bnx2fc_constants.h"
|
||||
|
||||
#define BNX2FC_NAME "bnx2fc"
|
||||
#define BNX2FC_VERSION "1.0.3"
|
||||
#define BNX2FC_VERSION "1.0.4"
|
||||
|
||||
#define PFX "bnx2fc: "
|
||||
|
||||
@ -141,6 +141,10 @@
|
||||
|
||||
#define BNX2FC_RNID_HBA 0x7
|
||||
|
||||
#define SRR_RETRY_COUNT 5
|
||||
#define REC_RETRY_COUNT 1
|
||||
#define BNX2FC_NUM_ERR_BITS 63
|
||||
|
||||
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
|
||||
extern struct fcoe_percpu_s bnx2fc_global;
|
||||
|
||||
@ -153,18 +157,13 @@ struct bnx2fc_percpu_s {
|
||||
};
|
||||
|
||||
struct bnx2fc_hba {
|
||||
struct list_head link;
|
||||
struct list_head list;
|
||||
struct cnic_dev *cnic;
|
||||
struct pci_dev *pcidev;
|
||||
struct net_device *netdev;
|
||||
struct net_device *phys_dev;
|
||||
unsigned long reg_with_cnic;
|
||||
#define BNX2FC_CNIC_REGISTERED 1
|
||||
struct packet_type fcoe_packet_type;
|
||||
struct packet_type fip_packet_type;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr;
|
||||
struct workqueue_struct *timer_work_queue;
|
||||
struct kref kref;
|
||||
spinlock_t hba_lock;
|
||||
struct mutex hba_mutex;
|
||||
unsigned long adapter_state;
|
||||
@ -172,15 +171,9 @@ struct bnx2fc_hba {
|
||||
#define ADAPTER_STATE_GOING_DOWN 1
|
||||
#define ADAPTER_STATE_LINK_DOWN 2
|
||||
#define ADAPTER_STATE_READY 3
|
||||
u32 flags;
|
||||
unsigned long init_done;
|
||||
#define BNX2FC_FW_INIT_DONE 0
|
||||
#define BNX2FC_CTLR_INIT_DONE 1
|
||||
#define BNX2FC_CREATE_DONE 2
|
||||
struct fcoe_ctlr ctlr;
|
||||
struct list_head vports;
|
||||
u8 vlan_enabled;
|
||||
int vlan_id;
|
||||
unsigned long flags;
|
||||
#define BNX2FC_FLAG_FW_INIT_DONE 0
|
||||
#define BNX2FC_FLAG_DESTROY_CMPL 1
|
||||
u32 next_conn_id;
|
||||
struct fcoe_task_ctx_entry **task_ctx;
|
||||
dma_addr_t *task_ctx_dma;
|
||||
@ -199,38 +192,41 @@ struct bnx2fc_hba {
|
||||
char *dummy_buffer;
|
||||
dma_addr_t dummy_buf_dma;
|
||||
|
||||
/* Active list of offloaded sessions */
|
||||
struct bnx2fc_rport **tgt_ofld_list;
|
||||
|
||||
/* statistics */
|
||||
struct fcoe_statistics_params *stats_buffer;
|
||||
dma_addr_t stats_buf_dma;
|
||||
|
||||
/*
|
||||
* PCI related info.
|
||||
*/
|
||||
u16 pci_did;
|
||||
u16 pci_vid;
|
||||
u16 pci_sdid;
|
||||
u16 pci_svid;
|
||||
u16 pci_func;
|
||||
u16 pci_devno;
|
||||
|
||||
struct task_struct *l2_thread;
|
||||
|
||||
/* linkdown handling */
|
||||
wait_queue_head_t shutdown_wait;
|
||||
int wait_for_link_down;
|
||||
struct completion stat_req_done;
|
||||
|
||||
/*destroy handling */
|
||||
struct timer_list destroy_timer;
|
||||
wait_queue_head_t destroy_wait;
|
||||
|
||||
/* Active list of offloaded sessions */
|
||||
struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
|
||||
/* linkdown handling */
|
||||
wait_queue_head_t shutdown_wait;
|
||||
int wait_for_link_down;
|
||||
int num_ofld_sess;
|
||||
|
||||
/* statistics */
|
||||
struct completion stat_req_done;
|
||||
struct list_head vports;
|
||||
};
|
||||
|
||||
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
|
||||
struct bnx2fc_interface {
|
||||
struct list_head list;
|
||||
unsigned long if_flags;
|
||||
#define BNX2FC_CTLR_INIT_DONE 0
|
||||
struct bnx2fc_hba *hba;
|
||||
struct net_device *netdev;
|
||||
struct packet_type fcoe_packet_type;
|
||||
struct packet_type fip_packet_type;
|
||||
struct workqueue_struct *timer_work_queue;
|
||||
struct kref kref;
|
||||
struct fcoe_ctlr ctlr;
|
||||
u8 vlan_enabled;
|
||||
int vlan_id;
|
||||
};
|
||||
|
||||
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
|
||||
|
||||
struct bnx2fc_lport {
|
||||
struct list_head list;
|
||||
@ -252,9 +248,11 @@ struct bnx2fc_rport {
|
||||
struct fc_rport_priv *rdata;
|
||||
void __iomem *ctx_base;
|
||||
#define DPM_TRIGER_TYPE 0x40
|
||||
u32 io_timeout;
|
||||
u32 fcoe_conn_id;
|
||||
u32 context_id;
|
||||
u32 sid;
|
||||
int dev_type;
|
||||
|
||||
unsigned long flags;
|
||||
#define BNX2FC_FLAG_SESSION_READY 0x1
|
||||
@ -262,10 +260,9 @@ struct bnx2fc_rport {
|
||||
#define BNX2FC_FLAG_DISABLED 0x3
|
||||
#define BNX2FC_FLAG_DESTROYED 0x4
|
||||
#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
|
||||
#define BNX2FC_FLAG_DESTROY_CMPL 0x6
|
||||
#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
|
||||
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
|
||||
#define BNX2FC_FLAG_EXPL_LOGO 0x9
|
||||
#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
|
||||
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
|
||||
#define BNX2FC_FLAG_EXPL_LOGO 0x8
|
||||
|
||||
u8 src_addr[ETH_ALEN];
|
||||
u32 max_sqes;
|
||||
@ -327,12 +324,9 @@ struct bnx2fc_rport {
|
||||
spinlock_t cq_lock;
|
||||
atomic_t num_active_ios;
|
||||
u32 flush_in_prog;
|
||||
unsigned long work_time_slice;
|
||||
unsigned long timestamp;
|
||||
struct list_head free_task_list;
|
||||
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
|
||||
atomic_t pi;
|
||||
atomic_t ci;
|
||||
struct list_head active_cmd_queue;
|
||||
struct list_head els_queue;
|
||||
struct list_head io_retire_queue;
|
||||
@ -367,6 +361,8 @@ struct bnx2fc_els_cb_arg {
|
||||
struct bnx2fc_cmd *aborted_io_req;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
u16 l2_oxid;
|
||||
u32 offset;
|
||||
enum fc_rctl r_ctl;
|
||||
};
|
||||
|
||||
/* bnx2fc command structure */
|
||||
@ -380,6 +376,7 @@ struct bnx2fc_cmd {
|
||||
#define BNX2FC_ABTS 3
|
||||
#define BNX2FC_ELS 4
|
||||
#define BNX2FC_CLEANUP 5
|
||||
#define BNX2FC_SEQ_CLEANUP 6
|
||||
u8 io_req_flags;
|
||||
struct kref refcount;
|
||||
struct fcoe_port *port;
|
||||
@ -393,6 +390,7 @@ struct bnx2fc_cmd {
|
||||
struct completion tm_done;
|
||||
int wait_for_comp;
|
||||
u16 xid;
|
||||
struct fcoe_err_report_entry err_entry;
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
struct io_bdt *bd_tbl;
|
||||
struct fcp_rsp *rsp;
|
||||
@ -409,6 +407,12 @@ struct bnx2fc_cmd {
|
||||
#define BNX2FC_FLAG_IO_COMPL 0x9
|
||||
#define BNX2FC_FLAG_ELS_DONE 0xa
|
||||
#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
|
||||
#define BNX2FC_FLAG_CMD_LOST 0xc
|
||||
#define BNX2FC_FLAG_SRR_SENT 0xd
|
||||
u8 rec_retry;
|
||||
u8 srr_retry;
|
||||
u32 srr_offset;
|
||||
u8 srr_rctl;
|
||||
u32 fcp_resid;
|
||||
u32 fcp_rsp_len;
|
||||
u32 fcp_sns_len;
|
||||
@ -439,6 +443,7 @@ struct bnx2fc_unsol_els {
|
||||
|
||||
|
||||
|
||||
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
|
||||
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
|
||||
void bnx2fc_cmd_release(struct kref *ref);
|
||||
int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
|
||||
@ -476,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
|
||||
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u16 orig_xid);
|
||||
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
struct bnx2fc_cmd *orig_io_req,
|
||||
u32 offset);
|
||||
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task);
|
||||
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
@ -525,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
|
||||
unsigned char *buf,
|
||||
u32 frame_len, u16 l2_oxid);
|
||||
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
|
||||
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
|
||||
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
|
||||
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
|
||||
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 rx_state);
|
||||
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
|
||||
enum fc_rctl r_ctl);
|
||||
|
||||
#endif
|
||||
|
@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
|
||||
|
||||
#define BNX2FC_ELS_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_ELS, \
|
||||
printk(KERN_ALERT PFX fmt, ##arg))
|
||||
printk(KERN_INFO PFX fmt, ##arg))
|
||||
|
||||
#define BNX2FC_MISC_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_MISC, \
|
||||
printk(KERN_ALERT PFX fmt, ##arg))
|
||||
printk(KERN_INFO PFX fmt, ##arg))
|
||||
|
||||
#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
|
||||
do { \
|
||||
if (!io_req || !io_req->port || !io_req->port->lport || \
|
||||
!io_req->port->lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
shost_printk(KERN_ALERT, \
|
||||
shost_printk(KERN_INFO, \
|
||||
(io_req)->port->lport->host, \
|
||||
PFX "xid:0x%x " fmt, \
|
||||
(io_req)->xid, ##arg)); \
|
||||
@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
|
||||
if (!tgt || !tgt->port || !tgt->port->lport || \
|
||||
!tgt->port->lport->host || !tgt->rport) \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
shost_printk(KERN_ALERT, \
|
||||
shost_printk(KERN_INFO, \
|
||||
(tgt)->port->lport->host, \
|
||||
PFX "port:%x " fmt, \
|
||||
(tgt)->rport->port_id, ##arg)); \
|
||||
@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
|
||||
do { \
|
||||
if (!lport || !lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
shost_printk(KERN_ALERT, lport->host, \
|
||||
shost_printk(KERN_INFO, lport->host, \
|
||||
PFX fmt, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
* This file contains helper routines that handle ELS requests
|
||||
* and responses.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
* Copyright (c) 2008 - 2011 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
|
||||
return rc;
|
||||
}
|
||||
|
||||
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
|
||||
{
|
||||
struct bnx2fc_mp_req *mp_req;
|
||||
struct fc_frame_header *fc_hdr, *fh;
|
||||
struct bnx2fc_cmd *srr_req;
|
||||
struct bnx2fc_cmd *orig_io_req;
|
||||
struct fc_frame *fp;
|
||||
unsigned char *buf;
|
||||
void *resp_buf;
|
||||
u32 resp_len, hdr_len;
|
||||
u8 opcode;
|
||||
int rc = 0;
|
||||
|
||||
orig_io_req = cb_arg->aborted_io_req;
|
||||
srr_req = cb_arg->io_req;
|
||||
if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
|
||||
orig_io_req->xid);
|
||||
goto srr_compl_done;
|
||||
}
|
||||
if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(srr_req, "rec abts in prog "
|
||||
"orig_io - 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
goto srr_compl_done;
|
||||
}
|
||||
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
|
||||
/* SRR timedout */
|
||||
BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
|
||||
"orig_io - 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
rc = bnx2fc_initiate_abts(srr_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
|
||||
"failed. issue cleanup\n");
|
||||
bnx2fc_initiate_cleanup(srr_req);
|
||||
}
|
||||
orig_io_req->srr_retry++;
|
||||
if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
|
||||
struct bnx2fc_rport *tgt = orig_io_req->tgt;
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_send_srr(orig_io_req,
|
||||
orig_io_req->srr_offset,
|
||||
orig_io_req->srr_rctl);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
if (!rc)
|
||||
goto srr_compl_done;
|
||||
}
|
||||
|
||||
rc = bnx2fc_initiate_abts(orig_io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
|
||||
"failed xid = 0x%x. issue cleanup\n",
|
||||
orig_io_req->xid);
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
}
|
||||
goto srr_compl_done;
|
||||
}
|
||||
mp_req = &(srr_req->mp_req);
|
||||
fc_hdr = &(mp_req->resp_fc_hdr);
|
||||
resp_len = mp_req->resp_len;
|
||||
resp_buf = mp_req->resp_buf;
|
||||
|
||||
hdr_len = sizeof(*fc_hdr);
|
||||
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
|
||||
if (!buf) {
|
||||
printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
|
||||
goto srr_compl_done;
|
||||
}
|
||||
memcpy(buf, fc_hdr, hdr_len);
|
||||
memcpy(buf + hdr_len, resp_buf, resp_len);
|
||||
|
||||
fp = fc_frame_alloc(NULL, resp_len);
|
||||
if (!fp) {
|
||||
printk(KERN_ERR PFX "fc_frame_alloc failure\n");
|
||||
goto free_buf;
|
||||
}
|
||||
|
||||
fh = (struct fc_frame_header *) fc_frame_header_get(fp);
|
||||
/* Copy FC Frame header and payload into the frame */
|
||||
memcpy(fh, buf, hdr_len + resp_len);
|
||||
|
||||
opcode = fc_frame_payload_op(fp);
|
||||
switch (opcode) {
|
||||
case ELS_LS_ACC:
|
||||
BNX2FC_IO_DBG(srr_req, "SRR success\n");
|
||||
break;
|
||||
case ELS_LS_RJT:
|
||||
BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
|
||||
rc = bnx2fc_initiate_abts(orig_io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
|
||||
"failed xid = 0x%x. issue cleanup\n",
|
||||
orig_io_req->xid);
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
|
||||
opcode);
|
||||
break;
|
||||
}
|
||||
fc_frame_free(fp);
|
||||
free_buf:
|
||||
kfree(buf);
|
||||
srr_compl_done:
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
}
|
||||
|
||||
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
|
||||
{
|
||||
struct bnx2fc_cmd *orig_io_req, *new_io_req;
|
||||
struct bnx2fc_cmd *rec_req;
|
||||
struct bnx2fc_mp_req *mp_req;
|
||||
struct fc_frame_header *fc_hdr, *fh;
|
||||
struct fc_els_ls_rjt *rjt;
|
||||
struct fc_els_rec_acc *acc;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct fcoe_err_report_entry *err_entry;
|
||||
struct scsi_cmnd *sc_cmd;
|
||||
enum fc_rctl r_ctl;
|
||||
unsigned char *buf;
|
||||
void *resp_buf;
|
||||
struct fc_frame *fp;
|
||||
u8 opcode;
|
||||
u32 offset;
|
||||
u32 e_stat;
|
||||
u32 resp_len, hdr_len;
|
||||
int rc = 0;
|
||||
bool send_seq_clnp = false;
|
||||
bool abort_io = false;
|
||||
|
||||
BNX2FC_MISC_DBG("Entered rec_compl callback\n");
|
||||
rec_req = cb_arg->io_req;
|
||||
orig_io_req = cb_arg->aborted_io_req;
|
||||
BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
|
||||
tgt = orig_io_req->tgt;
|
||||
|
||||
if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(rec_req, "completed"
|
||||
"orig_io - 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
goto rec_compl_done;
|
||||
}
|
||||
if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(rec_req, "abts in prog "
|
||||
"orig_io - 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
goto rec_compl_done;
|
||||
}
|
||||
/* Handle REC timeout case */
|
||||
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(rec_req, "timed out, abort "
|
||||
"orig_io - 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
/* els req is timed out. send abts for els */
|
||||
rc = bnx2fc_initiate_abts(rec_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
|
||||
"failed. issue cleanup\n");
|
||||
bnx2fc_initiate_cleanup(rec_req);
|
||||
}
|
||||
orig_io_req->rec_retry++;
|
||||
/* REC timedout. send ABTS to the orig IO req */
|
||||
if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_send_rec(orig_io_req);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
if (!rc)
|
||||
goto rec_compl_done;
|
||||
}
|
||||
rc = bnx2fc_initiate_abts(orig_io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
|
||||
"failed xid = 0x%x. issue cleanup\n",
|
||||
orig_io_req->xid);
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
}
|
||||
goto rec_compl_done;
|
||||
}
|
||||
mp_req = &(rec_req->mp_req);
|
||||
fc_hdr = &(mp_req->resp_fc_hdr);
|
||||
resp_len = mp_req->resp_len;
|
||||
acc = resp_buf = mp_req->resp_buf;
|
||||
|
||||
hdr_len = sizeof(*fc_hdr);
|
||||
|
||||
buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
|
||||
if (!buf) {
|
||||
printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
|
||||
goto rec_compl_done;
|
||||
}
|
||||
memcpy(buf, fc_hdr, hdr_len);
|
||||
memcpy(buf + hdr_len, resp_buf, resp_len);
|
||||
|
||||
fp = fc_frame_alloc(NULL, resp_len);
|
||||
if (!fp) {
|
||||
printk(KERN_ERR PFX "fc_frame_alloc failure\n");
|
||||
goto free_buf;
|
||||
}
|
||||
|
||||
fh = (struct fc_frame_header *) fc_frame_header_get(fp);
|
||||
/* Copy FC Frame header and payload into the frame */
|
||||
memcpy(fh, buf, hdr_len + resp_len);
|
||||
|
||||
opcode = fc_frame_payload_op(fp);
|
||||
if (opcode == ELS_LS_RJT) {
|
||||
BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
|
||||
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
|
||||
if ((rjt->er_reason == ELS_RJT_LOGIC ||
|
||||
rjt->er_reason == ELS_RJT_UNAB) &&
|
||||
rjt->er_explan == ELS_EXPL_OXID_RXID) {
|
||||
BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
|
||||
new_io_req = bnx2fc_cmd_alloc(tgt);
|
||||
if (!new_io_req)
|
||||
goto abort_io;
|
||||
new_io_req->sc_cmd = orig_io_req->sc_cmd;
|
||||
/* cleanup orig_io_req that is with the FW */
|
||||
set_bit(BNX2FC_FLAG_CMD_LOST,
|
||||
&orig_io_req->req_flags);
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
/* Post a new IO req with the same sc_cmd */
|
||||
BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_post_io_req(tgt, new_io_req);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
if (!rc)
|
||||
goto free_frame;
|
||||
BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
|
||||
}
|
||||
abort_io:
|
||||
rc = bnx2fc_initiate_abts(orig_io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
|
||||
"failed. issue cleanup\n");
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
}
|
||||
} else if (opcode == ELS_LS_ACC) {
|
||||
/* REVISIT: Check if the exchange is already aborted */
|
||||
offset = ntohl(acc->reca_fc4value);
|
||||
e_stat = ntohl(acc->reca_e_stat);
|
||||
if (e_stat & ESB_ST_SEQ_INIT) {
|
||||
BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
|
||||
goto free_frame;
|
||||
}
|
||||
BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
|
||||
e_stat, offset);
|
||||
/* Seq initiative is with us */
|
||||
err_entry = (struct fcoe_err_report_entry *)
|
||||
&orig_io_req->err_entry;
|
||||
sc_cmd = orig_io_req->sc_cmd;
|
||||
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
/* SCSI WRITE command */
|
||||
if (offset == orig_io_req->data_xfer_len) {
|
||||
BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
|
||||
/* FCP_RSP lost */
|
||||
r_ctl = FC_RCTL_DD_CMD_STATUS;
|
||||
offset = 0;
|
||||
} else {
|
||||
/* start transmitting from offset */
|
||||
BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
|
||||
send_seq_clnp = true;
|
||||
r_ctl = FC_RCTL_DD_DATA_DESC;
|
||||
if (bnx2fc_initiate_seq_cleanup(orig_io_req,
|
||||
offset, r_ctl))
|
||||
abort_io = true;
|
||||
/* XFER_RDY */
|
||||
}
|
||||
} else {
|
||||
/* SCSI READ command */
|
||||
if (err_entry->data.rx_buf_off ==
|
||||
orig_io_req->data_xfer_len) {
|
||||
/* FCP_RSP lost */
|
||||
BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
|
||||
r_ctl = FC_RCTL_DD_CMD_STATUS;
|
||||
offset = 0;
|
||||
} else {
|
||||
/* request retransmission from this offset */
|
||||
send_seq_clnp = true;
|
||||
offset = err_entry->data.rx_buf_off;
|
||||
BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
|
||||
/* FCP_DATA lost */
|
||||
r_ctl = FC_RCTL_DD_SOL_DATA;
|
||||
if (bnx2fc_initiate_seq_cleanup(orig_io_req,
|
||||
offset, r_ctl))
|
||||
abort_io = true;
|
||||
}
|
||||
}
|
||||
if (abort_io) {
|
||||
rc = bnx2fc_initiate_abts(orig_io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
|
||||
" failed. issue cleanup\n");
|
||||
bnx2fc_initiate_cleanup(orig_io_req);
|
||||
}
|
||||
} else if (!send_seq_clnp) {
|
||||
BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
|
||||
if (rc) {
|
||||
BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
|
||||
" IO will abort\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
free_frame:
|
||||
fc_frame_free(fp);
|
||||
free_buf:
|
||||
kfree(buf);
|
||||
rec_compl_done:
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
kfree(cb_arg);
|
||||
}
|
||||
|
||||
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
|
||||
{
|
||||
struct fc_els_rec rec;
|
||||
struct bnx2fc_rport *tgt = orig_io_req->tgt;
|
||||
struct fc_lport *lport = tgt->rdata->local_port;
|
||||
struct bnx2fc_els_cb_arg *cb_arg = NULL;
|
||||
u32 sid = tgt->sid;
|
||||
u32 r_a_tov = lport->r_a_tov;
|
||||
int rc;
|
||||
|
||||
BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
|
||||
memset(&rec, 0, sizeof(rec));
|
||||
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
|
||||
rc = -ENOMEM;
|
||||
goto rec_err;
|
||||
}
|
||||
kref_get(&orig_io_req->refcount);
|
||||
|
||||
cb_arg->aborted_io_req = orig_io_req;
|
||||
|
||||
rec.rec_cmd = ELS_REC;
|
||||
hton24(rec.rec_s_id, sid);
|
||||
rec.rec_ox_id = htons(orig_io_req->xid);
|
||||
rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
|
||||
|
||||
rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
|
||||
bnx2fc_rec_compl, cb_arg,
|
||||
r_a_tov);
|
||||
rec_err:
|
||||
if (rc) {
|
||||
BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
kfree(cb_arg);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
|
||||
{
|
||||
struct fcp_srr srr;
|
||||
struct bnx2fc_rport *tgt = orig_io_req->tgt;
|
||||
struct fc_lport *lport = tgt->rdata->local_port;
|
||||
struct bnx2fc_els_cb_arg *cb_arg = NULL;
|
||||
u32 r_a_tov = lport->r_a_tov;
|
||||
int rc;
|
||||
|
||||
BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
|
||||
memset(&srr, 0, sizeof(srr));
|
||||
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
|
||||
rc = -ENOMEM;
|
||||
goto srr_err;
|
||||
}
|
||||
kref_get(&orig_io_req->refcount);
|
||||
|
||||
cb_arg->aborted_io_req = orig_io_req;
|
||||
|
||||
srr.srr_op = ELS_SRR;
|
||||
srr.srr_ox_id = htons(orig_io_req->xid);
|
||||
srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
|
||||
srr.srr_rel_off = htonl(offset);
|
||||
srr.srr_r_ctl = r_ctl;
|
||||
orig_io_req->srr_offset = offset;
|
||||
orig_io_req->srr_rctl = r_ctl;
|
||||
|
||||
rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
|
||||
bnx2fc_srr_compl, cb_arg,
|
||||
r_a_tov);
|
||||
srr_err:
|
||||
if (rc) {
|
||||
BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
kfree(cb_arg);
|
||||
} else
|
||||
set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
void *data, u32 data_len,
|
||||
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
|
||||
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
|
||||
{
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct fc_rport *rport = tgt->rport;
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct bnx2fc_cmd *els_req;
|
||||
@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
|
||||
rc = fc_remote_port_chkready(rport);
|
||||
if (rc) {
|
||||
printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
|
||||
printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
|
||||
rc = -EINVAL;
|
||||
goto els_err;
|
||||
}
|
||||
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
|
||||
printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
|
||||
printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
|
||||
rc = -EINVAL;
|
||||
goto els_err;
|
||||
}
|
||||
@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
|
||||
rc = bnx2fc_init_mp_req(els_req);
|
||||
if (rc == FAILED) {
|
||||
printk(KERN_ALERT PFX "ELS MP request init failed\n");
|
||||
printk(KERN_ERR PFX "ELS MP request init failed\n");
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
kref_put(&els_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
|
||||
memcpy(mp_req->req_buf, data, data_len);
|
||||
} else {
|
||||
printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
|
||||
printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
|
||||
els_req->cb_func = NULL;
|
||||
els_req->cb_arg = NULL;
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
did = tgt->rport->port_id;
|
||||
sid = tgt->sid;
|
||||
|
||||
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
|
||||
FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
|
||||
FC_FC_SEQ_INIT, 0);
|
||||
if (op == ELS_SRR)
|
||||
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
|
||||
FC_TYPE_FCP, FC_FC_FIRST_SEQ |
|
||||
FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
|
||||
else
|
||||
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
|
||||
FC_TYPE_ELS, FC_FC_FIRST_SEQ |
|
||||
FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
|
||||
|
||||
/* Obtain exchange id */
|
||||
xid = els_req->xid;
|
||||
@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
/* Initialize task context for this IO request */
|
||||
task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
bnx2fc_init_mp_task(els_req, task);
|
||||
|
||||
@ -496,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
|
||||
void *arg, u32 timeout)
|
||||
{
|
||||
struct fcoe_port *port = lport_priv(lport);
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct fcoe_ctlr *fip = &hba->ctlr;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct fcoe_ctlr *fip = &interface->ctlr;
|
||||
struct fc_frame_header *fh = fc_frame_header_get(fp);
|
||||
|
||||
switch (op) {
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -2,7 +2,7 @@
|
||||
* This file contains the code that low level functions that interact
|
||||
* with 57712 FCoE firmware.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
* Copyright (c) 2008 - 2011 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
|
||||
struct fcoe_kcqe *ofld_kcqe);
|
||||
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
|
||||
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
|
||||
struct fcoe_kcqe *conn_destroy);
|
||||
struct fcoe_kcqe *destroy_kcqe);
|
||||
|
||||
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
|
||||
{
|
||||
@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
|
||||
int rc = 0;
|
||||
|
||||
if (!hba->cnic) {
|
||||
printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
|
||||
printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -103,6 +103,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
|
||||
fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
|
||||
fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
|
||||
|
||||
|
||||
fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
|
||||
fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
|
||||
((u64) hba->hash_tbl_pbl_dma >> 32);
|
||||
@ -165,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct kwqe *kwqe_arr[4];
|
||||
struct fcoe_kwqe_conn_offload1 ofld_req1;
|
||||
struct fcoe_kwqe_conn_offload2 ofld_req2;
|
||||
@ -227,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
||||
ofld_req3.hdr.flags =
|
||||
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
|
||||
|
||||
ofld_req3.vlan_tag = hba->vlan_id <<
|
||||
ofld_req3.vlan_tag = interface->vlan_id <<
|
||||
FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
|
||||
ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
|
||||
|
||||
@ -277,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
||||
ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
|
||||
FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
|
||||
|
||||
/*
|
||||
* Info from PRLI response, this info is used for sequence level error
|
||||
* recovery support
|
||||
*/
|
||||
if (tgt->dev_type == TYPE_TAPE) {
|
||||
ofld_req3.flags |= 1 <<
|
||||
FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
|
||||
ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
|
||||
? 1 : 0) <<
|
||||
FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
|
||||
}
|
||||
|
||||
/* vlan flag */
|
||||
ofld_req3.flags |= (hba->vlan_enabled <<
|
||||
ofld_req3.flags |= (interface->vlan_enabled <<
|
||||
FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
|
||||
|
||||
/* C2_VALID and ACK flags are not set as they are not suppported */
|
||||
@ -300,12 +314,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
||||
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
|
||||
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
|
||||
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
|
||||
ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
|
||||
ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
|
||||
ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
|
||||
ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
|
||||
ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
|
||||
ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
|
||||
ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
|
||||
/* fcf mac */
|
||||
ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
|
||||
ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
|
||||
ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
|
||||
ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
|
||||
ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
|
||||
|
||||
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
|
||||
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
|
||||
@ -335,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct kwqe *kwqe_arr[2];
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct fcoe_kwqe_conn_enable_disable enbl_req;
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct fc_rport *rport = tgt->rport;
|
||||
@ -358,12 +374,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
|
||||
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
|
||||
|
||||
enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
|
||||
enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
|
||||
enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
|
||||
enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
|
||||
enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
|
||||
enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
|
||||
enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
|
||||
enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
|
||||
enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
|
||||
enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
|
||||
enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
|
||||
enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
|
||||
|
||||
port_id = fc_host_port_id(lport->host);
|
||||
if (port_id != tgt->sid) {
|
||||
@ -379,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
enbl_req.d_id[0] = (port_id & 0x000000FF);
|
||||
enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
|
||||
enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
|
||||
enbl_req.vlan_tag = hba->vlan_id <<
|
||||
enbl_req.vlan_tag = interface->vlan_id <<
|
||||
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
|
||||
enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
|
||||
enbl_req.vlan_flag = hba->vlan_enabled;
|
||||
enbl_req.vlan_flag = interface->vlan_enabled;
|
||||
enbl_req.context_id = tgt->context_id;
|
||||
enbl_req.conn_id = tgt->fcoe_conn_id;
|
||||
|
||||
@ -402,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct fcoe_kwqe_conn_enable_disable disable_req;
|
||||
struct kwqe *kwqe_arr[2];
|
||||
struct fc_rport *rport = tgt->rport;
|
||||
@ -423,12 +440,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
|
||||
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
|
||||
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
|
||||
|
||||
disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
|
||||
disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
|
||||
disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
|
||||
disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
|
||||
disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
|
||||
disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
|
||||
disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
|
||||
disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
|
||||
disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
|
||||
disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
|
||||
disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
|
||||
disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
|
||||
|
||||
port_id = tgt->sid;
|
||||
disable_req.s_id[0] = (port_id & 0x000000FF);
|
||||
@ -442,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
|
||||
disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
|
||||
disable_req.context_id = tgt->context_id;
|
||||
disable_req.conn_id = tgt->fcoe_conn_id;
|
||||
disable_req.vlan_tag = hba->vlan_id <<
|
||||
disable_req.vlan_tag = interface->vlan_id <<
|
||||
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
|
||||
disable_req.vlan_tag |=
|
||||
3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
|
||||
disable_req.vlan_flag = hba->vlan_enabled;
|
||||
disable_req.vlan_flag = interface->vlan_enabled;
|
||||
|
||||
kwqe_arr[0] = (struct kwqe *) &disable_req;
|
||||
|
||||
@ -525,7 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
|
||||
{
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_unsol_els *unsol_els;
|
||||
struct fc_frame_header *fh;
|
||||
struct fc_frame *fp;
|
||||
@ -586,7 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
|
||||
fr_eof(fp) = FC_EOF_T;
|
||||
fr_crc(fp) = cpu_to_le32(~crc);
|
||||
unsol_els->lport = lport;
|
||||
unsol_els->hba = hba;
|
||||
unsol_els->hba = interface->hba;
|
||||
unsol_els->fp = fp;
|
||||
INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
|
||||
queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
|
||||
@ -608,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
u32 frame_len, len;
|
||||
struct bnx2fc_cmd *io_req = NULL;
|
||||
struct fcoe_task_ctx_entry *task, *task_page;
|
||||
struct bnx2fc_hba *hba = tgt->port->priv;
|
||||
struct bnx2fc_interface *interface = tgt->port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
int task_idx, index;
|
||||
int rc = 0;
|
||||
u64 err_warn_bit_map;
|
||||
u8 err_warn = 0xff;
|
||||
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
|
||||
@ -673,39 +693,43 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
|
||||
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
|
||||
|
||||
bnx2fc_return_rqe(tgt, 1);
|
||||
|
||||
if (xid > BNX2FC_MAX_XID) {
|
||||
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
|
||||
xid);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
goto ret_err_rqe;
|
||||
}
|
||||
|
||||
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
hba->task_ctx[task_idx];
|
||||
hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
|
||||
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
|
||||
if (!io_req) {
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
}
|
||||
if (!io_req)
|
||||
goto ret_err_rqe;
|
||||
|
||||
if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
|
||||
printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
goto ret_err_rqe;
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
|
||||
&io_req->req_flags)) {
|
||||
BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
|
||||
"progress.. ignore unsol err\n");
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
goto ret_err_rqe;
|
||||
}
|
||||
|
||||
err_warn_bit_map = (u64)
|
||||
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
|
||||
(u64)err_entry->data.err_warn_bitmap_lo;
|
||||
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
|
||||
if (err_warn_bit_map & (u64)((u64)1 << i)) {
|
||||
err_warn = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -715,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
* logging out the target, when the ABTS eventually
|
||||
* times out.
|
||||
*/
|
||||
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
|
||||
&io_req->req_flags)) {
|
||||
/*
|
||||
* Cancel the timeout_work, as we received IO
|
||||
* completion with FW error.
|
||||
*/
|
||||
if (cancel_delayed_work(&io_req->timeout_work))
|
||||
kref_put(&io_req->refcount,
|
||||
bnx2fc_cmd_release); /* timer hold */
|
||||
|
||||
rc = bnx2fc_initiate_abts(io_req);
|
||||
if (rc != SUCCESS) {
|
||||
BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
|
||||
"failed. issue cleanup\n");
|
||||
rc = bnx2fc_initiate_cleanup(io_req);
|
||||
BUG_ON(rc);
|
||||
}
|
||||
} else
|
||||
if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
|
||||
printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
|
||||
"in ABTS processing\n", xid);
|
||||
goto ret_err_rqe;
|
||||
}
|
||||
BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
|
||||
if (tgt->dev_type != TYPE_TAPE)
|
||||
goto skip_rec;
|
||||
switch (err_warn) {
|
||||
case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
|
||||
case FCOE_ERROR_CODE_DATA_OOO_RO:
|
||||
case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
|
||||
case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
|
||||
case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
|
||||
case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
|
||||
BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
|
||||
xid);
|
||||
memset(&io_req->err_entry, 0,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
memcpy(&io_req->err_entry, err_entry,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
if (!test_bit(BNX2FC_FLAG_SRR_SENT,
|
||||
&io_req->req_flags)) {
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_send_rec(io_req);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
|
||||
if (rc)
|
||||
goto skip_rec;
|
||||
} else
|
||||
printk(KERN_ERR PFX "SRR in progress\n");
|
||||
goto ret_err_rqe;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
skip_rec:
|
||||
set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
|
||||
/*
|
||||
* Cancel the timeout_work, as we received IO
|
||||
* completion with FW error.
|
||||
*/
|
||||
if (cancel_delayed_work(&io_req->timeout_work))
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
|
||||
rc = bnx2fc_initiate_abts(io_req);
|
||||
if (rc != SUCCESS) {
|
||||
printk(KERN_ERR PFX "err_warn: initiate_abts "
|
||||
"failed xid = 0x%x. issue cleanup\n",
|
||||
io_req->xid);
|
||||
bnx2fc_initiate_cleanup(io_req);
|
||||
}
|
||||
ret_err_rqe:
|
||||
bnx2fc_return_rqe(tgt, 1);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
|
||||
@ -755,6 +814,47 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
|
||||
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
|
||||
|
||||
if (xid > BNX2FC_MAX_XID) {
|
||||
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
|
||||
goto ret_warn_rqe;
|
||||
}
|
||||
|
||||
err_warn_bit_map = (u64)
|
||||
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
|
||||
(u64)err_entry->data.err_warn_bitmap_lo;
|
||||
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
|
||||
if (err_warn_bit_map & (u64) (1 << i)) {
|
||||
err_warn = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
|
||||
|
||||
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
|
||||
if (!io_req)
|
||||
goto ret_warn_rqe;
|
||||
|
||||
if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
|
||||
printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
|
||||
goto ret_warn_rqe;
|
||||
}
|
||||
|
||||
memset(&io_req->err_entry, 0,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
memcpy(&io_req->err_entry, err_entry,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
|
||||
if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
|
||||
/* REC_TOV is not a warning code */
|
||||
BUG_ON(1);
|
||||
else
|
||||
BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
|
||||
ret_warn_rqe:
|
||||
bnx2fc_return_rqe(tgt, 1);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
break;
|
||||
@ -770,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
struct fcoe_task_ctx_entry *task_page;
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
int task_idx, index;
|
||||
u16 xid;
|
||||
@ -781,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
|
||||
if (xid >= BNX2FC_MAX_TASKS) {
|
||||
printk(KERN_ALERT PFX "ERROR:xid out of range\n");
|
||||
printk(KERN_ERR PFX "ERROR:xid out of range\n");
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
return;
|
||||
}
|
||||
@ -861,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
break;
|
||||
|
||||
case BNX2FC_SEQ_CLEANUP:
|
||||
BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
|
||||
io_req->xid);
|
||||
bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
break;
|
||||
|
||||
default:
|
||||
printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
|
||||
break;
|
||||
@ -962,8 +1070,10 @@ unlock:
|
||||
1 - tgt->cq_curr_toggle_bit;
|
||||
}
|
||||
}
|
||||
bnx2fc_arm_cq(tgt);
|
||||
atomic_add(num_free_sqes, &tgt->free_sqes);
|
||||
if (num_free_sqes) {
|
||||
bnx2fc_arm_cq(tgt);
|
||||
atomic_add(num_free_sqes, &tgt->free_sqes);
|
||||
}
|
||||
spin_unlock_bh(&tgt->cq_lock);
|
||||
return 0;
|
||||
}
|
||||
@ -983,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
|
||||
|
||||
if (!tgt) {
|
||||
printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
|
||||
printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1004,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
|
||||
{
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_interface *interface;
|
||||
u32 conn_id;
|
||||
u32 context_id;
|
||||
int rc;
|
||||
@ -1018,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
|
||||
BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
|
||||
ofld_kcqe->fcoe_conn_context_id);
|
||||
port = tgt->port;
|
||||
if (hba != tgt->port->priv) {
|
||||
printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
|
||||
interface = tgt->port->priv;
|
||||
if (hba != interface->hba) {
|
||||
printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
|
||||
goto ofld_cmpl_err;
|
||||
}
|
||||
/*
|
||||
@ -1040,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
|
||||
/* now enable the session */
|
||||
rc = bnx2fc_send_session_enable_req(port, tgt);
|
||||
if (rc) {
|
||||
printk(KERN_ALERT PFX "enable session failed\n");
|
||||
printk(KERN_ERR PFX "enable session failed\n");
|
||||
goto ofld_cmpl_err;
|
||||
}
|
||||
}
|
||||
@ -1063,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
|
||||
struct fcoe_kcqe *ofld_kcqe)
|
||||
{
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct bnx2fc_interface *interface;
|
||||
u32 conn_id;
|
||||
u32 context_id;
|
||||
|
||||
@ -1070,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
|
||||
conn_id = ofld_kcqe->fcoe_conn_id;
|
||||
tgt = hba->tgt_ofld_list[conn_id];
|
||||
if (!tgt) {
|
||||
printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
|
||||
printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1082,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
|
||||
* and enable
|
||||
*/
|
||||
if (tgt->context_id != context_id) {
|
||||
printk(KERN_ALERT PFX "context id mis-match\n");
|
||||
printk(KERN_ERR PFX "context id mis-match\n");
|
||||
return;
|
||||
}
|
||||
if (hba != tgt->port->priv) {
|
||||
printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
|
||||
interface = tgt->port->priv;
|
||||
if (hba != interface->hba) {
|
||||
printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
|
||||
goto enbl_cmpl_err;
|
||||
}
|
||||
if (ofld_kcqe->completion_status) {
|
||||
if (ofld_kcqe->completion_status)
|
||||
goto enbl_cmpl_err;
|
||||
} else {
|
||||
else {
|
||||
/* enable successful - rport ready for issuing IOs */
|
||||
set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
@ -1114,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
|
||||
conn_id = disable_kcqe->fcoe_conn_id;
|
||||
tgt = hba->tgt_ofld_list[conn_id];
|
||||
if (!tgt) {
|
||||
printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
|
||||
printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
|
||||
return;
|
||||
}
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
|
||||
|
||||
if (disable_kcqe->completion_status) {
|
||||
printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
|
||||
printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
|
||||
disable_kcqe->completion_status);
|
||||
return;
|
||||
} else {
|
||||
@ -1143,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
|
||||
conn_id = destroy_kcqe->fcoe_conn_id;
|
||||
tgt = hba->tgt_ofld_list[conn_id];
|
||||
if (!tgt) {
|
||||
printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
|
||||
printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
|
||||
return;
|
||||
}
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
|
||||
|
||||
if (destroy_kcqe->completion_status) {
|
||||
printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
|
||||
printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
|
||||
destroy_kcqe->completion_status);
|
||||
return;
|
||||
} else {
|
||||
@ -1182,6 +1296,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
|
||||
break;
|
||||
case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
|
||||
printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
|
||||
}
|
||||
@ -1240,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
|
||||
} else {
|
||||
printk(KERN_ERR PFX "DESTROY success\n");
|
||||
}
|
||||
hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
|
||||
set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
|
||||
wake_up_interruptible(&hba->destroy_wait);
|
||||
break;
|
||||
|
||||
@ -1262,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
|
||||
case FCOE_KCQE_OPCODE_FCOE_ERROR:
|
||||
/* fall thru */
|
||||
default:
|
||||
printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
|
||||
printk(KERN_ERR PFX "unknown opcode 0x%x\n",
|
||||
kcqe->op_code);
|
||||
}
|
||||
}
|
||||
@ -1305,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
|
||||
struct fcoe_port *port = tgt->port;
|
||||
u32 reg_off;
|
||||
resource_size_t reg_base;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
|
||||
reg_base = pci_resource_start(hba->pcidev,
|
||||
BNX2X_DOORBELL_PCI_BAR);
|
||||
@ -1344,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
|
||||
tgt->conn_db->rq_prod = tgt->rq_prod_idx;
|
||||
}
|
||||
|
||||
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
struct bnx2fc_cmd *orig_io_req,
|
||||
u32 offset)
|
||||
{
|
||||
struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
|
||||
struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
|
||||
struct bnx2fc_interface *interface = tgt->port->priv;
|
||||
struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
|
||||
struct fcoe_task_ctx_entry *orig_task;
|
||||
struct fcoe_task_ctx_entry *task_page;
|
||||
struct fcoe_ext_mul_sges_ctx *sgl;
|
||||
u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
|
||||
u8 orig_task_type;
|
||||
u16 orig_xid = orig_io_req->xid;
|
||||
u32 context_id = tgt->context_id;
|
||||
u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
|
||||
u32 orig_offset = offset;
|
||||
int bd_count;
|
||||
int orig_task_idx, index;
|
||||
int i;
|
||||
|
||||
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
|
||||
|
||||
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
|
||||
orig_task_type = FCOE_TASK_TYPE_WRITE;
|
||||
else
|
||||
orig_task_type = FCOE_TASK_TYPE_READ;
|
||||
|
||||
/* Tx flags */
|
||||
task->txwr_rxrd.const_ctx.tx_flags =
|
||||
FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
|
||||
/* init flags */
|
||||
task->txwr_rxrd.const_ctx.init_flags = task_type <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
|
||||
task->rxwr_txrd.const_ctx.init_flags = context_id <<
|
||||
FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
|
||||
task->rxwr_txrd.const_ctx.init_flags = context_id <<
|
||||
FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
|
||||
|
||||
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
|
||||
|
||||
task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
|
||||
task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
|
||||
|
||||
bd_count = orig_io_req->bd_tbl->bd_valid;
|
||||
|
||||
/* obtain the appropriate bd entry from relative offset */
|
||||
for (i = 0; i < bd_count; i++) {
|
||||
if (offset < bd[i].buf_len)
|
||||
break;
|
||||
offset -= bd[i].buf_len;
|
||||
}
|
||||
phys_addr += (i * sizeof(struct fcoe_bd_ctx));
|
||||
|
||||
if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
|
||||
(u32)phys_addr;
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
|
||||
(u32)((u64)phys_addr >> 32);
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
|
||||
bd_count;
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
|
||||
offset; /* adjusted offset */
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
|
||||
} else {
|
||||
orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
|
||||
index = orig_xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[orig_task_idx];
|
||||
orig_task = &(task_page[index]);
|
||||
|
||||
/* Multiple SGEs were used for this IO */
|
||||
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
|
||||
sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
|
||||
sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
|
||||
sgl->mul_sgl.sgl_size = bd_count;
|
||||
sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
|
||||
sgl->mul_sgl.cur_sge_idx = i;
|
||||
|
||||
memset(&task->rxwr_only.rx_seq_ctx, 0,
|
||||
sizeof(struct fcoe_rx_seq_ctx));
|
||||
task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
|
||||
task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
|
||||
}
|
||||
}
|
||||
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u16 orig_xid)
|
||||
@ -1360,7 +1566,12 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
if (tgt->dev_type == TYPE_TAPE)
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_TAPE <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
else
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_DISK <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
|
||||
@ -1420,7 +1631,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
|
||||
/* init flags */
|
||||
task->txwr_rxrd.const_ctx.init_flags = task_type <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
if (tgt->dev_type == TYPE_TAPE)
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_TAPE <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
else
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_DISK <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
|
||||
@ -1477,6 +1693,7 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
struct bnx2fc_rport *tgt = io_req->tgt;
|
||||
struct fcoe_cached_sge_ctx *cached_sge;
|
||||
struct fcoe_ext_mul_sges_ctx *sgl;
|
||||
int dev_type = tgt->dev_type;
|
||||
u64 *fcp_cmnd;
|
||||
u64 tmp_fcp_cmnd[4];
|
||||
u32 context_id;
|
||||
@ -1494,20 +1711,40 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
task_type = FCOE_TASK_TYPE_READ;
|
||||
|
||||
/* Tx only */
|
||||
bd_count = bd_tbl->bd_valid;
|
||||
if (task_type == FCOE_TASK_TYPE_WRITE) {
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
|
||||
(u32)bd_tbl->bd_tbl_dma;
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
|
||||
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
|
||||
bd_tbl->bd_valid;
|
||||
if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
|
||||
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
|
||||
|
||||
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
|
||||
fcoe_bd_tbl->buf_addr_lo;
|
||||
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
|
||||
fcoe_bd_tbl->buf_addr_hi;
|
||||
task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
|
||||
fcoe_bd_tbl->buf_len;
|
||||
|
||||
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
|
||||
} else {
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
|
||||
(u32)bd_tbl->bd_tbl_dma;
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
|
||||
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
|
||||
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
|
||||
bd_tbl->bd_valid;
|
||||
}
|
||||
}
|
||||
|
||||
/*Tx Write Rx Read */
|
||||
/* Init state to NORMAL */
|
||||
task->txwr_rxrd.const_ctx.init_flags = task_type <<
|
||||
task->txwr_rxrd.const_ctx.init_flags |= task_type <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
if (dev_type == TYPE_TAPE)
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_TAPE <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
else
|
||||
task->txwr_rxrd.const_ctx.init_flags |=
|
||||
FCOE_TASK_DEV_TYPE_DISK <<
|
||||
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
|
||||
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
|
||||
@ -1550,7 +1787,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
|
||||
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
|
||||
bd_count = bd_tbl->bd_valid;
|
||||
if (task_type == FCOE_TASK_TYPE_READ) {
|
||||
if (task_type == FCOE_TASK_TYPE_READ &&
|
||||
dev_type == TYPE_DISK) {
|
||||
if (bd_count == 1) {
|
||||
|
||||
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
|
||||
@ -1582,6 +1820,11 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
|
||||
sgl->mul_sgl.sgl_size = bd_count;
|
||||
}
|
||||
} else {
|
||||
sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
|
||||
sgl->mul_sgl.cur_sge_addr.hi =
|
||||
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
|
||||
sgl->mul_sgl.sgl_size = bd_count;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
|
||||
* IO manager and SCSI IO processing.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
* Copyright (c) 2008 - 2011 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
|
||||
int bd_index);
|
||||
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
|
||||
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
|
||||
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
||||
struct bnx2fc_cmd *io_req);
|
||||
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
|
||||
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
|
||||
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
|
||||
@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
|
||||
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
|
||||
unsigned int timer_msec)
|
||||
{
|
||||
struct bnx2fc_hba *hba = io_req->port->priv;
|
||||
struct bnx2fc_interface *interface = io_req->port->priv;
|
||||
|
||||
if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
|
||||
msecs_to_jiffies(timer_msec)))
|
||||
if (queue_delayed_work(interface->timer_work_queue,
|
||||
&io_req->timeout_work,
|
||||
msecs_to_jiffies(timer_msec)))
|
||||
kref_get(&io_req->refcount);
|
||||
}
|
||||
|
||||
@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
|
||||
return;
|
||||
|
||||
BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
|
||||
if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
|
||||
/* Do not call scsi done for this IO */
|
||||
return;
|
||||
}
|
||||
|
||||
bnx2fc_unmap_sg_list(io_req);
|
||||
io_req->sc_cmd = NULL;
|
||||
if (!sc_cmd) {
|
||||
@ -419,8 +423,8 @@ free_cmgr:
|
||||
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
|
||||
{
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
struct list_head *listp;
|
||||
struct io_bdt *bd_tbl;
|
||||
@ -485,11 +489,12 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
|
||||
kref_init(&io_req->refcount);
|
||||
return io_req;
|
||||
}
|
||||
static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
|
||||
|
||||
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
struct list_head *listp;
|
||||
struct io_bdt *bd_tbl;
|
||||
@ -570,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
|
||||
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
|
||||
{
|
||||
struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
|
||||
struct bnx2fc_hba *hba = io_req->port->priv;
|
||||
struct bnx2fc_interface *interface = io_req->port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
size_t sz = sizeof(struct fcoe_bd_ctx);
|
||||
|
||||
/* clear tm flags */
|
||||
@ -606,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
|
||||
struct bnx2fc_mp_req *mp_req;
|
||||
struct fcoe_bd_ctx *mp_req_bd;
|
||||
struct fcoe_bd_ctx *mp_resp_bd;
|
||||
struct bnx2fc_hba *hba = io_req->port->priv;
|
||||
struct bnx2fc_interface *interface = io_req->port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
dma_addr_t addr;
|
||||
size_t sz;
|
||||
|
||||
@ -682,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
|
||||
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
|
||||
struct fc_rport_libfc_priv *rp = rport->dd_data;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_hba *hba;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
struct bnx2fc_mp_req *tm_req;
|
||||
@ -699,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
|
||||
|
||||
lport = shost_priv(host);
|
||||
port = lport_priv(lport);
|
||||
hba = port->priv;
|
||||
interface = port->priv;
|
||||
|
||||
if (rport == NULL) {
|
||||
printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
|
||||
printk(KERN_ERR PFX "device_reset: rport is NULL\n");
|
||||
rc = FAILED;
|
||||
goto tmf_err;
|
||||
}
|
||||
@ -745,7 +752,9 @@ retry_tmf:
|
||||
rc = bnx2fc_init_mp_req(io_req);
|
||||
if (rc == FAILED) {
|
||||
printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
goto tmf_err;
|
||||
}
|
||||
|
||||
@ -774,7 +783,8 @@ retry_tmf:
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
/* Initialize task context for this IO request */
|
||||
task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
bnx2fc_init_mp_task(io_req, task);
|
||||
|
||||
@ -806,10 +816,10 @@ retry_tmf:
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
|
||||
if (!rc) {
|
||||
printk(KERN_ERR PFX "task mgmt command failed...\n");
|
||||
BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
|
||||
rc = FAILED;
|
||||
} else {
|
||||
printk(KERN_ERR PFX "task mgmt command success...\n");
|
||||
BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
|
||||
rc = SUCCESS;
|
||||
}
|
||||
tmf_err:
|
||||
@ -822,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
|
||||
struct bnx2fc_rport *tgt = io_req->tgt;
|
||||
struct fc_rport *rport = tgt->rport;
|
||||
struct fc_rport_priv *rdata = tgt->rdata;
|
||||
struct bnx2fc_hba *hba;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_cmd *abts_io_req;
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
@ -839,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
|
||||
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
|
||||
|
||||
port = io_req->port;
|
||||
hba = port->priv;
|
||||
interface = port->priv;
|
||||
lport = port->lport;
|
||||
|
||||
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
|
||||
@ -849,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
|
||||
}
|
||||
|
||||
if (rport == NULL) {
|
||||
printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
|
||||
printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
|
||||
rc = FAILED;
|
||||
goto abts_err;
|
||||
}
|
||||
@ -896,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
/* Initialize task context for this IO request */
|
||||
task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
bnx2fc_init_mp_task(abts_io_req, task);
|
||||
|
||||
@ -924,11 +935,81 @@ abts_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
|
||||
enum fc_rctl r_ctl)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct bnx2fc_rport *tgt = orig_io_req->tgt;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_cmd *seq_clnp_req;
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
struct fcoe_task_ctx_entry *task_page;
|
||||
struct bnx2fc_els_cb_arg *cb_arg = NULL;
|
||||
int task_idx, index;
|
||||
u16 xid;
|
||||
int rc = 0;
|
||||
|
||||
BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
|
||||
orig_io_req->xid);
|
||||
kref_get(&orig_io_req->refcount);
|
||||
|
||||
port = orig_io_req->port;
|
||||
interface = port->priv;
|
||||
lport = port->lport;
|
||||
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
|
||||
rc = -ENOMEM;
|
||||
goto cleanup_err;
|
||||
}
|
||||
|
||||
seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
|
||||
if (!seq_clnp_req) {
|
||||
printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
|
||||
rc = -ENOMEM;
|
||||
kfree(cb_arg);
|
||||
goto cleanup_err;
|
||||
}
|
||||
/* Initialize rest of io_req fields */
|
||||
seq_clnp_req->sc_cmd = NULL;
|
||||
seq_clnp_req->port = port;
|
||||
seq_clnp_req->tgt = tgt;
|
||||
seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
|
||||
|
||||
xid = seq_clnp_req->xid;
|
||||
|
||||
task_idx = xid/BNX2FC_TASKS_PER_PAGE;
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
/* Initialize task context for this IO request */
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
cb_arg->aborted_io_req = orig_io_req;
|
||||
cb_arg->io_req = seq_clnp_req;
|
||||
cb_arg->r_ctl = r_ctl;
|
||||
cb_arg->offset = offset;
|
||||
seq_clnp_req->cb_arg = cb_arg;
|
||||
|
||||
printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
|
||||
bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
|
||||
|
||||
/* Obtain free SQ entry */
|
||||
bnx2fc_add_2_sq(tgt, xid);
|
||||
|
||||
/* Ring doorbell */
|
||||
bnx2fc_ring_doorbell(tgt);
|
||||
cleanup_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct bnx2fc_rport *tgt = io_req->tgt;
|
||||
struct bnx2fc_hba *hba;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_cmd *cleanup_io_req;
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
@ -941,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
|
||||
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
|
||||
|
||||
port = io_req->port;
|
||||
hba = port->priv;
|
||||
interface = port->priv;
|
||||
lport = port->lport;
|
||||
|
||||
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
|
||||
@ -963,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
|
||||
index = xid % BNX2FC_TASKS_PER_PAGE;
|
||||
|
||||
/* Initialize task context for this IO request */
|
||||
task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
|
||||
task_page = (struct fcoe_task_ctx_entry *)
|
||||
interface->hba->task_ctx[task_idx];
|
||||
task = &(task_page[index]);
|
||||
orig_xid = io_req->xid;
|
||||
|
||||
@ -1031,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
|
||||
|
||||
lport = shost_priv(sc_cmd->device->host);
|
||||
if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
|
||||
printk(KERN_ALERT PFX "eh_abort: link not ready\n");
|
||||
printk(KERN_ERR PFX "eh_abort: link not ready\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -1062,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
|
||||
* io_req is no longer in the active_q.
|
||||
*/
|
||||
if (tgt->flush_in_prog) {
|
||||
printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
"flush in progress\n", io_req->xid);
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
@ -1070,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
|
||||
}
|
||||
|
||||
if (io_req->on_active_queue == 0) {
|
||||
printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
"not on active_q\n", io_req->xid);
|
||||
/*
|
||||
* This condition can happen only due to the FW bug,
|
||||
@ -1108,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
|
||||
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
|
||||
rc = bnx2fc_initiate_abts(io_req);
|
||||
} else {
|
||||
printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
|
||||
"already in abts processing\n", io_req->xid);
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
@ -1149,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 rx_state)
|
||||
{
|
||||
struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
|
||||
struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
|
||||
u32 offset = cb_arg->offset;
|
||||
enum fc_rctl r_ctl = cb_arg->r_ctl;
|
||||
int rc = 0;
|
||||
struct bnx2fc_rport *tgt = orig_io_req->tgt;
|
||||
|
||||
BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
|
||||
"cmd_type = %d\n",
|
||||
seq_clnp_req->xid, seq_clnp_req->cmd_type);
|
||||
|
||||
if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
|
||||
printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
|
||||
seq_clnp_req->xid);
|
||||
goto free_cb_arg;
|
||||
}
|
||||
kref_get(&orig_io_req->refcount);
|
||||
|
||||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
|
||||
spin_lock_bh(&tgt->tgt_lock);
|
||||
|
||||
if (rc)
|
||||
printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
|
||||
" IO will abort\n");
|
||||
seq_clnp_req->cb_arg = NULL;
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
free_cb_arg:
|
||||
kfree(cb_arg);
|
||||
return;
|
||||
}
|
||||
|
||||
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq)
|
||||
@ -1378,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
|
||||
fc_hdr->fh_r_ctl);
|
||||
}
|
||||
if (!sc_cmd->SCp.ptr) {
|
||||
printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
|
||||
printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
|
||||
return;
|
||||
}
|
||||
switch (io_req->fcp_status) {
|
||||
@ -1410,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
|
||||
io_req->on_tmf_queue = 0;
|
||||
} else {
|
||||
|
||||
printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
|
||||
printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1597,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
|
||||
|
||||
if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
|
||||
/* Invalid sense sense length. */
|
||||
printk(KERN_ALERT PFX "invalid sns length %d\n",
|
||||
printk(KERN_ERR PFX "invalid sns length %d\n",
|
||||
rq_buff_len);
|
||||
/* reset rq_buff_len */
|
||||
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
|
||||
@ -1780,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
|
||||
scsi_set_resid(sc_cmd, io_req->fcp_resid);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
|
||||
printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
|
||||
io_req->fcp_status);
|
||||
break;
|
||||
}
|
||||
@ -1789,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
|
||||
kref_put(&io_req->refcount, bnx2fc_cmd_release);
|
||||
}
|
||||
|
||||
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
||||
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
||||
struct bnx2fc_cmd *io_req)
|
||||
{
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
struct fcoe_task_ctx_entry *task_page;
|
||||
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
|
||||
struct fcoe_port *port = tgt->port;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct fcoe_dev_stats *stats;
|
||||
int task_idx, index;
|
||||
@ -1854,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
||||
}
|
||||
|
||||
/* Time IO req */
|
||||
bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
|
||||
if (tgt->io_timeout)
|
||||
bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
|
||||
/* Obtain free SQ entry */
|
||||
bnx2fc_add_2_sq(tgt, xid);
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
* Handles operations such as session offload/upload etc, and manages
|
||||
* session resources such as connection id and qp resources.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
* Copyright (c) 2008 - 2011 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
|
||||
{
|
||||
struct fc_lport *lport = rdata->local_port;
|
||||
struct fc_rport *rport = rdata->rport;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
int rval;
|
||||
int i = 0;
|
||||
|
||||
@ -237,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
||||
static void bnx2fc_upload_session(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
|
||||
tgt->num_active_ios.counter);
|
||||
@ -316,7 +318,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
|
||||
{
|
||||
|
||||
struct fc_rport *rport = rdata->rport;
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
|
||||
struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
|
||||
|
||||
@ -350,6 +353,14 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
|
||||
tgt->rq_cons_idx = 0;
|
||||
atomic_set(&tgt->num_active_ios, 0);
|
||||
|
||||
if (rdata->flags & FC_RP_FLAGS_RETRY) {
|
||||
tgt->dev_type = TYPE_TAPE;
|
||||
tgt->io_timeout = 0; /* use default ULP timeout */
|
||||
} else {
|
||||
tgt->dev_type = TYPE_DISK;
|
||||
tgt->io_timeout = BNX2FC_IO_TIMEOUT;
|
||||
}
|
||||
|
||||
/* initialize sq doorbell */
|
||||
sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
|
||||
sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
|
||||
@ -392,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
enum fc_rport_event event)
|
||||
{
|
||||
struct fcoe_port *port = lport_priv(lport);
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct fc_rport *rport = rdata->rport;
|
||||
struct fc_rport_libfc_priv *rp;
|
||||
struct bnx2fc_rport *tgt;
|
||||
@ -403,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
switch (event) {
|
||||
case RPORT_EV_READY:
|
||||
if (!rport) {
|
||||
printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
|
||||
printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -415,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
* We should not come here, as lport will
|
||||
* take care of fabric login
|
||||
*/
|
||||
printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
|
||||
printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
|
||||
rdata->ids.port_id);
|
||||
break;
|
||||
}
|
||||
@ -483,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
break;
|
||||
|
||||
if (!rport) {
|
||||
printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
|
||||
printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
|
||||
port_id);
|
||||
break;
|
||||
}
|
||||
@ -537,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
|
||||
u32 port_id)
|
||||
{
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct fc_rport_priv *rdata;
|
||||
int i;
|
||||
@ -552,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
|
||||
"obtained\n");
|
||||
return tgt;
|
||||
} else {
|
||||
printk(KERN_ERR PFX "rport 0x%x "
|
||||
BNX2FC_TGT_DBG(tgt, "rport 0x%x "
|
||||
"is in DELETED state\n",
|
||||
rdata->ids.port_id);
|
||||
return NULL;
|
||||
@ -633,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
|
||||
&tgt->sq_dma, GFP_KERNEL);
|
||||
if (!tgt->sq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
|
||||
tgt->sq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -646,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
|
||||
&tgt->cq_dma, GFP_KERNEL);
|
||||
if (!tgt->cq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
|
||||
tgt->cq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -659,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
|
||||
&tgt->rq_dma, GFP_KERNEL);
|
||||
if (!tgt->rq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
|
||||
tgt->rq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -671,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
|
||||
&tgt->rq_pbl_dma, GFP_KERNEL);
|
||||
if (!tgt->rq_pbl) {
|
||||
printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
|
||||
tgt->rq_pbl_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -697,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
|
||||
&tgt->xferq_dma, GFP_KERNEL);
|
||||
if (!tgt->xferq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
|
||||
tgt->xferq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -711,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
|
||||
&tgt->confq_dma, GFP_KERNEL);
|
||||
if (!tgt->confq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
|
||||
tgt->confq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -726,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->confq_pbl_size,
|
||||
&tgt->confq_pbl_dma, GFP_KERNEL);
|
||||
if (!tgt->confq_pbl) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
|
||||
tgt->confq_pbl_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -751,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
tgt->conn_db_mem_size,
|
||||
&tgt->conn_db_dma, GFP_KERNEL);
|
||||
if (!tgt->conn_db) {
|
||||
printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
|
||||
tgt->conn_db_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
@ -767,7 +780,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
&tgt->lcq_dma, GFP_KERNEL);
|
||||
|
||||
if (!tgt->lcq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
|
||||
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
|
||||
tgt->lcq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
|
@ -128,25 +128,7 @@ struct c4_inquiry {
|
||||
u8 reserved[2];
|
||||
};
|
||||
|
||||
struct rdac_controller {
|
||||
u8 subsys_id[SUBSYS_ID_LEN];
|
||||
u8 slot_id[SLOT_ID_LEN];
|
||||
int use_ms10;
|
||||
struct kref kref;
|
||||
struct list_head node; /* list of all controllers */
|
||||
union {
|
||||
struct rdac_pg_legacy legacy;
|
||||
struct rdac_pg_expanded expanded;
|
||||
} mode_select;
|
||||
u8 index;
|
||||
u8 array_name[ARRAY_LABEL_LEN];
|
||||
spinlock_t ms_lock;
|
||||
int ms_queued;
|
||||
struct work_struct ms_work;
|
||||
struct scsi_device *ms_sdev;
|
||||
struct list_head ms_head;
|
||||
};
|
||||
|
||||
#define UNIQUE_ID_LEN 16
|
||||
struct c8_inquiry {
|
||||
u8 peripheral_info;
|
||||
u8 page_code; /* 0xC8 */
|
||||
@ -159,12 +141,31 @@ struct c8_inquiry {
|
||||
u8 vol_user_label_len;
|
||||
u8 vol_user_label[60];
|
||||
u8 array_uniq_id_len;
|
||||
u8 array_unique_id[16];
|
||||
u8 array_unique_id[UNIQUE_ID_LEN];
|
||||
u8 array_user_label_len;
|
||||
u8 array_user_label[60];
|
||||
u8 lun[8];
|
||||
};
|
||||
|
||||
struct rdac_controller {
|
||||
u8 array_id[UNIQUE_ID_LEN];
|
||||
int use_ms10;
|
||||
struct kref kref;
|
||||
struct list_head node; /* list of all controllers */
|
||||
union {
|
||||
struct rdac_pg_legacy legacy;
|
||||
struct rdac_pg_expanded expanded;
|
||||
} mode_select;
|
||||
u8 index;
|
||||
u8 array_name[ARRAY_LABEL_LEN];
|
||||
struct Scsi_Host *host;
|
||||
spinlock_t ms_lock;
|
||||
int ms_queued;
|
||||
struct work_struct ms_work;
|
||||
struct scsi_device *ms_sdev;
|
||||
struct list_head ms_head;
|
||||
};
|
||||
|
||||
struct c2_inquiry {
|
||||
u8 peripheral_info;
|
||||
u8 page_code; /* 0xC2 */
|
||||
@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
|
||||
kfree(ctlr);
|
||||
}
|
||||
|
||||
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
|
||||
char *array_name)
|
||||
static struct rdac_controller *get_controller(int index, char *array_name,
|
||||
u8 *array_id, struct scsi_device *sdev)
|
||||
{
|
||||
struct rdac_controller *ctlr, *tmp;
|
||||
|
||||
spin_lock(&list_lock);
|
||||
|
||||
list_for_each_entry(tmp, &ctlr_list, node) {
|
||||
if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
|
||||
(memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
|
||||
if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
|
||||
(tmp->index == index) &&
|
||||
(tmp->host == sdev->host)) {
|
||||
kref_get(&tmp->kref);
|
||||
spin_unlock(&list_lock);
|
||||
return tmp;
|
||||
@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
|
||||
goto done;
|
||||
|
||||
/* initialize fields of controller */
|
||||
memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
|
||||
memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
|
||||
memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
|
||||
ctlr->index = index;
|
||||
ctlr->host = sdev->host;
|
||||
memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
|
||||
|
||||
/* update the controller index */
|
||||
if (slot_id[1] == 0x31)
|
||||
ctlr->index = 0;
|
||||
else
|
||||
ctlr->index = 1;
|
||||
|
||||
kref_init(&ctlr->kref);
|
||||
ctlr->use_ms10 = -1;
|
||||
ctlr->ms_queued = 0;
|
||||
@ -444,7 +441,7 @@ done:
|
||||
}
|
||||
|
||||
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
|
||||
char *array_name)
|
||||
char *array_name, u8 *array_id)
|
||||
{
|
||||
int err, i;
|
||||
struct c8_inquiry *inqp;
|
||||
@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
|
||||
*(array_name+i) = inqp->array_user_label[(2*i)+1];
|
||||
|
||||
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
|
||||
memset(array_id, 0, UNIQUE_ID_LEN);
|
||||
memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
|
||||
}
|
||||
|
||||
static int initialize_controller(struct scsi_device *sdev,
|
||||
struct rdac_dh_data *h, char *array_name)
|
||||
struct rdac_dh_data *h, char *array_name, u8 *array_id)
|
||||
{
|
||||
int err;
|
||||
int err, index;
|
||||
struct c4_inquiry *inqp;
|
||||
|
||||
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
|
||||
if (err == SCSI_DH_OK) {
|
||||
inqp = &h->inq.c4;
|
||||
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
|
||||
array_name);
|
||||
/* get the controller index */
|
||||
if (inqp->slot_id[1] == 0x31)
|
||||
index = 0;
|
||||
else
|
||||
index = 1;
|
||||
h->ctlr = get_controller(index, array_name, array_id, sdev);
|
||||
if (!h->ctlr)
|
||||
err = SCSI_DH_RES_TEMP_UNAVAIL;
|
||||
}
|
||||
@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
|
||||
unsigned long flags;
|
||||
int err;
|
||||
char array_name[ARRAY_LABEL_LEN];
|
||||
char array_id[UNIQUE_ID_LEN];
|
||||
|
||||
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
|
||||
+ sizeof(*h) , GFP_KERNEL);
|
||||
@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
|
||||
h->lun = UNINITIALIZED_LUN;
|
||||
h->state = RDAC_STATE_ACTIVE;
|
||||
|
||||
err = get_lun_info(sdev, h, array_name);
|
||||
err = get_lun_info(sdev, h, array_name, array_id);
|
||||
if (err != SCSI_DH_OK)
|
||||
goto failed;
|
||||
|
||||
err = initialize_controller(sdev, h, array_name);
|
||||
err = initialize_controller(sdev, h, array_name, array_id);
|
||||
if (err != SCSI_DH_OK)
|
||||
goto failed;
|
||||
|
||||
|
@ -486,6 +486,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
|
||||
* @port: The FCoE port
|
||||
* @skb: The FIP/FCoE packet to be sent
|
||||
*/
|
||||
static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
|
||||
{
|
||||
if (port->fcoe_pending_queue.qlen)
|
||||
fcoe_check_wait_queue(port->lport, skb);
|
||||
else if (fcoe_start_io(skb))
|
||||
fcoe_check_wait_queue(port->lport, skb);
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
|
||||
* @fip: The FCoE controller
|
||||
@ -494,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
|
||||
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
||||
{
|
||||
skb->dev = fcoe_from_ctlr(fip)->netdev;
|
||||
dev_queue_xmit(skb);
|
||||
fcoe_port_send(lport_priv(fip->lp), skb);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1257,30 +1270,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
|
||||
/**
|
||||
* fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
|
||||
* command.
|
||||
* @curr_cpu: CPU which received request
|
||||
*
|
||||
* This routine selects next CPU based on cpumask.
|
||||
* This routine selects next CPU based on cpumask to distribute
|
||||
* incoming requests in round robin.
|
||||
*
|
||||
* Returns: int (CPU number). Caller to verify if returned CPU is online or not.
|
||||
* Returns: int CPU number
|
||||
*/
|
||||
static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
|
||||
static inline unsigned int fcoe_select_cpu(void)
|
||||
{
|
||||
static unsigned int selected_cpu;
|
||||
|
||||
if (num_online_cpus() == 1)
|
||||
return curr_cpu;
|
||||
/*
|
||||
* Doing following check, to skip "curr_cpu (smp_processor_id)"
|
||||
* from selection of CPU is intentional. This is to avoid same CPU
|
||||
* doing post-processing of command. "curr_cpu" to just receive
|
||||
* incoming request in case where rx_id is UNKNOWN and all other
|
||||
* CPU to actually process the command(s)
|
||||
*/
|
||||
do {
|
||||
selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
|
||||
if (selected_cpu >= nr_cpu_ids)
|
||||
selected_cpu = cpumask_first(cpu_online_mask);
|
||||
} while (selected_cpu == curr_cpu);
|
||||
selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
|
||||
if (selected_cpu >= nr_cpu_ids)
|
||||
selected_cpu = cpumask_first(cpu_online_mask);
|
||||
|
||||
return selected_cpu;
|
||||
}
|
||||
|
||||
@ -1350,30 +1353,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
|
||||
|
||||
fr = fcoe_dev_from_skb(skb);
|
||||
fr->fr_dev = lport;
|
||||
fr->ptype = ptype;
|
||||
|
||||
/*
|
||||
* In case the incoming frame's exchange is originated from
|
||||
* the initiator, then received frame's exchange id is ANDed
|
||||
* with fc_cpu_mask bits to get the same cpu on which exchange
|
||||
* was originated, otherwise just use the current cpu.
|
||||
* was originated, otherwise select cpu using rx exchange id
|
||||
* or fcoe_select_cpu().
|
||||
*/
|
||||
if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
|
||||
cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
|
||||
else {
|
||||
cpu = smp_processor_id();
|
||||
|
||||
if ((fh->fh_type == FC_TYPE_FCP) &&
|
||||
(ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
|
||||
do {
|
||||
cpu = fcoe_select_cpu(cpu);
|
||||
} while (!cpu_online(cpu));
|
||||
} else if ((fh->fh_type == FC_TYPE_FCP) &&
|
||||
(ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
|
||||
if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
|
||||
cpu = fcoe_select_cpu();
|
||||
else
|
||||
cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
|
||||
} else
|
||||
cpu = smp_processor_id();
|
||||
}
|
||||
|
||||
if (cpu >= nr_cpu_ids)
|
||||
goto err;
|
||||
|
||||
fps = &per_cpu(fcoe_percpu, cpu);
|
||||
spin_lock_bh(&fps->fcoe_rx_list.lock);
|
||||
if (unlikely(!fps->thread)) {
|
||||
@ -1572,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
|
||||
|
||||
/* send down to lld */
|
||||
fr_dev(fp) = lport;
|
||||
if (port->fcoe_pending_queue.qlen)
|
||||
fcoe_check_wait_queue(lport, skb);
|
||||
else if (fcoe_start_io(skb))
|
||||
fcoe_check_wait_queue(lport, skb);
|
||||
|
||||
fcoe_port_send(port, skb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1219,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp)
|
||||
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
|
||||
break;
|
||||
case CMD_UNSOLICITED_ABORT:
|
||||
cmd->result = DID_RESET << 16;
|
||||
dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
|
||||
cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
|
||||
dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
|
||||
"abort\n", cp);
|
||||
break;
|
||||
case CMD_TIMEOUT:
|
||||
|
@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
|
||||
dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
|
||||
c->Header.Tag.lower);
|
||||
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
|
||||
(void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
|
||||
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
|
||||
h->commands_outstanding++;
|
||||
if (h->commands_outstanding > h->max_outstanding)
|
||||
h->max_outstanding = h->commands_outstanding;
|
||||
|
@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
|
||||
if (rc != PCIBIOS_SUCCESSFUL) {
|
||||
dev_err(&pdev->dev, "Failed to save PCI config space\n");
|
||||
rc = -EIO;
|
||||
goto cleanup_nomem;
|
||||
goto out_msi_disable;
|
||||
}
|
||||
|
||||
if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
|
||||
goto cleanup_nomem;
|
||||
goto out_msi_disable;
|
||||
|
||||
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
|
||||
goto cleanup_nomem;
|
||||
goto out_msi_disable;
|
||||
|
||||
if (ioa_cfg->sis64)
|
||||
ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
|
||||
@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
|
||||
if (rc < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"Couldn't allocate enough memory for device driver!\n");
|
||||
goto cleanup_nomem;
|
||||
goto out_msi_disable;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -8845,10 +8845,10 @@ out:
|
||||
|
||||
cleanup_nolog:
|
||||
ipr_free_mem(ioa_cfg);
|
||||
cleanup_nomem:
|
||||
iounmap(ipr_regs);
|
||||
out_msi_disable:
|
||||
pci_disable_msi(pdev);
|
||||
cleanup_nomem:
|
||||
iounmap(ipr_regs);
|
||||
out_release_regions:
|
||||
pci_release_regions(pdev);
|
||||
out_scsi_host_put:
|
||||
|
@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
|
||||
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
|
||||
spin_lock_bh(&pool->lock);
|
||||
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
|
||||
if (ep) {
|
||||
if (ep && ep->xid == xid)
|
||||
fc_exch_hold(ep);
|
||||
WARN_ON(ep->xid != xid);
|
||||
}
|
||||
spin_unlock_bh(&pool->lock);
|
||||
}
|
||||
return ep;
|
||||
@ -2465,8 +2463,11 @@ int fc_setup_exch_mgr(void)
|
||||
|
||||
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
|
||||
if (!fc_exch_workqueue)
|
||||
return -ENOMEM;
|
||||
goto err;
|
||||
return 0;
|
||||
err:
|
||||
kmem_cache_destroy(fc_em_cachep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -498,7 +498,7 @@ crc_err:
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats->ErrorFrames++;
|
||||
/* per cpu count, not total count, but OK for limit */
|
||||
if (stats->InvalidCRCCount++ < 5)
|
||||
if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
|
||||
printk(KERN_WARNING "libfc: CRC error on data "
|
||||
"frame for port (%6.6x)\n",
|
||||
lport->port_id);
|
||||
@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_fcp_abts_resp() - Send an ABTS response
|
||||
* fc_fcp_abts_resp() - Receive an ABTS response
|
||||
* @fsp: The FCP packet that is being aborted
|
||||
* @fp: The response frame
|
||||
*/
|
||||
@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_fcp_recv() - Reveive an FCP frame
|
||||
* fc_fcp_recv() - Receive an FCP frame
|
||||
* @seq: The sequence the frame is on
|
||||
* @fp: The received frame
|
||||
* @arg: The related FCP packet
|
||||
@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
|
||||
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
|
||||
if (unlikely(rc)) {
|
||||
spin_lock_irqsave(&si->scsi_queue_lock, flags);
|
||||
fsp->cmd->SCp.ptr = NULL;
|
||||
list_del(&fsp->list);
|
||||
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
|
||||
}
|
||||
@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
|
||||
struct fc_seq *seq;
|
||||
struct fcp_srr *srr;
|
||||
struct fc_frame *fp;
|
||||
u8 cdb_op;
|
||||
unsigned int rec_tov;
|
||||
|
||||
rport = fsp->rport;
|
||||
rpriv = rport->dd_data;
|
||||
cdb_op = fsp->cdb_cmd.fc_cdb[0];
|
||||
|
||||
if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
|
||||
rpriv->rp_state != RPORT_ST_READY)
|
||||
|
@ -1352,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work)
|
||||
WARN_ON(1);
|
||||
break;
|
||||
case LPORT_ST_READY:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
case LPORT_ST_RESET:
|
||||
break;
|
||||
|
@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
|
||||
|
||||
res = sas_discover_expander(child);
|
||||
if (res) {
|
||||
spin_lock_irq(&parent->port->dev_list_lock);
|
||||
list_del(&child->dev_list_node);
|
||||
spin_unlock_irq(&parent->port->dev_list_lock);
|
||||
kfree(child);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -20,6 +20,11 @@
|
||||
*******************************************************************/
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
|
||||
#define CONFIG_SCSI_LPFC_DEBUG_FS
|
||||
#endif
|
||||
|
||||
struct lpfc_sli2_slim;
|
||||
|
||||
#define LPFC_PCI_DEV_LP 0x1
|
||||
@ -465,9 +470,10 @@ enum intr_type_t {
|
||||
struct unsol_rcv_ct_ctx {
|
||||
uint32_t ctxt_id;
|
||||
uint32_t SID;
|
||||
uint32_t oxid;
|
||||
uint32_t flags;
|
||||
#define UNSOL_VALID 0x00000001
|
||||
uint16_t oxid;
|
||||
uint16_t rxid;
|
||||
};
|
||||
|
||||
#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
|
||||
@ -674,6 +680,9 @@ struct lpfc_hba {
|
||||
uint32_t cfg_enable_rrq;
|
||||
uint32_t cfg_topology;
|
||||
uint32_t cfg_link_speed;
|
||||
#define LPFC_FCF_FOV 1 /* Fast fcf failover */
|
||||
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
|
||||
uint32_t cfg_fcf_failover_policy;
|
||||
uint32_t cfg_cr_delay;
|
||||
uint32_t cfg_cr_count;
|
||||
uint32_t cfg_multi_ring_support;
|
||||
@ -845,9 +854,13 @@ struct lpfc_hba {
|
||||
/* iDiag debugfs sub-directory */
|
||||
struct dentry *idiag_root;
|
||||
struct dentry *idiag_pci_cfg;
|
||||
struct dentry *idiag_bar_acc;
|
||||
struct dentry *idiag_que_info;
|
||||
struct dentry *idiag_que_acc;
|
||||
struct dentry *idiag_drb_acc;
|
||||
struct dentry *idiag_ctl_acc;
|
||||
struct dentry *idiag_mbx_acc;
|
||||
struct dentry *idiag_ext_acc;
|
||||
#endif
|
||||
|
||||
/* Used for deferred freeing of ELS data buffers */
|
||||
|
@ -754,6 +754,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
|
||||
* @phba: lpfc_hba pointer.
|
||||
*
|
||||
* Description:
|
||||
* SLI4 interface type-2 device to wait on the sliport status register for
|
||||
* the readyness after performing a firmware reset.
|
||||
*
|
||||
* Returns:
|
||||
* zero for success
|
||||
**/
|
||||
static int
|
||||
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_register portstat_reg;
|
||||
int i;
|
||||
|
||||
|
||||
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
|
||||
&portstat_reg.word0);
|
||||
|
||||
/* wait for the SLI port firmware ready after firmware reset */
|
||||
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
|
||||
msleep(10);
|
||||
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
|
||||
&portstat_reg.word0);
|
||||
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
|
||||
continue;
|
||||
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
|
||||
continue;
|
||||
if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
|
||||
if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
|
||||
return 0;
|
||||
else
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
|
||||
* @phba: lpfc_hba pointer.
|
||||
@ -769,6 +810,7 @@ static ssize_t
|
||||
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
|
||||
{
|
||||
struct completion online_compl;
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
uint32_t reg_val;
|
||||
int status = 0;
|
||||
int rc;
|
||||
@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
|
||||
LPFC_SLI_INTF_IF_TYPE_2))
|
||||
return -EPERM;
|
||||
|
||||
if (!pdev->is_physfn)
|
||||
return -EPERM;
|
||||
|
||||
/* Disable SR-IOV virtual functions if enabled */
|
||||
if (phba->cfg_sriov_nr_virtfn) {
|
||||
pci_disable_sriov(pdev);
|
||||
phba->cfg_sriov_nr_virtfn = 0;
|
||||
}
|
||||
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
|
||||
|
||||
if (status != 0)
|
||||
@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
|
||||
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
|
||||
|
||||
/* delay driver action following IF_TYPE_2 reset */
|
||||
msleep(100);
|
||||
rc = lpfc_sli4_pdev_status_reg_wait(phba);
|
||||
|
||||
if (rc)
|
||||
return -EIO;
|
||||
|
||||
init_completion(&online_compl);
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
|
||||
|
||||
if (!phba->cfg_enable_hba_reset)
|
||||
return -EACCES;
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
|
||||
"3050 lpfc_board_mode set to %s\n", buf);
|
||||
|
||||
init_completion(&online_compl);
|
||||
|
||||
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
|
||||
@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
val = 0;
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
|
||||
"3051 lpfc_poll changed from %d to %d\n",
|
||||
phba->cfg_poll, val);
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
||||
old_val = phba->cfg_poll;
|
||||
@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
|
||||
struct lpfc_rsrc_desc_pcie *desc;
|
||||
uint32_t max_nr_virtfn;
|
||||
uint32_t desc_count;
|
||||
int length, rc, i;
|
||||
uint16_t max_nr_virtfn;
|
||||
|
||||
if ((phba->sli_rev < LPFC_SLI_REV4) ||
|
||||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
||||
LPFC_SLI_INTF_IF_TYPE_2))
|
||||
return -EPERM;
|
||||
|
||||
if (!pdev->is_physfn)
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", 0);
|
||||
|
||||
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mboxq)
|
||||
return -ENOMEM;
|
||||
|
||||
/* get the maximum number of virtfn support by physfn */
|
||||
length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
shdr = (union lpfc_sli4_cfg_shdr *)
|
||||
&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
|
||||
bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
|
||||
phba->sli4_hba.iov.pf_number + 1);
|
||||
|
||||
get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
|
||||
bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
|
||||
LPFC_CFG_TYPE_CURRENT_ACTIVE);
|
||||
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
|
||||
lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
|
||||
|
||||
if (rc != MBX_TIMEOUT) {
|
||||
/* check return status */
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
|
||||
&shdr->response);
|
||||
if (shdr_status || shdr_add_status || rc)
|
||||
goto error_out;
|
||||
|
||||
} else
|
||||
goto error_out;
|
||||
|
||||
desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
|
||||
|
||||
for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
|
||||
desc = (struct lpfc_rsrc_desc_pcie *)
|
||||
&get_prof_cfg->u.response.prof_cfg.desc[i];
|
||||
if (LPFC_RSRC_DESC_TYPE_PCIE ==
|
||||
bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
|
||||
max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
|
||||
desc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i < LPFC_RSRC_DESC_MAX_NUM) {
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
|
||||
}
|
||||
|
||||
error_out:
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return -EIO;
|
||||
max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1605,6 +1596,9 @@ static int \
|
||||
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
|
||||
{ \
|
||||
if (val >= minval && val <= maxval) {\
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
|
||||
"3052 lpfc_" #attr " changed from %d to %d\n", \
|
||||
phba->cfg_##attr, val); \
|
||||
phba->cfg_##attr = val;\
|
||||
return 0;\
|
||||
}\
|
||||
@ -1762,6 +1756,9 @@ static int \
|
||||
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
|
||||
{ \
|
||||
if (val >= minval && val <= maxval) {\
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
|
||||
"3053 lpfc_" #attr " changed from %d to %d\n", \
|
||||
vport->cfg_##attr, val); \
|
||||
vport->cfg_##attr = val;\
|
||||
return 0;\
|
||||
}\
|
||||
@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
|
||||
lpfc_param_init(enable_npiv, 1, 0, 1);
|
||||
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
|
||||
|
||||
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
|
||||
"FCF Fast failover=1 Priority failover=2");
|
||||
|
||||
int lpfc_enable_rrq;
|
||||
module_param(lpfc_enable_rrq, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
|
||||
@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
|
||||
if (nolip)
|
||||
return strlen(buf);
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
|
||||
"3054 lpfc_topology changed from %d to %d\n",
|
||||
prev_val, val);
|
||||
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
|
||||
if (err) {
|
||||
phba->cfg_topology = prev_val;
|
||||
@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
|
||||
if (sscanf(val_buf, "%i", &val) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
|
||||
"3055 lpfc_link_speed changed from %d to %d %s\n",
|
||||
phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
|
||||
|
||||
if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
|
||||
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
|
||||
@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
|
||||
# - Default will result in registering capabilities for all profiles.
|
||||
#
|
||||
*/
|
||||
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
|
||||
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
|
||||
SHOST_DIX_TYPE0_PROTECTION |
|
||||
SHOST_DIX_TYPE1_PROTECTION;
|
||||
|
||||
module_param(lpfc_prot_mask, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
|
||||
@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
|
||||
&dev_attr_lpfc_fdmi_on,
|
||||
&dev_attr_lpfc_max_luns,
|
||||
&dev_attr_lpfc_enable_npiv,
|
||||
&dev_attr_lpfc_fcf_failover_policy,
|
||||
&dev_attr_lpfc_enable_rrq,
|
||||
&dev_attr_nport_evt_cnt,
|
||||
&dev_attr_board_mode,
|
||||
@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
|
||||
lpfc_link_speed_init(phba, lpfc_link_speed);
|
||||
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
|
||||
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
|
||||
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
|
||||
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
|
||||
lpfc_use_msi_init(phba, lpfc_use_msi);
|
||||
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
|
||||
|
@ -42,6 +42,7 @@
|
||||
#include "lpfc.h"
|
||||
#include "lpfc_logmsg.h"
|
||||
#include "lpfc_crtn.h"
|
||||
#include "lpfc_debugfs.h"
|
||||
#include "lpfc_vport.h"
|
||||
#include "lpfc_version.h"
|
||||
|
||||
@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
evt_dat->immed_dat].oxid,
|
||||
phba->ct_ctx[
|
||||
evt_dat->immed_dat].SID);
|
||||
phba->ct_ctx[evt_dat->immed_dat].rxid =
|
||||
piocbq->iocb.ulpContext;
|
||||
phba->ct_ctx[evt_dat->immed_dat].oxid =
|
||||
piocbq->iocb.ulpContext;
|
||||
piocbq->iocb.unsli3.rcvsli3.ox_id;
|
||||
phba->ct_ctx[evt_dat->immed_dat].SID =
|
||||
piocbq->iocb.un.rcvels.remoteID;
|
||||
phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
|
||||
@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
|
||||
rc = IOCB_ERROR;
|
||||
goto issue_ct_rsp_exit;
|
||||
}
|
||||
icmd->ulpContext = phba->ct_ctx[tag].oxid;
|
||||
icmd->ulpContext = phba->ct_ctx[tag].rxid;
|
||||
icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
|
||||
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
|
||||
if (!ndlp) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
|
||||
@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
|
||||
goto issue_ct_rsp_exit;
|
||||
}
|
||||
|
||||
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
icmd->ulpContext =
|
||||
icmd->un.ulpWord[3] =
|
||||
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
|
||||
/* The exchange is done, mark the entry as invalid */
|
||||
@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
|
||||
|
||||
/* Xmit CT response on exchange <xid> */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
|
||||
"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
|
||||
icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
|
||||
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
|
||||
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
|
||||
|
||||
ctiocb->iocb_cmpl = NULL;
|
||||
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
|
||||
@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
|
||||
/**
|
||||
* lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @job: LPFC_BSG_VENDOR_DIAG_MODE
|
||||
*
|
||||
* This function is responsible for preparing driver for diag loopback
|
||||
* on device.
|
||||
*/
|
||||
static int
|
||||
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_vport **vports;
|
||||
struct Scsi_Host *shost;
|
||||
@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
/**
|
||||
* lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @job: LPFC_BSG_VENDOR_DIAG_MODE
|
||||
*
|
||||
* This function is responsible for driver exit processing of setting up
|
||||
* diag loopback mode on device.
|
||||
@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
uint32_t link_flags;
|
||||
uint32_t timeout;
|
||||
LPFC_MBOXQ_t *pmboxq;
|
||||
int mbxstatus;
|
||||
int mbxstatus = MBX_SUCCESS;
|
||||
int i = 0;
|
||||
int rc = 0;
|
||||
|
||||
@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
goto job_error;
|
||||
}
|
||||
|
||||
rc = lpfc_bsg_diag_mode_enter(phba, job);
|
||||
rc = lpfc_bsg_diag_mode_enter(phba);
|
||||
if (rc)
|
||||
goto job_error;
|
||||
|
||||
@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
uint32_t link_flags, timeout, req_len, alloc_len;
|
||||
struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
|
||||
LPFC_MBOXQ_t *pmboxq = NULL;
|
||||
int mbxstatus, i, rc = 0;
|
||||
int mbxstatus = MBX_SUCCESS, i, rc = 0;
|
||||
|
||||
/* no data to return just the return code */
|
||||
job->reply->reply_payload_rcv_len = 0;
|
||||
@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
goto job_error;
|
||||
}
|
||||
|
||||
rc = lpfc_bsg_diag_mode_enter(phba, job);
|
||||
rc = lpfc_bsg_diag_mode_enter(phba);
|
||||
if (rc)
|
||||
goto job_error;
|
||||
|
||||
@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
|
||||
goto job_error;
|
||||
}
|
||||
|
||||
rc = lpfc_bsg_diag_mode_enter(phba, job);
|
||||
rc = lpfc_bsg_diag_mode_enter(phba);
|
||||
if (rc)
|
||||
goto job_error;
|
||||
|
||||
@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
|
||||
"(x%x/x%x) complete bsg job done, bsize:%d\n",
|
||||
phba->mbox_ext_buf_ctx.nembType,
|
||||
phba->mbox_ext_buf_ctx.mboxType, size);
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
|
||||
phba->mbox_ext_buf_ctx.nembType,
|
||||
phba->mbox_ext_buf_ctx.mboxType,
|
||||
dma_ebuf, sta_pos_addr,
|
||||
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
|
||||
} else
|
||||
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
|
||||
|
||||
@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
"ext_buf_cnt:%d\n", ext_buf_cnt);
|
||||
}
|
||||
|
||||
/* before dma descriptor setup */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
|
||||
sta_pre_addr, dmabuf, ext_buf_cnt);
|
||||
|
||||
/* reject non-embedded mailbox command with none external buffer */
|
||||
if (ext_buf_cnt == 0) {
|
||||
rc = -EPERM;
|
||||
@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
}
|
||||
}
|
||||
|
||||
/* after dma descriptor setup */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
|
||||
sta_pos_addr, dmabuf, ext_buf_cnt);
|
||||
|
||||
/* construct base driver mbox command */
|
||||
pmb = &pmboxq->u.mb;
|
||||
pmbx = (uint8_t *)dmabuf->virt;
|
||||
@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
|
||||
"2947 Issued SLI_CONFIG ext-buffer "
|
||||
"maibox command, rc:x%x\n", rc);
|
||||
return 1;
|
||||
return SLI_CONFIG_HANDLED;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
|
||||
"2948 Failed to issue SLI_CONFIG ext-buffer "
|
||||
@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
LPFC_MBOXQ_t *pmboxq = NULL;
|
||||
MAILBOX_t *pmb;
|
||||
uint8_t *mbx;
|
||||
int rc = 0, i;
|
||||
int rc = SLI_CONFIG_NOT_HANDLED, i;
|
||||
|
||||
mbox_req =
|
||||
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
|
||||
@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
"ext_buf_cnt:%d\n", ext_buf_cnt);
|
||||
}
|
||||
|
||||
/* before dma buffer descriptor setup */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
|
||||
sta_pre_addr, dmabuf, ext_buf_cnt);
|
||||
|
||||
if (ext_buf_cnt == 0)
|
||||
return -EPERM;
|
||||
|
||||
/* for the first external buffer */
|
||||
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
|
||||
|
||||
/* after dma descriptor setup */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
|
||||
sta_pos_addr, dmabuf, ext_buf_cnt);
|
||||
|
||||
/* log for looking forward */
|
||||
for (i = 1; i < ext_buf_cnt; i++) {
|
||||
if (nemb_tp == nemb_mse)
|
||||
@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
|
||||
"2955 Issued SLI_CONFIG ext-buffer "
|
||||
"maibox command, rc:x%x\n", rc);
|
||||
return 1;
|
||||
return SLI_CONFIG_HANDLED;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
|
||||
"2956 Failed to issue SLI_CONFIG ext-buffer "
|
||||
@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
rc = -EPIPE;
|
||||
}
|
||||
|
||||
/* wait for additoinal external buffers */
|
||||
job->reply->result = 0;
|
||||
job->job_done(job);
|
||||
return SLI_CONFIG_HANDLED;
|
||||
|
||||
job_error:
|
||||
if (pmboxq)
|
||||
mempool_free(pmboxq, phba->mbox_mem_pool);
|
||||
@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
|
||||
dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
|
||||
struct lpfc_dmabuf, list);
|
||||
list_del_init(&dmabuf->list);
|
||||
|
||||
/* after dma buffer descriptor setup */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
|
||||
mbox_rd, dma_ebuf, sta_pos_addr,
|
||||
dmabuf, index);
|
||||
|
||||
pbuf = (uint8_t *)dmabuf->virt;
|
||||
job->reply->reply_payload_rcv_len =
|
||||
sg_copy_from_buffer(job->reply_payload.sg_list,
|
||||
@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
dmabuf);
|
||||
list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
|
||||
|
||||
/* after write dma buffer */
|
||||
lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
|
||||
mbox_wr, dma_ebuf, sta_pos_addr,
|
||||
dmabuf, index);
|
||||
|
||||
if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
|
||||
"2968 SLI_CONFIG ext-buffer wr all %d "
|
||||
@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
|
||||
"2969 Issued SLI_CONFIG ext-buffer "
|
||||
"maibox command, rc:x%x\n", rc);
|
||||
return 1;
|
||||
return SLI_CONFIG_HANDLED;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
|
||||
"2970 Failed to issue SLI_CONFIG ext-buffer "
|
||||
@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
struct lpfc_dmabuf *dmabuf)
|
||||
{
|
||||
struct dfc_mbox_req *mbox_req;
|
||||
int rc;
|
||||
int rc = SLI_CONFIG_NOT_HANDLED;
|
||||
|
||||
mbox_req =
|
||||
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
|
||||
|
||||
/* mbox command with/without single external buffer */
|
||||
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
|
||||
return SLI_CONFIG_NOT_HANDLED;
|
||||
return rc;
|
||||
|
||||
/* mbox command and first external buffer */
|
||||
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
|
||||
@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
* mailbox extension size
|
||||
*/
|
||||
if ((transmit_length > receive_length) ||
|
||||
(transmit_length > MAILBOX_EXT_SIZE)) {
|
||||
(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
|
||||
rc = -ERANGE;
|
||||
goto job_done;
|
||||
}
|
||||
@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
/* receive length cannot be greater than mailbox
|
||||
* extension size
|
||||
*/
|
||||
if (receive_length > MAILBOX_EXT_SIZE) {
|
||||
if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
|
||||
rc = -ERANGE;
|
||||
goto job_done;
|
||||
}
|
||||
@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
|
||||
|
||||
/* bde size cannot be greater than mailbox ext size */
|
||||
if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
|
||||
if (bde->tus.f.bdeSize >
|
||||
BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
|
||||
rc = -ERANGE;
|
||||
goto job_done;
|
||||
}
|
||||
@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
|
||||
* mailbox extension size
|
||||
*/
|
||||
if ((receive_length == 0) ||
|
||||
(receive_length > MAILBOX_EXT_SIZE)) {
|
||||
(receive_length >
|
||||
BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
|
||||
rc = -ERANGE;
|
||||
goto job_done;
|
||||
}
|
||||
|
@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
|
||||
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
|
||||
void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
|
||||
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
|
||||
void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
|
||||
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
|
||||
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
|
||||
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
|
||||
void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
|
||||
|
||||
int lpfc_mem_alloc(struct lpfc_hba *, int align);
|
||||
void lpfc_mem_free(struct lpfc_hba *);
|
||||
@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
|
||||
/* SLI4 if_type 2 externs. */
|
||||
int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
|
||||
int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
|
||||
int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
|
||||
uint16_t *, uint16_t *);
|
||||
int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
|
||||
uint16_t *, uint16_t *);
|
||||
|
||||
/* externs BlockGuard */
|
||||
extern char *_dump_buf_data;
|
||||
@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
|
||||
int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
|
||||
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
|
||||
uint16_t, uint16_t, uint16_t);
|
||||
uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
|
||||
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
|
||||
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
|
||||
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
|
||||
uint32_t);
|
||||
void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
|
||||
enum mbox_type, enum dma_type, enum sta_type,
|
||||
struct lpfc_dmabuf *, uint32_t);
|
||||
void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
|
||||
int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
|
||||
/* functions to support SR-IOV */
|
||||
int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
|
||||
uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -39,14 +39,51 @@
|
||||
/* hbqinfo output buffer size */
|
||||
#define LPFC_HBQINFO_SIZE 8192
|
||||
|
||||
/*
|
||||
* For SLI4 iDiag debugfs diagnostics tool
|
||||
*/
|
||||
|
||||
/* pciConf */
|
||||
#define LPFC_PCI_CFG_BROWSE 0xffff
|
||||
#define LPFC_PCI_CFG_RD_CMD_ARG 2
|
||||
#define LPFC_PCI_CFG_WR_CMD_ARG 3
|
||||
#define LPFC_PCI_CFG_SIZE 4096
|
||||
#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
|
||||
#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
|
||||
|
||||
#define IDIAG_PCICFG_WHERE_INDX 0
|
||||
#define IDIAG_PCICFG_COUNT_INDX 1
|
||||
#define IDIAG_PCICFG_VALUE_INDX 2
|
||||
|
||||
/* barAcc */
|
||||
#define LPFC_PCI_BAR_BROWSE 0xffff
|
||||
#define LPFC_PCI_BAR_RD_CMD_ARG 3
|
||||
#define LPFC_PCI_BAR_WR_CMD_ARG 3
|
||||
|
||||
#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
|
||||
#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
|
||||
#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
|
||||
#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
|
||||
|
||||
#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
|
||||
#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
|
||||
|
||||
#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
|
||||
#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
|
||||
#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
|
||||
#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
|
||||
|
||||
#define IDIAG_BARACC_BAR_NUM_INDX 0
|
||||
#define IDIAG_BARACC_OFF_SET_INDX 1
|
||||
#define IDIAG_BARACC_ACC_MOD_INDX 2
|
||||
#define IDIAG_BARACC_REG_VAL_INDX 2
|
||||
#define IDIAG_BARACC_BAR_SZE_INDX 3
|
||||
|
||||
#define IDIAG_BARACC_BAR_0 0
|
||||
#define IDIAG_BARACC_BAR_1 1
|
||||
#define IDIAG_BARACC_BAR_2 2
|
||||
|
||||
#define SINGLE_WORD 1
|
||||
|
||||
/* queue info */
|
||||
#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
|
||||
|
||||
@ -63,7 +100,14 @@
|
||||
#define LPFC_IDIAG_WQ 4
|
||||
#define LPFC_IDIAG_RQ 5
|
||||
|
||||
/* doorbell acc */
|
||||
#define IDIAG_QUEACC_QUETP_INDX 0
|
||||
#define IDIAG_QUEACC_QUEID_INDX 1
|
||||
#define IDIAG_QUEACC_INDEX_INDX 2
|
||||
#define IDIAG_QUEACC_COUNT_INDX 3
|
||||
#define IDIAG_QUEACC_OFFST_INDX 4
|
||||
#define IDIAG_QUEACC_VALUE_INDX 5
|
||||
|
||||
/* doorbell register acc */
|
||||
#define LPFC_DRB_ACC_ALL 0xffff
|
||||
#define LPFC_DRB_ACC_RD_CMD_ARG 1
|
||||
#define LPFC_DRB_ACC_WR_CMD_ARG 2
|
||||
@ -76,6 +120,67 @@
|
||||
|
||||
#define LPFC_DRB_MAX 4
|
||||
|
||||
#define IDIAG_DRBACC_REGID_INDX 0
|
||||
#define IDIAG_DRBACC_VALUE_INDX 1
|
||||
|
||||
/* control register acc */
|
||||
#define LPFC_CTL_ACC_ALL 0xffff
|
||||
#define LPFC_CTL_ACC_RD_CMD_ARG 1
|
||||
#define LPFC_CTL_ACC_WR_CMD_ARG 2
|
||||
#define LPFC_CTL_ACC_BUF_SIZE 256
|
||||
|
||||
#define LPFC_CTL_PORT_SEM 1
|
||||
#define LPFC_CTL_PORT_STA 2
|
||||
#define LPFC_CTL_PORT_CTL 3
|
||||
#define LPFC_CTL_PORT_ER1 4
|
||||
#define LPFC_CTL_PORT_ER2 5
|
||||
#define LPFC_CTL_PDEV_CTL 6
|
||||
|
||||
#define LPFC_CTL_MAX 6
|
||||
|
||||
#define IDIAG_CTLACC_REGID_INDX 0
|
||||
#define IDIAG_CTLACC_VALUE_INDX 1
|
||||
|
||||
/* mailbox access */
|
||||
#define LPFC_MBX_DMP_ARG 4
|
||||
|
||||
#define LPFC_MBX_ACC_BUF_SIZE 512
|
||||
#define LPFC_MBX_ACC_LBUF_SZ 128
|
||||
|
||||
#define LPFC_MBX_DMP_MBX_WORD 0x00000001
|
||||
#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
|
||||
#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
|
||||
|
||||
#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
|
||||
#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
|
||||
#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
|
||||
#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
|
||||
#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
|
||||
LPFC_BSG_DMP_MBX_RD_BUF | \
|
||||
LPFC_BSG_DMP_MBX_WR_MBX | \
|
||||
LPFC_BSG_DMP_MBX_WR_BUF)
|
||||
|
||||
#define LPFC_MBX_DMP_ALL 0xffff
|
||||
#define LPFC_MBX_ALL_CMD 0xff
|
||||
|
||||
#define IDIAG_MBXACC_MBCMD_INDX 0
|
||||
#define IDIAG_MBXACC_DPMAP_INDX 1
|
||||
#define IDIAG_MBXACC_DPCNT_INDX 2
|
||||
#define IDIAG_MBXACC_WDCNT_INDX 3
|
||||
|
||||
/* extents access */
|
||||
#define LPFC_EXT_ACC_CMD_ARG 1
|
||||
#define LPFC_EXT_ACC_BUF_SIZE 4096
|
||||
|
||||
#define LPFC_EXT_ACC_AVAIL 0x1
|
||||
#define LPFC_EXT_ACC_ALLOC 0x2
|
||||
#define LPFC_EXT_ACC_DRIVR 0x4
|
||||
#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
|
||||
LPFC_EXT_ACC_AVAIL | \
|
||||
LPFC_EXT_ACC_ALLOC)
|
||||
|
||||
#define IDIAG_EXTACC_EXMAP_INDX 0
|
||||
|
||||
#define SIZE_U8 sizeof(uint8_t)
|
||||
#define SIZE_U16 sizeof(uint16_t)
|
||||
#define SIZE_U32 sizeof(uint32_t)
|
||||
@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
|
||||
#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
|
||||
#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
|
||||
|
||||
#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
|
||||
#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
|
||||
#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
|
||||
#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
|
||||
|
||||
#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
|
||||
#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
|
||||
#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
|
||||
@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
|
||||
#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
|
||||
#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
|
||||
#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
|
||||
|
||||
#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
|
||||
#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
|
||||
#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
|
||||
#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
|
||||
|
||||
#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
|
||||
#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
|
||||
|
||||
#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
|
||||
|
||||
uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
|
||||
};
|
||||
|
||||
|
@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
}
|
||||
lpfc_cleanup_pending_mbox(vport);
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
}
|
||||
/*
|
||||
* If VPI is unreged, driver need to do INIT_VPI
|
||||
* before re-registering
|
||||
*/
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
/*
|
||||
* If VPI is unreged, driver need to do INIT_VPI
|
||||
* before re-registering
|
||||
*/
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
}
|
||||
@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
phba->fcf.current_rec.fcf_indx,
|
||||
irsp->ulpStatus, irsp->un.ulpWord[4],
|
||||
irsp->ulpTimeout);
|
||||
lpfc_sli4_set_fcf_flogi_fail(phba,
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
|
||||
rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
|
||||
if (rc)
|
||||
@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
/* Set the fcfi to the fcfi we registered with */
|
||||
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
|
||||
}
|
||||
} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
sp->cmn.request_multiple_Nport = 1;
|
||||
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
|
||||
icmd->ulpCt_h = 1;
|
||||
icmd->ulpCt_l = 0;
|
||||
} else {
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
sp->cmn.request_multiple_Nport = 1;
|
||||
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
|
||||
icmd->ulpCt_h = 1;
|
||||
icmd->ulpCt_l = 0;
|
||||
} else
|
||||
sp->cmn.request_multiple_Nport = 0;
|
||||
}
|
||||
|
||||
if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
|
||||
@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
||||
}
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
pcmd += sizeof(uint32_t);
|
||||
@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
||||
return 1;
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
|
||||
if (mbox)
|
||||
@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
|
||||
return 1;
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
|
||||
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
|
||||
@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
oldcmd = &oldiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
|
||||
@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
oldcmd = &oldiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
|
||||
/* Xmit ADISC ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
oldcmd = &oldiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
|
||||
/* Xmit PRLI ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
|
||||
@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
oldcmd = &oldiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
|
||||
/* Xmit RNID ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
|
||||
@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
|
||||
if (!elsiocb)
|
||||
return 1;
|
||||
|
||||
elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
|
||||
elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
|
||||
elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
|
||||
|
||||
/* Xmit ECHO ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
|
||||
@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
uint8_t *pcmd;
|
||||
struct lpfc_iocbq *elsiocb;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
uint16_t xri;
|
||||
uint16_t oxid;
|
||||
uint16_t rxid;
|
||||
uint32_t cmdsize;
|
||||
|
||||
mb = &pmb->u.mb;
|
||||
|
||||
ndlp = (struct lpfc_nodelist *) pmb->context2;
|
||||
xri = (uint16_t) ((unsigned long)(pmb->context1));
|
||||
rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
|
||||
oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
|
||||
pmb->context1 = NULL;
|
||||
pmb->context2 = NULL;
|
||||
|
||||
@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
return;
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = xri;
|
||||
icmd->ulpContext = rxid;
|
||||
icmd->unsli3.rcvsli3.ox_id = oxid;
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
uint8_t *pcmd;
|
||||
struct lpfc_iocbq *elsiocb;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
uint16_t xri, status;
|
||||
uint16_t status;
|
||||
uint16_t oxid;
|
||||
uint16_t rxid;
|
||||
uint32_t cmdsize;
|
||||
|
||||
mb = &pmb->u.mb;
|
||||
|
||||
ndlp = (struct lpfc_nodelist *) pmb->context2;
|
||||
xri = (uint16_t) ((unsigned long)(pmb->context1));
|
||||
rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
|
||||
oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
|
||||
pmb->context1 = NULL;
|
||||
pmb->context2 = NULL;
|
||||
|
||||
@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
return;
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = xri;
|
||||
icmd->ulpContext = rxid;
|
||||
icmd->unsli3.rcvsli3.ox_id = oxid;
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
|
||||
if (mbox) {
|
||||
lpfc_read_lnk_stat(phba, mbox);
|
||||
mbox->context1 =
|
||||
(void *)((unsigned long) cmdiocb->iocb.ulpContext);
|
||||
mbox->context1 = (void *)((unsigned long)
|
||||
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
|
||||
cmdiocb->iocb.ulpContext)); /* rx_id */
|
||||
mbox->context2 = lpfc_nlp_get(ndlp);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
|
||||
@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
pcmd += sizeof(uint32_t); /* Skip past command */
|
||||
|
||||
/* use the command's xri in the response */
|
||||
elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
|
||||
elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
|
||||
elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
|
||||
|
||||
rtv_rsp = (struct RTV_RSP *)pcmd;
|
||||
|
||||
@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
|
||||
if (mbox) {
|
||||
lpfc_read_lnk_stat(phba, mbox);
|
||||
mbox->context1 =
|
||||
(void *)((unsigned long) cmdiocb->iocb.ulpContext);
|
||||
mbox->context1 = (void *)((unsigned long)
|
||||
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
|
||||
cmdiocb->iocb.ulpContext)); /* rx_id */
|
||||
mbox->context2 = lpfc_nlp_get(ndlp);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
|
||||
@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
oldcmd = &oldiocb->iocb;
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
|
||||
icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
|
||||
icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
|
||||
|
||||
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
|
||||
{
|
||||
struct lpfc_vport *vport;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
int i = 0;
|
||||
|
||||
/* The physical ports are always vpi 0 - translate is unnecessary. */
|
||||
if (vpi > 0) {
|
||||
@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, flags);
|
||||
list_for_each_entry(vport, &phba->port_list, listentry) {
|
||||
if (vport->vpi == vpi) {
|
||||
if (vport->vpi == i) {
|
||||
spin_unlock_irqrestore(&phba->hbalock, flags);
|
||||
return vport;
|
||||
}
|
||||
@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
|
||||
{
|
||||
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
|
||||
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
|
||||
uint16_t lxri = 0;
|
||||
|
||||
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
|
||||
unsigned long iflag = 0;
|
||||
@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
|
||||
}
|
||||
}
|
||||
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
|
||||
sglq_entry = __lpfc_get_active_sglq(phba, xri);
|
||||
lxri = lpfc_sli4_xri_inrange(phba, xri);
|
||||
if (lxri == NO_XRI) {
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
return;
|
||||
}
|
||||
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
|
||||
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
return;
|
||||
|
@ -1109,6 +1109,28 @@ out:
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_clear_fcf_rr_bmask
|
||||
* @phba pointer to the struct lpfc_hba for this port.
|
||||
* This fucnction resets the round robin bit mask and clears the
|
||||
* fcf priority list. The list deletions are done while holding the
|
||||
* hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
|
||||
* from the lpfc_fcf_pri record.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_fcf_pri *fcf_pri;
|
||||
struct lpfc_fcf_pri *next_fcf_pri;
|
||||
memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
list_for_each_entry_safe(fcf_pri, next_fcf_pri,
|
||||
&phba->fcf.fcf_pri_list, list) {
|
||||
list_del_init(&fcf_pri->list);
|
||||
fcf_pri->fcf_rec.flag = 0;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
static void
|
||||
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
{
|
||||
@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* If there is a pending FCoE event, restart FCF table scan. */
|
||||
if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
|
||||
if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
|
||||
lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
|
||||
goto fail_out;
|
||||
|
||||
/* Mark successful completion of FCF table scan */
|
||||
@ -1249,6 +1272,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
|
||||
return (curr_vlan_id == new_vlan_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_update_fcf_record - Update driver fcf record
|
||||
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @fcf_index: Index for the lpfc_fcf_record.
|
||||
* @new_fcf_record: pointer to hba fcf record.
|
||||
*
|
||||
* This routine updates the driver FCF priority record from the new HBA FCF
|
||||
* record. This routine is called with the host lock held.
|
||||
**/
|
||||
static void
|
||||
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
|
||||
struct fcf_record *new_fcf_record
|
||||
)
|
||||
{
|
||||
struct lpfc_fcf_pri *fcf_pri;
|
||||
|
||||
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
|
||||
fcf_pri->fcf_rec.fcf_index = fcf_index;
|
||||
/* FCF record priority */
|
||||
fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
|
||||
* @fcf: pointer to driver fcf record.
|
||||
@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
|
||||
fcf_rec->addr_mode = addr_mode;
|
||||
fcf_rec->vlan_id = vlan_id;
|
||||
fcf_rec->flag |= (flag | RECORD_VALID);
|
||||
__lpfc_update_fcf_record_pri(phba,
|
||||
bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
|
||||
new_fcf_record);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
|
||||
return false;
|
||||
if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
|
||||
return false;
|
||||
if (fcf_rec->priority != new_fcf_record->fip_priority)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1896,6 +1948,152 @@ stop_flogi_current_fcf:
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_fcf_pri_list_del
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @fcf_index the index of the fcf record to delete
|
||||
* This routine checks the on list flag of the fcf_index to be deleted.
|
||||
* If it is one the list then it is removed from the list, and the flag
|
||||
* is cleared. This routine grab the hbalock before removing the fcf
|
||||
* record from the list.
|
||||
**/
|
||||
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
|
||||
uint16_t fcf_index)
|
||||
{
|
||||
struct lpfc_fcf_pri *new_fcf_pri;
|
||||
|
||||
new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"3058 deleting idx x%x pri x%x flg x%x\n",
|
||||
fcf_index, new_fcf_pri->fcf_rec.priority,
|
||||
new_fcf_pri->fcf_rec.flag);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
|
||||
if (phba->fcf.current_rec.priority ==
|
||||
new_fcf_pri->fcf_rec.priority)
|
||||
phba->fcf.eligible_fcf_cnt--;
|
||||
list_del_init(&new_fcf_pri->list);
|
||||
new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_set_fcf_flogi_fail
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @fcf_index the index of the fcf record to update
|
||||
* This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
|
||||
* flag so the the round robin slection for the particular priority level
|
||||
* will try a different fcf record that does not have this bit set.
|
||||
* If the fcf record is re-read for any reason this flag is cleared brfore
|
||||
* adding it to the priority list.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
{
|
||||
struct lpfc_fcf_pri *new_fcf_pri;
|
||||
new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_fcf_pri_list_add
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @fcf_index the index of the fcf record to add
|
||||
* This routine checks the priority of the fcf_index to be added.
|
||||
* If it is a lower priority than the current head of the fcf_pri list
|
||||
* then it is added to the list in the right order.
|
||||
* If it is the same priority as the current head of the list then it
|
||||
* is added to the head of the list and its bit in the rr_bmask is set.
|
||||
* If the fcf_index to be added is of a higher priority than the current
|
||||
* head of the list then the rr_bmask is cleared, its bit is set in the
|
||||
* rr_bmask and it is added to the head of the list.
|
||||
* returns:
|
||||
* 0=success 1=failure
|
||||
**/
|
||||
int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
|
||||
struct fcf_record *new_fcf_record)
|
||||
{
|
||||
uint16_t current_fcf_pri;
|
||||
uint16_t last_index;
|
||||
struct lpfc_fcf_pri *fcf_pri;
|
||||
struct lpfc_fcf_pri *next_fcf_pri;
|
||||
struct lpfc_fcf_pri *new_fcf_pri;
|
||||
int ret;
|
||||
|
||||
new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"3059 adding idx x%x pri x%x flg x%x\n",
|
||||
fcf_index, new_fcf_record->fip_priority,
|
||||
new_fcf_pri->fcf_rec.flag);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
|
||||
list_del_init(&new_fcf_pri->list);
|
||||
new_fcf_pri->fcf_rec.fcf_index = fcf_index;
|
||||
new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
|
||||
if (list_empty(&phba->fcf.fcf_pri_list)) {
|
||||
list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
|
||||
ret = lpfc_sli4_fcf_rr_index_set(phba,
|
||||
new_fcf_pri->fcf_rec.fcf_index);
|
||||
goto out;
|
||||
}
|
||||
|
||||
last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX);
|
||||
if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
ret = 0; /* Empty rr list */
|
||||
goto out;
|
||||
}
|
||||
current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
|
||||
if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
|
||||
list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
|
||||
if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
|
||||
memset(phba->fcf.fcf_rr_bmask, 0,
|
||||
sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
/* fcfs_at_this_priority_level = 1; */
|
||||
phba->fcf.eligible_fcf_cnt = 1;
|
||||
} else
|
||||
/* fcfs_at_this_priority_level++; */
|
||||
phba->fcf.eligible_fcf_cnt++;
|
||||
ret = lpfc_sli4_fcf_rr_index_set(phba,
|
||||
new_fcf_pri->fcf_rec.fcf_index);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(fcf_pri, next_fcf_pri,
|
||||
&phba->fcf.fcf_pri_list, list) {
|
||||
if (new_fcf_pri->fcf_rec.priority <=
|
||||
fcf_pri->fcf_rec.priority) {
|
||||
if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
|
||||
list_add(&new_fcf_pri->list,
|
||||
&phba->fcf.fcf_pri_list);
|
||||
else
|
||||
list_add(&new_fcf_pri->list,
|
||||
&((struct lpfc_fcf_pri *)
|
||||
fcf_pri->list.prev)->list);
|
||||
ret = 0;
|
||||
goto out;
|
||||
} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
|
||||
|| new_fcf_pri->fcf_rec.priority <
|
||||
next_fcf_pri->fcf_rec.priority) {
|
||||
list_add(&new_fcf_pri->list, &fcf_pri->list);
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
|
||||
continue;
|
||||
|
||||
}
|
||||
ret = 1;
|
||||
out:
|
||||
/* we use = instead of |= to clear the FLOGI_FAILED flag. */
|
||||
new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
* record for roundrobin FCF failover.
|
||||
*/
|
||||
if (!rc) {
|
||||
lpfc_sli4_fcf_pri_list_del(phba,
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record));
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2781 FCF (x%x) failed connection "
|
||||
"list check: (x%x/x%x)\n",
|
||||
@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
goto read_next_fcf;
|
||||
} else {
|
||||
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
|
||||
rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
|
||||
rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
|
||||
new_fcf_record);
|
||||
if (rc)
|
||||
goto read_next_fcf;
|
||||
}
|
||||
@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (phba->fcf.fcf_flag & FCF_IN_USE) {
|
||||
if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
|
||||
if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
|
||||
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
|
||||
new_fcf_record, vlan_id)) {
|
||||
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
|
||||
phba->fcf.current_rec.fcf_indx) {
|
||||
@ -2232,7 +2435,8 @@ read_next_fcf:
|
||||
(phba->fcf.fcf_flag & FCF_REDISC_PEND))
|
||||
return;
|
||||
|
||||
if (phba->fcf.fcf_flag & FCF_IN_USE) {
|
||||
if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
|
||||
phba->fcf.fcf_flag & FCF_IN_USE) {
|
||||
/*
|
||||
* In case the current in-use FCF record no
|
||||
* longer existed during FCF discovery that
|
||||
@ -2247,7 +2451,6 @@ read_next_fcf:
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
lpfc_sli4_fcf_scan_read_fcf_rec(phba,
|
||||
LPFC_FCOE_FCF_GET_FIRST);
|
||||
return;
|
||||
@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
|
||||
/* Update the eligible FCF record index bmask */
|
||||
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
|
||||
rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
|
||||
|
||||
rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
|
||||
|
||||
out:
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
vport->vpi_state |= LPFC_VPI_REGISTERED;
|
||||
vport->fc_flag |= FC_VFI_REGISTERED;
|
||||
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
|
||||
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
|
||||
@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
|
||||
goto out;
|
||||
}
|
||||
/* Reset FCF roundrobin bmask for new discovery */
|
||||
memset(phba->fcf.fcf_rr_bmask, 0,
|
||||
sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
lpfc_sli4_clear_fcf_rr_bmask(phba);
|
||||
}
|
||||
|
||||
return;
|
||||
@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* Reset FCF roundrobin bmask for new discovery */
|
||||
memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
lpfc_sli4_clear_fcf_rr_bmask(phba);
|
||||
|
||||
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
|
||||
|
||||
|
@ -3470,11 +3470,16 @@ typedef struct {
|
||||
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
|
||||
|
||||
struct rcv_sli3 {
|
||||
uint32_t word8Rsvd;
|
||||
#ifdef __BIG_ENDIAN_BITFIELD
|
||||
uint16_t ox_id;
|
||||
uint16_t seq_cnt;
|
||||
|
||||
uint16_t vpi;
|
||||
uint16_t word9Rsvd;
|
||||
#else /* __LITTLE_ENDIAN */
|
||||
uint16_t seq_cnt;
|
||||
uint16_t ox_id;
|
||||
|
||||
uint16_t word9Rsvd;
|
||||
uint16_t vpi;
|
||||
#endif
|
||||
|
@ -170,15 +170,8 @@ struct lpfc_sli_intf {
|
||||
#define LPFC_PCI_FUNC3 3
|
||||
#define LPFC_PCI_FUNC4 4
|
||||
|
||||
/* SLI4 interface type-2 control register offsets */
|
||||
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
|
||||
#define LPFC_CTL_PORT_STA_OFFSET 0x404
|
||||
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
|
||||
#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
|
||||
#define LPFC_CTL_PORT_ER2_OFFSET 0x410
|
||||
/* SLI4 interface type-2 PDEV_CTL register */
|
||||
#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
|
||||
|
||||
/* Some SLI4 interface type-2 PDEV_CTL register bits */
|
||||
#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
|
||||
#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
|
||||
#define LPFC_CTL_PDEV_CTL_DD 0x00000004
|
||||
@ -337,6 +330,7 @@ struct lpfc_cqe {
|
||||
#define CQE_CODE_RELEASE_WQE 0x2
|
||||
#define CQE_CODE_RECEIVE 0x4
|
||||
#define CQE_CODE_XRI_ABORTED 0x5
|
||||
#define CQE_CODE_RECEIVE_V1 0x9
|
||||
|
||||
/* completion queue entry for wqe completions */
|
||||
struct lpfc_wcqe_complete {
|
||||
@ -440,7 +434,10 @@ struct lpfc_rcqe {
|
||||
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
|
||||
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
|
||||
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
|
||||
uint32_t reserved1;
|
||||
uint32_t word1;
|
||||
#define lpfc_rcqe_fcf_id_v1_SHIFT 0
|
||||
#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
|
||||
#define lpfc_rcqe_fcf_id_v1_WORD word1
|
||||
uint32_t word2;
|
||||
#define lpfc_rcqe_length_SHIFT 16
|
||||
#define lpfc_rcqe_length_MASK 0x0000FFFF
|
||||
@ -451,6 +448,9 @@ struct lpfc_rcqe {
|
||||
#define lpfc_rcqe_fcf_id_SHIFT 0
|
||||
#define lpfc_rcqe_fcf_id_MASK 0x0000003F
|
||||
#define lpfc_rcqe_fcf_id_WORD word2
|
||||
#define lpfc_rcqe_rq_id_v1_SHIFT 0
|
||||
#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
|
||||
#define lpfc_rcqe_rq_id_v1_WORD word2
|
||||
uint32_t word3;
|
||||
#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
|
||||
#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
|
||||
@ -515,7 +515,7 @@ struct lpfc_register {
|
||||
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
|
||||
#define LPFC_SLI_INTF 0x0058
|
||||
|
||||
#define LPFC_SLIPORT_IF2_SMPHR 0x0400
|
||||
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
|
||||
#define lpfc_port_smphr_perr_SHIFT 31
|
||||
#define lpfc_port_smphr_perr_MASK 0x1
|
||||
#define lpfc_port_smphr_perr_WORD word0
|
||||
@ -575,7 +575,7 @@ struct lpfc_register {
|
||||
#define LPFC_POST_STAGE_PORT_READY 0xC000
|
||||
#define LPFC_POST_STAGE_PORT_UE 0xF000
|
||||
|
||||
#define LPFC_SLIPORT_STATUS 0x0404
|
||||
#define LPFC_CTL_PORT_STA_OFFSET 0x404
|
||||
#define lpfc_sliport_status_err_SHIFT 31
|
||||
#define lpfc_sliport_status_err_MASK 0x1
|
||||
#define lpfc_sliport_status_err_WORD word0
|
||||
@ -593,7 +593,7 @@ struct lpfc_register {
|
||||
#define lpfc_sliport_status_rdy_WORD word0
|
||||
#define MAX_IF_TYPE_2_RESETS 1000
|
||||
|
||||
#define LPFC_SLIPORT_CNTRL 0x0408
|
||||
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
|
||||
#define lpfc_sliport_ctrl_end_SHIFT 30
|
||||
#define lpfc_sliport_ctrl_end_MASK 0x1
|
||||
#define lpfc_sliport_ctrl_end_WORD word0
|
||||
@ -604,8 +604,8 @@ struct lpfc_register {
|
||||
#define lpfc_sliport_ctrl_ip_WORD word0
|
||||
#define LPFC_SLIPORT_INIT_PORT 1
|
||||
|
||||
#define LPFC_SLIPORT_ERR_1 0x040C
|
||||
#define LPFC_SLIPORT_ERR_2 0x0410
|
||||
#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
|
||||
#define LPFC_CTL_PORT_ER2_OFFSET 0x410
|
||||
|
||||
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
|
||||
* reside in BAR 2.
|
||||
@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
|
||||
#define lpfc_grp_hdr_id_MASK 0x000000FF
|
||||
#define lpfc_grp_hdr_id_WORD word2
|
||||
uint8_t rev_name[128];
|
||||
uint8_t date[12];
|
||||
uint8_t revision[32];
|
||||
};
|
||||
|
||||
#define FCP_COMMAND 0x0
|
||||
|
@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
|
||||
sizeof fc_host_symbolic_name(shost));
|
||||
|
||||
fc_host_supported_speeds(shost) = 0;
|
||||
if (phba->lmt & LMT_16Gb)
|
||||
fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
|
||||
if (phba->lmt & LMT_10Gb)
|
||||
fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
|
||||
if (phba->lmt & LMT_8Gb)
|
||||
@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
|
||||
lpfc_sli4_fcf_dead_failthrough(phba);
|
||||
} else {
|
||||
/* Reset FCF roundrobin bmask for new discovery */
|
||||
memset(phba->fcf.fcf_rr_bmask, 0,
|
||||
sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
lpfc_sli4_clear_fcf_rr_bmask(phba);
|
||||
/*
|
||||
* Handling fast FCF failover to a DEAD FCF event is
|
||||
* considered equalivant to receiving CVL to all vports.
|
||||
@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
|
||||
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
|
||||
|
||||
vport = lpfc_find_vport_by_vpid(phba,
|
||||
acqe_fip->index - phba->vpi_base);
|
||||
acqe_fip->index);
|
||||
ndlp = lpfc_sli4_perform_vport_cvl(vport);
|
||||
if (!ndlp)
|
||||
break;
|
||||
@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
|
||||
* Reset FCF roundrobin bmask for new
|
||||
* discovery.
|
||||
*/
|
||||
memset(phba->fcf.fcf_rr_bmask, 0,
|
||||
sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
lpfc_sli4_clear_fcf_rr_bmask(phba);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@ -4034,6 +4034,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
|
||||
lpfc_unblock_mgmt_io(phba);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This function enables the PCI SR-IOV virtual functions to a physical
|
||||
* function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
|
||||
* enable the number of virtual functions to the physical function. As
|
||||
* not all devices support SR-IOV, the return code from the pci_enable_sriov()
|
||||
* API call does not considered as an error condition for most of the device.
|
||||
**/
|
||||
uint16_t
|
||||
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
|
||||
{
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
uint16_t nr_virtfn;
|
||||
int pos;
|
||||
|
||||
if (!pdev->is_physfn)
|
||||
return 0;
|
||||
|
||||
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
|
||||
if (pos == 0)
|
||||
return 0;
|
||||
|
||||
pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
|
||||
return nr_virtfn;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
@ -4049,8 +4077,17 @@ int
|
||||
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
|
||||
{
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
uint16_t max_nr_vfn;
|
||||
int rc;
|
||||
|
||||
max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
|
||||
if (nr_vfn > max_nr_vfn) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3057 Requested vfs (%d) greater than "
|
||||
"supported vfs (%d)", nr_vfn, max_nr_vfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = pci_enable_sriov(pdev, nr_vfn);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
return 0;
|
||||
|
||||
out_free_fcp_eq_hdl:
|
||||
kfree(phba->sli4_hba.fcp_eq_hdl);
|
||||
@ -4966,17 +5003,14 @@ out_free_mem:
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine is invoked to post rpi header templates to the
|
||||
* HBA consistent with the SLI-4 interface spec. This routine
|
||||
* port for those SLI4 ports that do not support extents. This routine
|
||||
* posts a PAGE_SIZE memory region to the port to hold up to
|
||||
* PAGE_SIZE modulo 64 rpi context headers.
|
||||
* No locks are held here because this is an initialization routine
|
||||
* called only from probe or lpfc_online when interrupts are not
|
||||
* enabled and the driver is reinitializing the device.
|
||||
* PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
|
||||
* and should be called only when interrupts are disabled.
|
||||
*
|
||||
* Return codes
|
||||
* 0 - successful
|
||||
* -ENOMEM - No available memory
|
||||
* -EIO - The mailbox failed to complete successfully.
|
||||
* -ERROR - otherwise.
|
||||
**/
|
||||
int
|
||||
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
|
||||
@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
|
||||
break;
|
||||
case LPFC_SLI_INTF_IF_TYPE_2:
|
||||
phba->sli4_hba.u.if_type2.ERR1regaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
|
||||
phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PORT_ER1_OFFSET;
|
||||
phba->sli4_hba.u.if_type2.ERR2regaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
|
||||
phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PORT_ER2_OFFSET;
|
||||
phba->sli4_hba.u.if_type2.CTRLregaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
|
||||
phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PORT_CTL_OFFSET;
|
||||
phba->sli4_hba.u.if_type2.STATUSregaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
|
||||
phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PORT_STA_OFFSET;
|
||||
phba->sli4_hba.SLIINTFregaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
|
||||
phba->sli4_hba.PSMPHRregaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
|
||||
phba->sli4_hba.conf_regs_memmap_p +
|
||||
LPFC_CTL_PORT_SEM_OFFSET;
|
||||
phba->sli4_hba.RQDBregaddr =
|
||||
phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
|
||||
phba->sli4_hba.WQDBregaddr =
|
||||
@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
|
||||
return -EINVAL;
|
||||
}
|
||||
lpfc_decode_firmware_rev(phba, fwrev, 1);
|
||||
if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
|
||||
if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3023 Updating Firmware. Current Version:%s "
|
||||
"New Version:%s\n",
|
||||
fwrev, image->rev_name);
|
||||
fwrev, image->revision);
|
||||
for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
|
||||
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
|
||||
GFP_KERNEL);
|
||||
@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
|
||||
fw->size - offset);
|
||||
break;
|
||||
}
|
||||
temp_offset += SLI4_PAGE_SIZE;
|
||||
memcpy(dmabuf->virt, fw->data + temp_offset,
|
||||
SLI4_PAGE_SIZE);
|
||||
temp_offset += SLI4_PAGE_SIZE;
|
||||
}
|
||||
rc = lpfc_wr_object(phba, &dma_buffer_list,
|
||||
(fw->size - offset), &offset);
|
||||
@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&phba->active_rrq_list);
|
||||
INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
|
||||
|
||||
/* Set up common device driver resources */
|
||||
error = lpfc_setup_driver_resource_phase2(phba);
|
||||
@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
|
||||
/* Check if there are static vports to be created. */
|
||||
lpfc_create_static_vport(phba);
|
||||
|
||||
return 0;
|
||||
|
||||
out_disable_intr:
|
||||
@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
|
||||
}
|
||||
|
||||
pci_restore_state(pdev);
|
||||
|
||||
/*
|
||||
* As the new kernel behavior of pci_restore_state() API call clears
|
||||
* device saved_state flag, need to save the restored state again.
|
||||
*/
|
||||
pci_save_state(pdev);
|
||||
|
||||
if (pdev->is_busmaster)
|
||||
pci_set_master(pdev);
|
||||
|
||||
|
@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
|
||||
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
|
||||
bf_set(lpfc_init_vfi_vfi, init_vfi,
|
||||
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
|
||||
bf_set(lpfc_init_vpi_vpi, init_vfi,
|
||||
bf_set(lpfc_init_vfi_vpi, init_vfi,
|
||||
vport->phba->vpi_ids[vport->vpi]);
|
||||
bf_set(lpfc_init_vfi_fcfi, init_vfi,
|
||||
vport->phba->fcf.fcfi);
|
||||
|
@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
case SCSI_PROT_NORMAL:
|
||||
default:
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
|
||||
"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
|
||||
scsi_get_prot_op(sc), guard_type);
|
||||
"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
|
||||
scsi_get_prot_op(sc));
|
||||
ret = 1;
|
||||
break;
|
||||
|
||||
}
|
||||
} else if (guard_type == SHOST_DIX_GUARD_CRC) {
|
||||
} else {
|
||||
switch (scsi_get_prot_op(sc)) {
|
||||
case SCSI_PROT_READ_STRIP:
|
||||
case SCSI_PROT_WRITE_INSERT:
|
||||
@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
case SCSI_PROT_READ_INSERT:
|
||||
case SCSI_PROT_WRITE_STRIP:
|
||||
*txop = BG_OP_IN_CRC_OUT_NODIF;
|
||||
*rxop = BG_OP_IN_NODIF_OUT_CRC;
|
||||
break;
|
||||
|
||||
case SCSI_PROT_NORMAL:
|
||||
default:
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
|
||||
"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
|
||||
scsi_get_prot_op(sc), guard_type);
|
||||
"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
|
||||
scsi_get_prot_op(sc));
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
/* unsupported format */
|
||||
BUG();
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
|
||||
return sc->device->sector_size;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
|
||||
* @sc: in: SCSI command
|
||||
* @apptagmask: out: app tag mask
|
||||
* @apptagval: out: app tag value
|
||||
* @reftag: out: ref tag (reference tag)
|
||||
*
|
||||
* Description:
|
||||
* Extract DIF parameters from the command if possible. Otherwise,
|
||||
* use default parameters.
|
||||
*
|
||||
**/
|
||||
static inline void
|
||||
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
|
||||
uint16_t *apptagval, uint32_t *reftag)
|
||||
{
|
||||
struct scsi_dif_tuple *spt;
|
||||
unsigned char op = scsi_get_prot_op(sc);
|
||||
unsigned int protcnt = scsi_prot_sg_count(sc);
|
||||
static int cnt;
|
||||
|
||||
if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
|
||||
op == SCSI_PROT_WRITE_PASS)) {
|
||||
|
||||
cnt++;
|
||||
spt = page_address(sg_page(scsi_prot_sglist(sc))) +
|
||||
scsi_prot_sglist(sc)[0].offset;
|
||||
*apptagmask = 0;
|
||||
*apptagval = 0;
|
||||
*reftag = cpu_to_be32(spt->ref_tag);
|
||||
|
||||
} else {
|
||||
/* SBC defines ref tag to be lower 32bits of LBA */
|
||||
*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
|
||||
*apptagmask = 0;
|
||||
*apptagval = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function sets up buffer list for protection groups of
|
||||
* type LPFC_PG_TYPE_NO_DIF
|
||||
@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
dma_addr_t physaddr;
|
||||
int i = 0, num_bde = 0, status;
|
||||
int datadir = sc->sc_data_direction;
|
||||
unsigned blksize;
|
||||
uint32_t reftag;
|
||||
uint16_t apptagmask, apptagval;
|
||||
unsigned blksize;
|
||||
uint8_t txop, rxop;
|
||||
|
||||
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
|
||||
@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
/* extract some info from the scsi command for pde*/
|
||||
blksize = lpfc_cmd_blksize(sc);
|
||||
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
|
||||
reftag = scsi_get_lba(sc) & 0xffffffff;
|
||||
|
||||
/* setup PDE5 with what we have */
|
||||
pde5 = (struct lpfc_pde5 *) bpl;
|
||||
memset(pde5, 0, sizeof(struct lpfc_pde5));
|
||||
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
|
||||
pde5->reftag = reftag;
|
||||
|
||||
/* Endianness conversion if necessary for PDE5 */
|
||||
pde5->word0 = cpu_to_le32(pde5->word0);
|
||||
pde5->reftag = cpu_to_le32(pde5->reftag);
|
||||
pde5->reftag = cpu_to_le32(reftag);
|
||||
|
||||
/* advance bpl and increment bde count */
|
||||
num_bde++;
|
||||
@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
if (datadir == DMA_FROM_DEVICE) {
|
||||
bf_set(pde6_ce, pde6, 1);
|
||||
bf_set(pde6_re, pde6, 1);
|
||||
bf_set(pde6_ae, pde6, 1);
|
||||
}
|
||||
bf_set(pde6_ai, pde6, 1);
|
||||
bf_set(pde6_apptagval, pde6, apptagval);
|
||||
bf_set(pde6_ae, pde6, 0);
|
||||
bf_set(pde6_apptagval, pde6, 0);
|
||||
|
||||
/* Endianness conversion if necessary for PDE6 */
|
||||
pde6->word0 = cpu_to_le32(pde6->word0);
|
||||
@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
unsigned char pgdone = 0, alldone = 0;
|
||||
unsigned blksize;
|
||||
uint32_t reftag;
|
||||
uint16_t apptagmask, apptagval;
|
||||
uint8_t txop, rxop;
|
||||
int num_bde = 0;
|
||||
|
||||
@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
|
||||
/* extract some info from the scsi command */
|
||||
blksize = lpfc_cmd_blksize(sc);
|
||||
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
|
||||
reftag = scsi_get_lba(sc) & 0xffffffff;
|
||||
|
||||
split_offset = 0;
|
||||
do {
|
||||
@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
pde5 = (struct lpfc_pde5 *) bpl;
|
||||
memset(pde5, 0, sizeof(struct lpfc_pde5));
|
||||
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
|
||||
pde5->reftag = reftag;
|
||||
|
||||
/* Endianness conversion if necessary for PDE5 */
|
||||
pde5->word0 = cpu_to_le32(pde5->word0);
|
||||
pde5->reftag = cpu_to_le32(pde5->reftag);
|
||||
pde5->reftag = cpu_to_le32(reftag);
|
||||
|
||||
/* advance bpl and increment bde count */
|
||||
num_bde++;
|
||||
@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
bf_set(pde6_oprx, pde6, rxop);
|
||||
bf_set(pde6_ce, pde6, 1);
|
||||
bf_set(pde6_re, pde6, 1);
|
||||
bf_set(pde6_ae, pde6, 1);
|
||||
bf_set(pde6_ai, pde6, 1);
|
||||
bf_set(pde6_apptagval, pde6, apptagval);
|
||||
bf_set(pde6_ae, pde6, 0);
|
||||
bf_set(pde6_apptagval, pde6, 0);
|
||||
|
||||
/* Endianness conversion if necessary for PDE6 */
|
||||
pde6->word0 = cpu_to_le32(pde6->word0);
|
||||
@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
memset(pde7, 0, sizeof(struct lpfc_pde7));
|
||||
bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
|
||||
|
||||
pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
|
||||
pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
|
||||
pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
|
||||
pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
|
||||
|
||||
protgrp_blks = protgroup_len / 8;
|
||||
protgrp_bytes = protgrp_blks * blksize;
|
||||
@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
|
||||
protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
|
||||
protgroup_offset += protgroup_remainder;
|
||||
protgrp_blks = protgroup_remainder / 8;
|
||||
protgrp_bytes = protgroup_remainder * blksize;
|
||||
protgrp_bytes = protgrp_blks * blksize;
|
||||
} else {
|
||||
protgroup_offset = 0;
|
||||
curr_prot++;
|
||||
@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
|
||||
/*
|
||||
* setup sense data descriptor 0 per SPC-4 as an information
|
||||
* field, and put the failing LBA in it
|
||||
* field, and put the failing LBA in it.
|
||||
* This code assumes there was also a guard/app/ref tag error
|
||||
* indication.
|
||||
*/
|
||||
cmd->sense_buffer[8] = 0; /* Information */
|
||||
cmd->sense_buffer[9] = 0xa; /* Add. length */
|
||||
cmd->sense_buffer[7] = 0xc; /* Additional sense length */
|
||||
cmd->sense_buffer[8] = 0; /* Information descriptor type */
|
||||
cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
|
||||
cmd->sense_buffer[10] = 0x80; /* Validity bit */
|
||||
bghm /= cmd->device->sector_size;
|
||||
|
||||
failing_sector = scsi_get_lba(cmd);
|
||||
failing_sector += bghm;
|
||||
|
||||
put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
|
||||
/* Descriptor Information */
|
||||
put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
|
@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
|
||||
if (rrq) {
|
||||
rrq->send_rrq = send_rrq;
|
||||
rrq->xritag = phba->sli4_hba.xri_ids[xritag];
|
||||
rrq->xritag = xritag;
|
||||
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
|
||||
rrq->ndlp = ndlp;
|
||||
rrq->nlp_DID = ndlp->nlp_DID;
|
||||
@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
|
||||
/* search continue save q for same XRI */
|
||||
list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
|
||||
if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
|
||||
if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
|
||||
saveq->iocb.unsli3.rcvsli3.ox_id) {
|
||||
list_add_tail(&saveq->list, &iocbq->list);
|
||||
found = 1;
|
||||
break;
|
||||
@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
|
||||
irspiocbq);
|
||||
break;
|
||||
case CQE_CODE_RECEIVE:
|
||||
case CQE_CODE_RECEIVE_V1:
|
||||
dmabuf = container_of(cq_event, struct hbq_dmabuf,
|
||||
cq_event);
|
||||
lpfc_sli4_handle_received_buffer(phba, dmabuf);
|
||||
@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
|
||||
* lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @type: The resource extent type.
|
||||
* @extnt_count: buffer to hold port available extent count.
|
||||
* @extnt_size: buffer to hold element count per extent.
|
||||
*
|
||||
* This function allocates all SLI4 resource identifiers.
|
||||
* This function calls the port and retrievs the number of available
|
||||
* extents and their size for a particular extent type.
|
||||
*
|
||||
* Returns: 0 if successful. Nonzero otherwise.
|
||||
**/
|
||||
static int
|
||||
int
|
||||
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
|
||||
uint16_t *extnt_count, uint16_t *extnt_size)
|
||||
{
|
||||
@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
|
||||
req_len, *emb);
|
||||
if (alloc_len < req_len) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"9000 Allocated DMA memory size (x%x) is "
|
||||
"2982 Allocated DMA memory size (x%x) is "
|
||||
"less than the requested DMA memory "
|
||||
"size (x%x)\n", alloc_len, req_len);
|
||||
return -ENOMEM;
|
||||
@ -5505,6 +5512,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @type: The resource extent type.
|
||||
* @extnt_count: buffer to hold port extent count response
|
||||
* @extnt_size: buffer to hold port extent size response.
|
||||
*
|
||||
* This function calls the port to read the host allocated extents
|
||||
* for a particular type.
|
||||
**/
|
||||
int
|
||||
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
|
||||
uint16_t *extnt_cnt, uint16_t *extnt_size)
|
||||
{
|
||||
bool emb;
|
||||
int rc = 0;
|
||||
uint16_t curr_blks = 0;
|
||||
uint32_t req_len, emb_len;
|
||||
uint32_t alloc_len, mbox_tmo;
|
||||
struct list_head *blk_list_head;
|
||||
struct lpfc_rsrc_blks *rsrc_blk;
|
||||
LPFC_MBOXQ_t *mbox;
|
||||
void *virtaddr = NULL;
|
||||
struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
|
||||
struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
|
||||
switch (type) {
|
||||
case LPFC_RSC_TYPE_FCOE_VPI:
|
||||
blk_list_head = &phba->lpfc_vpi_blk_list;
|
||||
break;
|
||||
case LPFC_RSC_TYPE_FCOE_XRI:
|
||||
blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
|
||||
break;
|
||||
case LPFC_RSC_TYPE_FCOE_VFI:
|
||||
blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
|
||||
break;
|
||||
case LPFC_RSC_TYPE_FCOE_RPI:
|
||||
blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
|
||||
break;
|
||||
default:
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Count the number of extents currently allocatd for this type. */
|
||||
list_for_each_entry(rsrc_blk, blk_list_head, list) {
|
||||
if (curr_blks == 0) {
|
||||
/*
|
||||
* The GET_ALLOCATED mailbox does not return the size,
|
||||
* just the count. The size should be just the size
|
||||
* stored in the current allocated block and all sizes
|
||||
* for an extent type are the same so set the return
|
||||
* value now.
|
||||
*/
|
||||
*extnt_size = rsrc_blk->rsrc_size;
|
||||
}
|
||||
curr_blks++;
|
||||
}
|
||||
|
||||
/* Calculate the total requested length of the dma memory. */
|
||||
req_len = curr_blks * sizeof(uint16_t);
|
||||
|
||||
/*
|
||||
* Calculate the size of an embedded mailbox. The uint32_t
|
||||
* accounts for extents-specific word.
|
||||
*/
|
||||
emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
|
||||
sizeof(uint32_t);
|
||||
|
||||
/*
|
||||
* Presume the allocation and response will fit into an embedded
|
||||
* mailbox. If not true, reconfigure to a non-embedded mailbox.
|
||||
*/
|
||||
emb = LPFC_SLI4_MBX_EMBED;
|
||||
req_len = emb_len;
|
||||
if (req_len > emb_len) {
|
||||
req_len = curr_blks * sizeof(uint16_t) +
|
||||
sizeof(union lpfc_sli4_cfg_shdr) +
|
||||
sizeof(uint32_t);
|
||||
emb = LPFC_SLI4_MBX_NEMBED;
|
||||
}
|
||||
|
||||
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mbox)
|
||||
return -ENOMEM;
|
||||
memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
|
||||
|
||||
alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
|
||||
req_len, emb);
|
||||
if (alloc_len < req_len) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2983 Allocated DMA memory size (x%x) is "
|
||||
"less than the requested DMA memory "
|
||||
"size (x%x)\n", alloc_len, req_len);
|
||||
rc = -ENOMEM;
|
||||
goto err_exit;
|
||||
}
|
||||
rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
|
||||
if (unlikely(rc)) {
|
||||
rc = -EIO;
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
if (!phba->sli4_hba.intr_enable)
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
else {
|
||||
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
|
||||
}
|
||||
|
||||
if (unlikely(rc)) {
|
||||
rc = -EIO;
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* Figure out where the response is located. Then get local pointers
|
||||
* to the response data. The port does not guarantee to respond to
|
||||
* all extents counts request so update the local variable with the
|
||||
* allocated count from the port.
|
||||
*/
|
||||
if (emb == LPFC_SLI4_MBX_EMBED) {
|
||||
rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
|
||||
shdr = &rsrc_ext->header.cfg_shdr;
|
||||
*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
|
||||
} else {
|
||||
virtaddr = mbox->sge_array->addr[0];
|
||||
n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
|
||||
shdr = &n_rsrc->cfg_shdr;
|
||||
*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
|
||||
}
|
||||
|
||||
if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
|
||||
"2984 Failed to read allocated resources "
|
||||
"for type %d - Status 0x%x Add'l Status 0x%x.\n",
|
||||
type,
|
||||
bf_get(lpfc_mbox_hdr_status, &shdr->response),
|
||||
bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
|
||||
rc = -EIO;
|
||||
goto err_exit;
|
||||
}
|
||||
err_exit:
|
||||
lpfc_sli4_mbox_cmd_free(phba, mbox);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_hba_setup - SLI4 device intialization PCI function
|
||||
* @phba: Pointer to HBA context object.
|
||||
@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
||||
"Advanced Error Reporting (AER)\n");
|
||||
phba->cfg_aer_support = 0;
|
||||
}
|
||||
rc = 0;
|
||||
}
|
||||
|
||||
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
|
||||
@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
|
||||
unsigned long iflags;
|
||||
int rc;
|
||||
|
||||
/* dump from issue mailbox command if setup */
|
||||
lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
|
||||
|
||||
rc = lpfc_mbox_dev_check(phba);
|
||||
if (unlikely(rc)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
|
||||
@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
|
||||
break;
|
||||
break;
|
||||
case CMD_XMIT_SEQUENCE64_CX:
|
||||
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
|
||||
iocbq->iocb.un.ulpWord[3]);
|
||||
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
iocbq->iocb.unsli3.rcvsli3.ox_id);
|
||||
/* The entire sequence is transmitted for this IOCB */
|
||||
xmit_len = total_len;
|
||||
cmnd = CMD_XMIT_SEQUENCE64_CR;
|
||||
@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
|
||||
wqe->xmit_sequence.xmit_len = xmit_len;
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
break;
|
||||
case CMD_XMIT_BCAST64_CN:
|
||||
/* word3 iocb=iotag32 wqe=seq_payload_len */
|
||||
wqe->xmit_bcast64.seq_payload_len = xmit_len;
|
||||
@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD3);
|
||||
bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
|
||||
break;
|
||||
break;
|
||||
case CMD_FCP_IWRITE64_CR:
|
||||
command_type = FCP_COMMAND_DATA_OUT;
|
||||
/* word3 iocb=iotag wqe=payload_offset_len */
|
||||
@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
LPFC_WQE_LENLOC_WORD4);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
|
||||
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
|
||||
break;
|
||||
break;
|
||||
case CMD_FCP_IREAD64_CR:
|
||||
/* word3 iocb=iotag wqe=payload_offset_len */
|
||||
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
|
||||
@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
LPFC_WQE_LENLOC_WORD4);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
|
||||
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
|
||||
break;
|
||||
break;
|
||||
case CMD_FCP_ICMND64_CR:
|
||||
/* word3 iocb=IO_TAG wqe=reserved */
|
||||
wqe->fcp_icmd.rsrvd3 = 0;
|
||||
@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
|
||||
LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
|
||||
break;
|
||||
break;
|
||||
case CMD_GEN_REQUEST64_CR:
|
||||
/* For this command calculate the xmit length of the
|
||||
* request bde.
|
||||
@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
break;
|
||||
case CMD_XMIT_ELS_RSP64_CX:
|
||||
ndlp = (struct lpfc_nodelist *)iocbq->context1;
|
||||
/* words0-2 BDE memcpy */
|
||||
@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
|
||||
bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
|
||||
bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
iocbq->iocb.unsli3.rcvsli3.ox_id);
|
||||
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
|
||||
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
|
||||
phba->vpi_ids[iocbq->vport->vpi]);
|
||||
@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
|
||||
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
break;
|
||||
case CMD_CLOSE_XRI_CN:
|
||||
case CMD_ABORT_XRI_CN:
|
||||
case CMD_ABORT_XRI_CX:
|
||||
@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
cmnd = CMD_ABORT_XRI_CX;
|
||||
command_type = OTHER_COMMAND;
|
||||
xritag = 0;
|
||||
break;
|
||||
break;
|
||||
case CMD_XMIT_BLS_RSP64_CX:
|
||||
/* As BLS ABTS RSP WQE is very different from other WQEs,
|
||||
* we re-construct this WQE here based on information in
|
||||
@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
|
||||
}
|
||||
|
||||
break;
|
||||
break;
|
||||
case CMD_XRI_ABORTED_CX:
|
||||
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
|
||||
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
|
||||
@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
"2014 Invalid command 0x%x\n",
|
||||
iocbq->iocb.ulpCommand);
|
||||
return IOCB_ERROR;
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
|
||||
@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
|
||||
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
|
||||
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
|
||||
struct hbq_dmabuf *dma_buf;
|
||||
uint32_t status;
|
||||
uint32_t status, rq_id;
|
||||
unsigned long iflags;
|
||||
|
||||
if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
|
||||
if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
|
||||
rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
|
||||
else
|
||||
rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
|
||||
if (rq_id != hrq->queue_id)
|
||||
goto out;
|
||||
|
||||
status = bf_get(lpfc_rcqe_status, rcqe);
|
||||
@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
|
||||
(struct sli4_wcqe_xri_aborted *)&cqevt);
|
||||
break;
|
||||
case CQE_CODE_RECEIVE:
|
||||
case CQE_CODE_RECEIVE_V1:
|
||||
/* Process the RQ event */
|
||||
phba->last_completion_time = jiffies;
|
||||
workposted = lpfc_sli4_sp_handle_rcqe(phba,
|
||||
@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
|
||||
* lpfc_sli4_alloc_xri - Get an available rpi in the device's range
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine is invoked to post rpi header templates to the
|
||||
* port for those SLI4 ports that do not support extents. This routine
|
||||
* posts a PAGE_SIZE memory region to the port to hold up to
|
||||
* PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
|
||||
* and should be called only when interrupts are disabled.
|
||||
* HBA consistent with the SLI-4 interface spec. This routine
|
||||
* posts a SLI4_PAGE_SIZE memory region to the port to hold up to
|
||||
* SLI4_PAGE_SIZE modulo 64 rpi context headers.
|
||||
*
|
||||
* Return codes
|
||||
* 0 - successful
|
||||
* -ERROR - otherwise.
|
||||
*/
|
||||
* Returns
|
||||
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
|
||||
* LPFC_RPI_ALLOC_ERROR if no rpis are available.
|
||||
**/
|
||||
uint16_t
|
||||
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
|
||||
{
|
||||
@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
|
||||
* This function validates the xri maps to the known range of XRIs allocated an
|
||||
* used by the driver.
|
||||
**/
|
||||
static uint16_t
|
||||
uint16_t
|
||||
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
|
||||
uint16_t xri)
|
||||
{
|
||||
@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
|
||||
static struct lpfc_iocbq *
|
||||
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
{
|
||||
struct hbq_dmabuf *hbq_buf;
|
||||
struct lpfc_dmabuf *d_buf, *n_buf;
|
||||
struct lpfc_iocbq *first_iocbq, *iocbq;
|
||||
struct fc_frame_header *fc_hdr;
|
||||
uint32_t sid;
|
||||
uint32_t len, tot_len;
|
||||
struct ulp_bde64 *pbde;
|
||||
|
||||
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
|
||||
@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
lpfc_update_rcv_time_stamp(vport);
|
||||
/* get the Remote Port's SID */
|
||||
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
||||
tot_len = 0;
|
||||
/* Get an iocbq struct to fill in. */
|
||||
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
|
||||
if (first_iocbq) {
|
||||
@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
|
||||
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
|
||||
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
|
||||
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
|
||||
/* iocbq is prepped for internal consumption. Logical vpi. */
|
||||
first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
|
||||
first_iocbq->iocb.ulpContext = NO_XRI;
|
||||
first_iocbq->iocb.unsli3.rcvsli3.ox_id =
|
||||
be16_to_cpu(fc_hdr->fh_ox_id);
|
||||
/* iocbq is prepped for internal consumption. Physical vpi. */
|
||||
first_iocbq->iocb.unsli3.rcvsli3.vpi =
|
||||
vport->phba->vpi_ids[vport->vpi];
|
||||
/* put the first buffer into the first IOCBq */
|
||||
first_iocbq->context2 = &seq_dmabuf->dbuf;
|
||||
first_iocbq->context3 = NULL;
|
||||
@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
|
||||
LPFC_DATA_BUF_SIZE;
|
||||
first_iocbq->iocb.un.rcvels.remoteID = sid;
|
||||
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
|
||||
bf_get(lpfc_rcqe_length,
|
||||
tot_len = bf_get(lpfc_rcqe_length,
|
||||
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
|
||||
}
|
||||
iocbq = first_iocbq;
|
||||
/*
|
||||
@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
pbde = (struct ulp_bde64 *)
|
||||
&iocbq->iocb.unsli3.sli3Words[4];
|
||||
pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
|
||||
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
|
||||
bf_get(lpfc_rcqe_length,
|
||||
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
|
||||
/* We need to get the size out of the right CQE */
|
||||
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
||||
len = bf_get(lpfc_rcqe_length,
|
||||
&hbq_buf->cq_event.cqe.rcqe_cmpl);
|
||||
iocbq->iocb.unsli3.rcvsli3.acc_len += len;
|
||||
tot_len += len;
|
||||
} else {
|
||||
iocbq = lpfc_sli_get_iocbq(vport->phba);
|
||||
if (!iocbq) {
|
||||
@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
|
||||
iocbq->iocb.ulpBdeCount = 1;
|
||||
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
|
||||
LPFC_DATA_BUF_SIZE;
|
||||
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
|
||||
bf_get(lpfc_rcqe_length,
|
||||
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
|
||||
/* We need to get the size out of the right CQE */
|
||||
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
||||
len = bf_get(lpfc_rcqe_length,
|
||||
&hbq_buf->cq_event.cqe.rcqe_cmpl);
|
||||
tot_len += len;
|
||||
iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
|
||||
|
||||
iocbq->iocb.un.rcvels.remoteID = sid;
|
||||
list_add_tail(&iocbq->list, &first_iocbq->list);
|
||||
}
|
||||
@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
|
||||
lpfc_in_buf_free(phba, &dmabuf->dbuf);
|
||||
return;
|
||||
}
|
||||
fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
if ((bf_get(lpfc_cqe_code,
|
||||
&dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
|
||||
fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
|
||||
&dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
else
|
||||
fcfi = bf_get(lpfc_rcqe_fcf_id,
|
||||
&dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
|
||||
if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
|
||||
/* throw out the frame */
|
||||
@ -14450,6 +14634,92 @@ fail_fcf_read:
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_check_next_fcf_pri
|
||||
* phba pointer to the lpfc_hba struct for this port.
|
||||
* This routine is called from the lpfc_sli4_fcf_rr_next_index_get
|
||||
* routine when the rr_bmask is empty. The FCF indecies are put into the
|
||||
* rr_bmask based on their priority level. Starting from the highest priority
|
||||
* to the lowest. The most likely FCF candidate will be in the highest
|
||||
* priority group. When this routine is called it searches the fcf_pri list for
|
||||
* next lowest priority group and repopulates the rr_bmask with only those
|
||||
* fcf_indexes.
|
||||
* returns:
|
||||
* 1=success 0=failure
|
||||
**/
|
||||
int
|
||||
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
|
||||
{
|
||||
uint16_t next_fcf_pri;
|
||||
uint16_t last_index;
|
||||
struct lpfc_fcf_pri *fcf_pri;
|
||||
int rc;
|
||||
int ret = 0;
|
||||
|
||||
last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"3060 Last IDX %d\n", last_index);
|
||||
if (list_empty(&phba->fcf.fcf_pri_list)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"3061 Last IDX %d\n", last_index);
|
||||
return 0; /* Empty rr list */
|
||||
}
|
||||
next_fcf_pri = 0;
|
||||
/*
|
||||
* Clear the rr_bmask and set all of the bits that are at this
|
||||
* priority.
|
||||
*/
|
||||
memset(phba->fcf.fcf_rr_bmask, 0,
|
||||
sizeof(*phba->fcf.fcf_rr_bmask));
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
|
||||
if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
|
||||
continue;
|
||||
/*
|
||||
* the 1st priority that has not FLOGI failed
|
||||
* will be the highest.
|
||||
*/
|
||||
if (!next_fcf_pri)
|
||||
next_fcf_pri = fcf_pri->fcf_rec.priority;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
|
||||
rc = lpfc_sli4_fcf_rr_index_set(phba,
|
||||
fcf_pri->fcf_rec.fcf_index);
|
||||
if (rc)
|
||||
return 0;
|
||||
}
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
}
|
||||
/*
|
||||
* if next_fcf_pri was not set above and the list is not empty then
|
||||
* we have failed flogis on all of them. So reset flogi failed
|
||||
* and start at the begining.
|
||||
*/
|
||||
if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
|
||||
list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
|
||||
fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
|
||||
/*
|
||||
* the 1st priority that has not FLOGI failed
|
||||
* will be the highest.
|
||||
*/
|
||||
if (!next_fcf_pri)
|
||||
next_fcf_pri = fcf_pri->fcf_rec.priority;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
|
||||
rc = lpfc_sli4_fcf_rr_index_set(phba,
|
||||
fcf_pri->fcf_rec.fcf_index);
|
||||
if (rc)
|
||||
return 0;
|
||||
}
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
}
|
||||
} else
|
||||
ret = 1;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
/**
|
||||
* lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
||||
uint16_t next_fcf_index;
|
||||
|
||||
/* Search start from next bit of currently registered FCF index */
|
||||
next_priority:
|
||||
next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX;
|
||||
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
|
||||
@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
||||
next_fcf_index);
|
||||
|
||||
/* Wrap around condition on phba->fcf.fcf_rr_bmask */
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
/*
|
||||
* If we have wrapped then we need to clear the bits that
|
||||
* have been tested so that we can detect when we should
|
||||
* change the priority level.
|
||||
*/
|
||||
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
|
||||
}
|
||||
|
||||
|
||||
/* Check roundrobin failover list empty condition */
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
|
||||
next_fcf_index == phba->fcf.current_rec.fcf_indx) {
|
||||
/*
|
||||
* If next fcf index is not found check if there are lower
|
||||
* Priority level fcf's in the fcf_priority list.
|
||||
* Set up the rr_bmask with all of the avaiable fcf bits
|
||||
* at that level and continue the selection process.
|
||||
*/
|
||||
if (lpfc_check_next_fcf_pri_level(phba))
|
||||
goto next_priority;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2844 No roundrobin failover FCF available\n");
|
||||
return LPFC_FCOE_FCF_NEXT_NONE;
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
|
||||
return LPFC_FCOE_FCF_NEXT_NONE;
|
||||
else {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"3063 Only FCF available idx %d, flag %x\n",
|
||||
next_fcf_index,
|
||||
phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
|
||||
return next_fcf_index;
|
||||
}
|
||||
}
|
||||
|
||||
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
|
||||
phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
|
||||
LPFC_FCF_FLOGI_FAILED)
|
||||
goto next_priority;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2845 Get next roundrobin failover FCF (x%x)\n",
|
||||
next_fcf_index);
|
||||
@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
void
|
||||
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
{
|
||||
struct lpfc_fcf_pri *fcf_pri;
|
||||
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2762 FCF (x%x) reached driver's book "
|
||||
@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
return;
|
||||
}
|
||||
/* Clear the eligible FCF record index bmask */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
|
||||
if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
|
||||
list_del_init(&fcf_pri->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
|
@ -81,6 +81,8 @@
|
||||
(fc_hdr)->fh_f_ctl[1] << 8 | \
|
||||
(fc_hdr)->fh_f_ctl[2])
|
||||
|
||||
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
|
||||
|
||||
enum lpfc_sli4_queue_type {
|
||||
LPFC_EQ,
|
||||
LPFC_GCQ,
|
||||
@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
|
||||
#define RECORD_VALID 0x02
|
||||
};
|
||||
|
||||
struct lpfc_fcf_pri_rec {
|
||||
uint16_t fcf_index;
|
||||
#define LPFC_FCF_ON_PRI_LIST 0x0001
|
||||
#define LPFC_FCF_FLOGI_FAILED 0x0002
|
||||
uint16_t flag;
|
||||
uint32_t priority;
|
||||
};
|
||||
|
||||
struct lpfc_fcf_pri {
|
||||
struct list_head list;
|
||||
struct lpfc_fcf_pri_rec fcf_rec;
|
||||
};
|
||||
|
||||
/*
|
||||
* Maximum FCF table index, it is for driver internal book keeping, it
|
||||
* just needs to be no less than the supported HBA's FCF table size.
|
||||
*/
|
||||
#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
|
||||
|
||||
struct lpfc_fcf {
|
||||
uint16_t fcfi;
|
||||
uint32_t fcf_flag;
|
||||
@ -176,15 +197,13 @@ struct lpfc_fcf {
|
||||
uint32_t eligible_fcf_cnt;
|
||||
struct lpfc_fcf_rec current_rec;
|
||||
struct lpfc_fcf_rec failover_rec;
|
||||
struct list_head fcf_pri_list;
|
||||
struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
|
||||
uint32_t current_fcf_scan_pri;
|
||||
struct timer_list redisc_wait;
|
||||
unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
|
||||
};
|
||||
|
||||
/*
|
||||
* Maximum FCF table index, it is for driver internal book keeping, it
|
||||
* just needs to be no less than the supported HBA's FCF table size.
|
||||
*/
|
||||
#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
|
||||
|
||||
#define LPFC_REGION23_SIGNATURE "RG23"
|
||||
#define LPFC_REGION23_VERSION 1
|
||||
|
@ -18,7 +18,7 @@
|
||||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "8.3.23"
|
||||
#define LPFC_DRIVER_VERSION "8.3.25"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
|
||||
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
|
||||
|
@ -33,9 +33,9 @@
|
||||
/*
|
||||
* MegaRAID SAS Driver meta data
|
||||
*/
|
||||
#define MEGASAS_VERSION "00.00.05.38-rc1"
|
||||
#define MEGASAS_RELDATE "May. 11, 2011"
|
||||
#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
|
||||
#define MEGASAS_VERSION "00.00.05.40-rc1"
|
||||
#define MEGASAS_RELDATE "Jul. 26, 2011"
|
||||
#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
|
||||
|
||||
/*
|
||||
* Device IDs
|
||||
|
@ -18,7 +18,7 @@
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
* FILE: megaraid_sas_base.c
|
||||
* Version : v00.00.05.38-rc1
|
||||
* Version : v00.00.05.40-rc1
|
||||
*
|
||||
* Authors: LSI Corporation
|
||||
* Sreenivas Bagalkote
|
||||
@ -54,6 +54,7 @@
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_tcq.h>
|
||||
#include "megaraid_sas_fusion.h"
|
||||
#include "megaraid_sas.h"
|
||||
|
||||
@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
|
||||
}
|
||||
}
|
||||
|
||||
static int megasas_change_queue_depth(struct scsi_device *sdev,
|
||||
int queue_depth, int reason)
|
||||
{
|
||||
if (reason != SCSI_QDEPTH_DEFAULT)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (queue_depth > sdev->host->can_queue)
|
||||
queue_depth = sdev->host->can_queue;
|
||||
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
|
||||
queue_depth);
|
||||
|
||||
return queue_depth;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scsi host template for megaraid_sas driver
|
||||
*/
|
||||
@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
|
||||
.eh_timed_out = megasas_reset_timer,
|
||||
.bios_param = megasas_bios_param,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.change_queue_depth = megasas_change_queue_depth,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
|
||||
/* Get dev handle from Pd */
|
||||
*pDevHandle = MR_PdDevHandleGet(pd, map);
|
||||
}
|
||||
retval = FALSE;
|
||||
}
|
||||
|
||||
*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
|
||||
|
@ -4258,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
|
||||
u32 log_info;
|
||||
struct MPT2SAS_DEVICE *sas_device_priv_data;
|
||||
u32 response_code = 0;
|
||||
unsigned long flags;
|
||||
|
||||
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
|
||||
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
|
||||
@ -4282,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
|
||||
* the failed direct I/O should be redirected to volume
|
||||
*/
|
||||
if (_scsih_scsi_direct_io_get(ioc, smid)) {
|
||||
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
|
||||
ioc->scsi_lookup[smid - 1].scmd = scmd;
|
||||
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
|
||||
_scsih_scsi_direct_io_set(ioc, smid, 0);
|
||||
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
|
||||
mpi_request->DevHandle =
|
||||
|
@ -3,7 +3,7 @@
|
||||
#
|
||||
# Copyright 2007 Red Hat, Inc.
|
||||
# Copyright 2008 Marvell. <kewei@marvell.com>
|
||||
# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com>
|
||||
# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
|
||||
#
|
||||
# This file is licensed under GPLv2.
|
||||
#
|
||||
@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
|
||||
help
|
||||
Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
|
||||
the driver prints some messages to the console.
|
||||
config SCSI_MVSAS_TASKLET
|
||||
bool "Support for interrupt tasklet"
|
||||
default n
|
||||
depends on SCSI_MVSAS
|
||||
help
|
||||
Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode,
|
||||
the interrupt will schedule a tasklet.
|
||||
|
@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
|
||||
u32 reg;
|
||||
struct mvs_phy *phy = &mvi->phy[i];
|
||||
|
||||
/* TODO check & save device type */
|
||||
reg = mr32(MVS_GBL_PORT_TYPE);
|
||||
phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
|
||||
if (reg & MODE_SAS_SATA & (1 << i))
|
||||
@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
|
||||
u32 tmp;
|
||||
|
||||
tmp = mr32(MVS_PCS);
|
||||
if (mvi->chip->n_phy <= 4)
|
||||
if (mvi->chip->n_phy <= MVS_SOC_PORTS)
|
||||
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
|
||||
else
|
||||
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
|
||||
@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
|
||||
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
int i;
|
||||
|
||||
mvs_phy_hacks(mvi);
|
||||
|
||||
if (!(mvi->flags & MVF_FLAG_SOC)) {
|
||||
/* TEST - for phy decoding error, adjust voltage levels */
|
||||
mw32(MVS_P0_VSR_ADDR + 0, 0x8);
|
||||
mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
|
||||
|
||||
mw32(MVS_P0_VSR_ADDR + 8, 0x8);
|
||||
mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
|
||||
|
||||
mw32(MVS_P0_VSR_ADDR + 16, 0x8);
|
||||
mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
|
||||
|
||||
mw32(MVS_P0_VSR_ADDR + 24, 0x8);
|
||||
mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
|
||||
for (i = 0; i < MVS_SOC_PORTS; i++) {
|
||||
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
|
||||
mvs_write_port_vsr_data(mvi, i, 0x2F0);
|
||||
}
|
||||
} else {
|
||||
int i;
|
||||
/* disable auto port detection */
|
||||
mw32(MVS_GBL_PORT_TYPE, 0);
|
||||
for (i = 0; i < mvi->chip->n_phy; i++) {
|
||||
@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
|
||||
u32 reg, tmp;
|
||||
|
||||
if (!(mvi->flags & MVF_FLAG_SOC)) {
|
||||
if (phy_id < 4)
|
||||
if (phy_id < MVS_SOC_PORTS)
|
||||
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®);
|
||||
else
|
||||
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®);
|
||||
@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
|
||||
reg = mr32(MVS_PHY_CTL);
|
||||
|
||||
tmp = reg;
|
||||
if (phy_id < 4)
|
||||
if (phy_id < MVS_SOC_PORTS)
|
||||
tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
|
||||
else
|
||||
tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
|
||||
tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
|
||||
|
||||
if (!(mvi->flags & MVF_FLAG_SOC)) {
|
||||
if (phy_id < 4) {
|
||||
if (phy_id < MVS_SOC_PORTS) {
|
||||
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
|
||||
mdelay(10);
|
||||
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
|
||||
@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
|
||||
tmp &= ~PHYEV_RDY_CH;
|
||||
mvs_write_port_irq_stat(mvi, phy_id, tmp);
|
||||
tmp = mvs_read_phy_ctl(mvi, phy_id);
|
||||
if (hard == 1)
|
||||
if (hard == MVS_HARD_RESET)
|
||||
tmp |= PHY_RST_HARD;
|
||||
else if (hard == 0)
|
||||
else if (hard == MVS_SOFT_RESET)
|
||||
tmp |= PHY_RST;
|
||||
mvs_write_phy_ctl(mvi, phy_id, tmp);
|
||||
if (hard) {
|
||||
@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
|
||||
/* init phys */
|
||||
mvs_64xx_phy_hacks(mvi);
|
||||
|
||||
tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
|
||||
tmp &= 0x0000ffff;
|
||||
tmp |= 0x00fa0000;
|
||||
mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
|
||||
|
||||
/* enable auto port detection */
|
||||
mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
|
||||
|
||||
@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
|
||||
|
||||
mvs_64xx_enable_xmt(mvi, i);
|
||||
|
||||
mvs_64xx_phy_reset(mvi, i, 1);
|
||||
mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
|
||||
msleep(500);
|
||||
mvs_64xx_detect_porttype(mvi, i);
|
||||
}
|
||||
@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
|
||||
mvs_update_phyinfo(mvi, i, 1);
|
||||
}
|
||||
|
||||
/* FIXME: update wide port bitmaps */
|
||||
|
||||
/* little endian for open address and command table, etc. */
|
||||
/*
|
||||
* it seems that ( from the spec ) turning on big-endian won't
|
||||
* do us any good on big-endian machines, need further confirmation
|
||||
*/
|
||||
cctl = mr32(MVS_CTL);
|
||||
cctl |= CCTL_ENDIAN_CMD;
|
||||
cctl |= CCTL_ENDIAN_DATA;
|
||||
@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
|
||||
/* reset CMD queue */
|
||||
tmp = mr32(MVS_PCS);
|
||||
tmp |= PCS_CMD_RST;
|
||||
tmp &= ~PCS_SELF_CLEAR;
|
||||
mw32(MVS_PCS, tmp);
|
||||
/* interrupt coalescing may cause missing HW interrput in some case,
|
||||
* and the max count is 0x1ff, while our max slot is 0x200,
|
||||
/*
|
||||
* the max count is 0x1ff, while our max slot is 0x200,
|
||||
* it will make count 0.
|
||||
*/
|
||||
tmp = 0;
|
||||
mw32(MVS_INT_COAL, tmp);
|
||||
if (MVS_CHIP_SLOT_SZ > 0x1ff)
|
||||
mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
|
||||
else
|
||||
mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
|
||||
|
||||
tmp = 0x100;
|
||||
tmp = 0x10000 | interrupt_coalescing;
|
||||
mw32(MVS_INT_COAL_TMOUT, tmp);
|
||||
|
||||
/* ladies and gentlemen, start your engines */
|
||||
@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
|
||||
|
||||
/* clear CMD_CMPLT ASAP */
|
||||
mw32_f(MVS_INT_STAT, CINT_DONE);
|
||||
#ifndef MVS_USE_TASKLET
|
||||
|
||||
spin_lock(&mvi->lock);
|
||||
#endif
|
||||
mvs_int_full(mvi);
|
||||
#ifndef MVS_USE_TASKLET
|
||||
spin_unlock(&mvi->lock);
|
||||
#endif
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
|
||||
{
|
||||
u32 tmp;
|
||||
struct mvs_phy *phy = &mvi->phy[i];
|
||||
/* workaround for HW phy decoding error on 1.5g disk drive */
|
||||
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
|
||||
tmp = mvs_read_port_vsr_data(mvi, i);
|
||||
if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
|
||||
@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
|
||||
tmp |= lrmax;
|
||||
}
|
||||
mvs_write_phy_ctl(mvi, phy_id, tmp);
|
||||
mvs_64xx_phy_reset(mvi, phy_id, 1);
|
||||
mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
|
||||
}
|
||||
|
||||
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
|
||||
@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
|
||||
void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
|
||||
int buf_len, int from, void *prd)
|
||||
{
|
||||
int i;
|
||||
struct mvs_prd *buf_prd = prd;
|
||||
dma_addr_t buf_dma = mvi->bulk_buffer_dma;
|
||||
|
||||
buf_prd += from;
|
||||
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
|
||||
buf_prd->addr = cpu_to_le64(buf_dma);
|
||||
@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
|
||||
++buf_prd;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 tmp = 0;
|
||||
/*
|
||||
* the max count is 0x1ff, while our max slot is 0x200,
|
||||
* it will make count 0.
|
||||
*/
|
||||
if (time == 0) {
|
||||
mw32(MVS_INT_COAL, 0);
|
||||
mw32(MVS_INT_COAL_TMOUT, 0x10000);
|
||||
} else {
|
||||
if (MVS_CHIP_SLOT_SZ > 0x1ff)
|
||||
mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
|
||||
else
|
||||
mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
|
||||
|
||||
tmp = 0x10000 | time;
|
||||
mw32(MVS_INT_COAL_TMOUT, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
const struct mvs_dispatch mvs_64xx_dispatch = {
|
||||
"mv64xx",
|
||||
@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
|
||||
mvs_write_port_irq_stat,
|
||||
mvs_read_port_irq_mask,
|
||||
mvs_write_port_irq_mask,
|
||||
mvs_get_sas_addr,
|
||||
mvs_64xx_command_active,
|
||||
mvs_64xx_clear_srs_irq,
|
||||
mvs_64xx_issue_stop,
|
||||
@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
|
||||
mvs_64xx_spi_buildcmd,
|
||||
mvs_64xx_spi_issuecmd,
|
||||
mvs_64xx_spi_waitdataready,
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
mvs_64xx_fix_dma,
|
||||
#endif
|
||||
mvs_64xx_tune_interrupt,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
|
||||
}
|
||||
}
|
||||
|
||||
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
|
||||
struct phy_tuning phy_tuning)
|
||||
{
|
||||
u32 tmp, setting_0 = 0, setting_1 = 0;
|
||||
u8 i;
|
||||
|
||||
/* Remap information for B0 chip:
|
||||
*
|
||||
* R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
|
||||
* R0Dh -> R118h[31:16] (Generation 1 Setting 0)
|
||||
* R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
|
||||
* R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
|
||||
* R10h -> R120h[15:0] (Generation 2 Setting 1)
|
||||
* R11h -> R120h[31:16] (Generation 3 Setting 0)
|
||||
* R12h -> R124h[15:0] (Generation 3 Setting 1)
|
||||
* R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
|
||||
*/
|
||||
|
||||
/* A0 has a different set of registers */
|
||||
if (mvi->pdev->revision == VANIR_A0_REV)
|
||||
return;
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
|
||||
switch (i) {
|
||||
case 0:
|
||||
setting_0 = GENERATION_1_SETTING;
|
||||
setting_1 = GENERATION_1_2_SETTING;
|
||||
break;
|
||||
case 1:
|
||||
setting_0 = GENERATION_1_2_SETTING;
|
||||
setting_1 = GENERATION_2_3_SETTING;
|
||||
break;
|
||||
case 2:
|
||||
setting_0 = GENERATION_2_3_SETTING;
|
||||
setting_1 = GENERATION_3_4_SETTING;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Set:
|
||||
*
|
||||
* Transmitter Emphasis Enable
|
||||
* Transmitter Emphasis Amplitude
|
||||
* Transmitter Amplitude
|
||||
*/
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~(0xFBE << 16);
|
||||
tmp |= (((phy_tuning.trans_emp_en << 11) |
|
||||
(phy_tuning.trans_emp_amp << 7) |
|
||||
(phy_tuning.trans_amp << 1)) << 16);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
|
||||
/* Set Transmitter Amplitude Adjust */
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~(0xC000);
|
||||
tmp |= (phy_tuning.trans_amp_adj << 14);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
|
||||
struct ffe_control ffe)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
/* Don't run this if A0/B0 */
|
||||
if ((mvi->pdev->revision == VANIR_A0_REV)
|
||||
|| (mvi->pdev->revision == VANIR_B0_REV))
|
||||
return;
|
||||
|
||||
/* FFE Resistor and Capacitor */
|
||||
/* R10Ch DFE Resolution Control/Squelch and FFE Setting
|
||||
*
|
||||
* FFE_FORCE [7]
|
||||
* FFE_RES_SEL [6:4]
|
||||
* FFE_CAP_SEL [3:0]
|
||||
*/
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~0xFF;
|
||||
|
||||
/* Read from HBA_Info_Page */
|
||||
tmp |= ((0x1 << 7) |
|
||||
(ffe.ffe_rss_sel << 4) |
|
||||
(ffe.ffe_cap_sel << 0));
|
||||
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
|
||||
/* R064h PHY Mode Register 1
|
||||
*
|
||||
* DFE_DIS 18
|
||||
*/
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~0x40001;
|
||||
/* Hard coding */
|
||||
/* No defines in HBA_Info_Page */
|
||||
tmp |= (0 << 18);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
|
||||
/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
|
||||
*
|
||||
* DFE_UPDATE_EN [11:6]
|
||||
* DFE_FX_FORCE [5:0]
|
||||
*/
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~0xFFF;
|
||||
/* Hard coding */
|
||||
/* No defines in HBA_Info_Page */
|
||||
tmp |= ((0x3F << 6) | (0x0 << 0));
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
|
||||
/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
|
||||
*
|
||||
* FFE_TRAIN_EN 3
|
||||
*/
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp &= ~0x8;
|
||||
/* Hard coding */
|
||||
/* No defines in HBA_Info_Page */
|
||||
tmp |= (0 << 3);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
}
|
||||
|
||||
/*Notice: this function must be called when phy is disabled*/
|
||||
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
|
||||
{
|
||||
union reg_phy_cfg phy_cfg, phy_cfg_tmp;
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
|
||||
phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
phy_cfg.v = 0;
|
||||
phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
|
||||
phy_cfg.u.sas_support = 1;
|
||||
phy_cfg.u.sata_support = 1;
|
||||
phy_cfg.u.sata_host_mode = 1;
|
||||
|
||||
switch (rate) {
|
||||
case 0x0:
|
||||
/* support 1.5 Gbps */
|
||||
phy_cfg.u.speed_support = 1;
|
||||
phy_cfg.u.snw_3_support = 0;
|
||||
phy_cfg.u.tx_lnk_parity = 1;
|
||||
phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
|
||||
break;
|
||||
case 0x1:
|
||||
|
||||
/* support 1.5, 3.0 Gbps */
|
||||
phy_cfg.u.speed_support = 3;
|
||||
phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
|
||||
phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
|
||||
break;
|
||||
case 0x2:
|
||||
default:
|
||||
/* support 1.5, 3.0, 6.0 Gbps */
|
||||
phy_cfg.u.speed_support = 7;
|
||||
phy_cfg.u.snw_3_support = 1;
|
||||
phy_cfg.u.tx_lnk_parity = 1;
|
||||
phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
|
||||
phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
|
||||
break;
|
||||
}
|
||||
mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
|
||||
}
|
||||
|
||||
static void __devinit
|
||||
mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
|
||||
{
|
||||
u32 temp;
|
||||
temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
|
||||
if (temp == 0xFFFFFFFFL) {
|
||||
mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
|
||||
mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
|
||||
mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
|
||||
}
|
||||
|
||||
temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
|
||||
if (temp == 0xFFL) {
|
||||
switch (mvi->pdev->revision) {
|
||||
case VANIR_A0_REV:
|
||||
case VANIR_B0_REV:
|
||||
mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
|
||||
mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
|
||||
break;
|
||||
case VANIR_C0_REV:
|
||||
case VANIR_C1_REV:
|
||||
case VANIR_C2_REV:
|
||||
default:
|
||||
mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
|
||||
mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
|
||||
if (temp == 0xFFL)
|
||||
/*set default phy_rate = 6Gbps*/
|
||||
mvi->hba_info_param.phy_rate[phy_id] = 0x2;
|
||||
|
||||
set_phy_tuning(mvi, phy_id,
|
||||
mvi->hba_info_param.phy_tuning[phy_id]);
|
||||
set_phy_ffe_tuning(mvi, phy_id,
|
||||
mvi->hba_info_param.ffe_ctl[phy_id]);
|
||||
set_phy_rate(mvi, phy_id,
|
||||
mvi->hba_info_param.phy_rate[phy_id]);
|
||||
}
|
||||
|
||||
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
|
||||
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
u32 delay = 5000;
|
||||
if (hard == MVS_PHY_TUNE) {
|
||||
mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
|
||||
tmp = mvs_read_port_cfg_data(mvi, phy_id);
|
||||
mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
|
||||
mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
|
||||
return;
|
||||
}
|
||||
tmp = mvs_read_port_irq_stat(mvi, phy_id);
|
||||
tmp &= ~PHYEV_RDY_CH;
|
||||
mvs_write_port_irq_stat(mvi, phy_id, tmp);
|
||||
@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
|
||||
mvs_write_phy_ctl(mvi, phy_id, tmp);
|
||||
do {
|
||||
tmp = mvs_read_phy_ctl(mvi, phy_id);
|
||||
} while (tmp & PHY_RST_HARD);
|
||||
udelay(10);
|
||||
delay--;
|
||||
} while ((tmp & PHY_RST_HARD) && delay);
|
||||
if (!delay)
|
||||
mv_dprintk("phy hard reset failed.\n");
|
||||
} else {
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp = mvs_read_phy_ctl(mvi, phy_id);
|
||||
tmp |= PHY_RST;
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp);
|
||||
mvs_write_phy_ctl(mvi, phy_id, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
|
||||
|
||||
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
|
||||
{
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
|
||||
u32 tmp;
|
||||
u8 revision = 0;
|
||||
|
||||
revision = mvi->pdev->revision;
|
||||
if (revision == VANIR_A0_REV) {
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
|
||||
}
|
||||
if (revision == VANIR_B0_REV) {
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
|
||||
}
|
||||
|
||||
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
|
||||
tmp = mvs_read_port_vsr_data(mvi, phy_id);
|
||||
tmp |= bit(0);
|
||||
mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
|
||||
}
|
||||
|
||||
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
void __iomem *regs = mvi->regs;
|
||||
int i;
|
||||
u32 tmp, cctl;
|
||||
u8 revision;
|
||||
|
||||
revision = mvi->pdev->revision;
|
||||
mvs_show_pcie_usage(mvi);
|
||||
if (mvi->flags & MVF_FLAG_SOC) {
|
||||
tmp = mr32(MVS_PHY_CTL);
|
||||
@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
msleep(100);
|
||||
}
|
||||
|
||||
/* disable Multiplexing, enable phy implemented */
|
||||
mw32(MVS_PORTS_IMP, 0xFF);
|
||||
|
||||
if (revision == VANIR_A0_REV) {
|
||||
mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
|
||||
mw32(MVS_PA_VSR_PORT, 0x00018080);
|
||||
}
|
||||
mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
|
||||
if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
|
||||
/* set 6G/3G/1.5G, multiplexing, without SSC */
|
||||
mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
|
||||
else
|
||||
/* set 6G/3G/1.5G, multiplexing, with and without SSC */
|
||||
mw32(MVS_PA_VSR_PORT, 0x0084fffe);
|
||||
|
||||
if (revision == VANIR_B0_REV) {
|
||||
mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
|
||||
mw32(MVS_PA_VSR_PORT, 0x08001006);
|
||||
mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
|
||||
mw32(MVS_PA_VSR_PORT, 0x0000705f);
|
||||
}
|
||||
|
||||
/* reset control */
|
||||
mw32(MVS_PCS, 0); /* MVS_PCS */
|
||||
mw32(MVS_STP_REG_SET_0, 0);
|
||||
@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
/* init phys */
|
||||
mvs_phy_hacks(mvi);
|
||||
|
||||
/* disable Multiplexing, enable phy implemented */
|
||||
mw32(MVS_PORTS_IMP, 0xFF);
|
||||
|
||||
|
||||
mw32(MVS_PA_VSR_ADDR, 0x00000104);
|
||||
mw32(MVS_PA_VSR_PORT, 0x00018080);
|
||||
mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
|
||||
mw32(MVS_PA_VSR_PORT, 0x0084ffff);
|
||||
|
||||
/* set LED blink when IO*/
|
||||
mw32(MVS_PA_VSR_ADDR, 0x00000030);
|
||||
mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
|
||||
tmp = mr32(MVS_PA_VSR_PORT);
|
||||
tmp &= 0xFFFF00FF;
|
||||
tmp |= 0x00003300;
|
||||
@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
mvs_94xx_phy_disable(mvi, i);
|
||||
/* set phy local SAS address */
|
||||
mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
|
||||
(mvi->phy[i].dev_sas_addr));
|
||||
cpu_to_le64(mvi->phy[i].dev_sas_addr));
|
||||
|
||||
mvs_94xx_enable_xmt(mvi, i);
|
||||
mvs_94xx_config_reg_from_hba(mvi, i);
|
||||
mvs_94xx_phy_enable(mvi, i);
|
||||
|
||||
mvs_94xx_phy_reset(mvi, i, 1);
|
||||
mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
|
||||
msleep(500);
|
||||
mvs_94xx_detect_porttype(mvi, i);
|
||||
}
|
||||
@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
mvs_update_phyinfo(mvi, i, 1);
|
||||
}
|
||||
|
||||
/* FIXME: update wide port bitmaps */
|
||||
|
||||
/* little endian for open address and command table, etc. */
|
||||
/*
|
||||
* it seems that ( from the spec ) turning on big-endian won't
|
||||
* do us any good on big-endian machines, need further confirmation
|
||||
*/
|
||||
cctl = mr32(MVS_CTL);
|
||||
cctl |= CCTL_ENDIAN_CMD;
|
||||
cctl |= CCTL_ENDIAN_DATA;
|
||||
cctl &= ~CCTL_ENDIAN_OPEN;
|
||||
cctl |= CCTL_ENDIAN_RSP;
|
||||
mw32_f(MVS_CTL, cctl);
|
||||
@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
/* reset CMD queue */
|
||||
tmp = mr32(MVS_PCS);
|
||||
tmp |= PCS_CMD_RST;
|
||||
tmp &= ~PCS_SELF_CLEAR;
|
||||
mw32(MVS_PCS, tmp);
|
||||
/* interrupt coalescing may cause missing HW interrput in some case,
|
||||
* and the max count is 0x1ff, while our max slot is 0x200,
|
||||
/*
|
||||
* the max count is 0x1ff, while our max slot is 0x200,
|
||||
* it will make count 0.
|
||||
*/
|
||||
tmp = 0;
|
||||
mw32(MVS_INT_COAL, tmp);
|
||||
if (MVS_CHIP_SLOT_SZ > 0x1ff)
|
||||
mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
|
||||
else
|
||||
mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
|
||||
|
||||
tmp = 0x100;
|
||||
/* default interrupt coalescing time is 128us */
|
||||
tmp = 0x10000 | interrupt_coalescing;
|
||||
mw32(MVS_INT_COAL_TMOUT, tmp);
|
||||
|
||||
/* ladies and gentlemen, start your engines */
|
||||
@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
|
||||
|
||||
/* enable completion queue interrupt */
|
||||
tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
|
||||
CINT_DMA_PCIE);
|
||||
CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
|
||||
tmp |= CINT_PHY_MASK;
|
||||
mw32(MVS_INT_MASK, tmp);
|
||||
|
||||
@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
|
||||
if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
|
||||
((stat & IRQ_SAS_B) && mvi->id == 1)) {
|
||||
mw32_f(MVS_INT_STAT, CINT_DONE);
|
||||
#ifndef MVS_USE_TASKLET
|
||||
|
||||
spin_lock(&mvi->lock);
|
||||
#endif
|
||||
mvs_int_full(mvi);
|
||||
#ifndef MVS_USE_TASKLET
|
||||
spin_unlock(&mvi->lock);
|
||||
#endif
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
|
||||
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
|
||||
{
|
||||
u32 tmp;
|
||||
mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
|
||||
do {
|
||||
tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
|
||||
} while (tmp & 1 << (slot_idx % 32));
|
||||
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
|
||||
if (tmp && 1 << (slot_idx % 32)) {
|
||||
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
|
||||
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
|
||||
1 << (slot_idx % 32));
|
||||
do {
|
||||
tmp = mvs_cr32(mvi,
|
||||
MVS_COMMAND_ACTIVE + (slot_idx >> 3));
|
||||
} while (tmp & 1 << (slot_idx % 32));
|
||||
}
|
||||
}
|
||||
|
||||
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 tmp;
|
||||
|
||||
if (clear_all) {
|
||||
tmp = mr32(MVS_INT_STAT_SRS_0);
|
||||
if (tmp) {
|
||||
mv_dprintk("check SRS 0 %08X.\n", tmp);
|
||||
mw32(MVS_INT_STAT_SRS_0, tmp);
|
||||
}
|
||||
tmp = mr32(MVS_INT_STAT_SRS_1);
|
||||
if (tmp) {
|
||||
mv_dprintk("check SRS 1 %08X.\n", tmp);
|
||||
mw32(MVS_INT_STAT_SRS_1, tmp);
|
||||
}
|
||||
} else {
|
||||
if (reg_set > 31)
|
||||
tmp = mr32(MVS_INT_STAT_SRS_1);
|
||||
else
|
||||
tmp = mr32(MVS_INT_STAT_SRS_0);
|
||||
|
||||
if (tmp & (1 << (reg_set % 32))) {
|
||||
mv_dprintk("register set 0x%x was stopped.\n", reg_set);
|
||||
if (reg_set > 31)
|
||||
mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
|
||||
else
|
||||
mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
|
||||
@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 tmp;
|
||||
mvs_94xx_clear_srs_irq(mvi, 0, 1);
|
||||
|
||||
if (type == PORT_TYPE_SATA) {
|
||||
tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
|
||||
mw32(MVS_INT_STAT_SRS_0, tmp);
|
||||
}
|
||||
mw32(MVS_INT_STAT, CINT_CI_STOP);
|
||||
tmp = mr32(MVS_INT_STAT);
|
||||
mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
|
||||
tmp = mr32(MVS_PCS) | 0xFF00;
|
||||
mw32(MVS_PCS, tmp);
|
||||
}
|
||||
|
||||
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 err_0, err_1;
|
||||
u8 i;
|
||||
struct mvs_device *device;
|
||||
|
||||
err_0 = mr32(MVS_NON_NCQ_ERR_0);
|
||||
err_1 = mr32(MVS_NON_NCQ_ERR_1);
|
||||
|
||||
mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
|
||||
err_0, err_1);
|
||||
for (i = 0; i < 32; i++) {
|
||||
if (err_0 & bit(i)) {
|
||||
device = mvs_find_dev_by_reg_set(mvi, i);
|
||||
if (device)
|
||||
mvs_release_task(mvi, device->sas_device);
|
||||
}
|
||||
if (err_1 & bit(i)) {
|
||||
device = mvs_find_dev_by_reg_set(mvi, i+32);
|
||||
if (device)
|
||||
mvs_release_task(mvi, device->sas_device);
|
||||
}
|
||||
}
|
||||
|
||||
mw32(MVS_NON_NCQ_ERR_0, err_0);
|
||||
mw32(MVS_NON_NCQ_ERR_1, err_1);
|
||||
}
|
||||
|
||||
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 tmp;
|
||||
u8 reg_set = *tfs;
|
||||
|
||||
if (*tfs == MVS_ID_NOT_MAPPED)
|
||||
return;
|
||||
|
||||
mvi->sata_reg_set &= ~bit(reg_set);
|
||||
if (reg_set < 32) {
|
||||
if (reg_set < 32)
|
||||
w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
|
||||
tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
|
||||
if (tmp)
|
||||
mw32(MVS_INT_STAT_SRS_0, tmp);
|
||||
} else {
|
||||
w_reg_set_enable(reg_set, mvi->sata_reg_set);
|
||||
tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
|
||||
if (tmp)
|
||||
mw32(MVS_INT_STAT_SRS_1, tmp);
|
||||
}
|
||||
else
|
||||
w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
|
||||
|
||||
*tfs = MVS_ID_NOT_MAPPED;
|
||||
|
||||
@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
|
||||
return 0;
|
||||
|
||||
i = mv_ffc64(mvi->sata_reg_set);
|
||||
if (i > 32) {
|
||||
if (i >= 32) {
|
||||
mvi->sata_reg_set |= bit(i);
|
||||
w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
|
||||
*tfs = i;
|
||||
@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
|
||||
int i;
|
||||
struct scatterlist *sg;
|
||||
struct mvs_prd *buf_prd = prd;
|
||||
struct mvs_prd_imt im_len;
|
||||
*(u32 *)&im_len = 0;
|
||||
for_each_sg(scatter, sg, nr, i) {
|
||||
buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
|
||||
buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
|
||||
im_len.len = sg_dma_len(sg);
|
||||
buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
|
||||
buf_prd++;
|
||||
}
|
||||
}
|
||||
@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
|
||||
{
|
||||
u32 phy_st;
|
||||
phy_st = mvs_read_phy_ctl(mvi, i);
|
||||
if (phy_st & PHY_READY_MASK) /* phy ready */
|
||||
if (phy_st & PHY_READY_MASK)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
|
||||
for (i = 0; i < 7; i++) {
|
||||
mvs_write_port_cfg_addr(mvi, port_id,
|
||||
CONFIG_ID_FRAME0 + i * 4);
|
||||
id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
|
||||
id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
|
||||
}
|
||||
memcpy(id, id_frame, 28);
|
||||
}
|
||||
@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
|
||||
int i;
|
||||
u32 id_frame[7];
|
||||
|
||||
/* mvs_hexdump(28, (u8 *)id_frame, 0); */
|
||||
for (i = 0; i < 7; i++) {
|
||||
mvs_write_port_cfg_addr(mvi, port_id,
|
||||
CONFIG_ATT_ID_FRAME0 + i * 4);
|
||||
id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
|
||||
id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
|
||||
mv_dprintk("94xx phy %d atta frame %d %x.\n",
|
||||
port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
|
||||
}
|
||||
/* mvs_hexdump(28, (u8 *)id_frame, 0); */
|
||||
memcpy(id, id_frame, 28);
|
||||
}
|
||||
|
||||
@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
|
||||
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
|
||||
struct sas_phy_linkrates *rates)
|
||||
{
|
||||
/* TODO */
|
||||
u32 lrmax = 0;
|
||||
u32 tmp;
|
||||
|
||||
tmp = mvs_read_phy_ctl(mvi, phy_id);
|
||||
lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
|
||||
|
||||
if (lrmax) {
|
||||
tmp &= ~(0x3 << 12);
|
||||
tmp |= lrmax;
|
||||
}
|
||||
mvs_write_phy_ctl(mvi, phy_id, tmp);
|
||||
mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
|
||||
}
|
||||
|
||||
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
|
||||
@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
|
||||
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
|
||||
int buf_len, int from, void *prd)
|
||||
{
|
||||
int i;
|
||||
struct mvs_prd *buf_prd = prd;
|
||||
dma_addr_t buf_dma;
|
||||
struct mvs_prd_imt im_len;
|
||||
|
||||
*(u32 *)&im_len = 0;
|
||||
buf_prd += from;
|
||||
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
|
||||
buf_prd->addr = cpu_to_le64(buf_dma);
|
||||
buf_prd->im_len.len = cpu_to_le32(buf_len);
|
||||
++buf_prd;
|
||||
|
||||
#define PRD_CHAINED_ENTRY 0x01
|
||||
if ((mvi->pdev->revision == VANIR_A0_REV) ||
|
||||
(mvi->pdev->revision == VANIR_B0_REV))
|
||||
buf_dma = (phy_mask <= 0x08) ?
|
||||
mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
|
||||
else
|
||||
return;
|
||||
|
||||
for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
|
||||
if (i == MAX_SG_ENTRY - 1) {
|
||||
buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
|
||||
im_len.len = 2;
|
||||
im_len.misc_ctl = PRD_CHAINED_ENTRY;
|
||||
} else {
|
||||
buf_prd->addr = cpu_to_le64(buf_dma);
|
||||
im_len.len = buf_len;
|
||||
}
|
||||
buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
|
||||
* with 64xx fixes
|
||||
*/
|
||||
static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
|
||||
u8 clear_all)
|
||||
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
|
||||
{
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 tmp = 0;
|
||||
/*
|
||||
* the max count is 0x1ff, while our max slot is 0x200,
|
||||
* it will make count 0.
|
||||
*/
|
||||
if (time == 0) {
|
||||
mw32(MVS_INT_COAL, 0);
|
||||
mw32(MVS_INT_COAL_TMOUT, 0x10000);
|
||||
} else {
|
||||
if (MVS_CHIP_SLOT_SZ > 0x1ff)
|
||||
mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
|
||||
else
|
||||
mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
|
||||
|
||||
tmp = 0x10000 | time;
|
||||
mw32(MVS_INT_COAL_TMOUT, tmp);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
const struct mvs_dispatch mvs_94xx_dispatch = {
|
||||
@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
|
||||
mvs_write_port_irq_stat,
|
||||
mvs_read_port_irq_mask,
|
||||
mvs_write_port_irq_mask,
|
||||
mvs_get_sas_addr,
|
||||
mvs_94xx_command_active,
|
||||
mvs_94xx_clear_srs_irq,
|
||||
mvs_94xx_issue_stop,
|
||||
@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
|
||||
mvs_94xx_spi_buildcmd,
|
||||
mvs_94xx_spi_issuecmd,
|
||||
mvs_94xx_spi_waitdataready,
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
mvs_94xx_fix_dma,
|
||||
#endif
|
||||
mvs_94xx_tune_interrupt,
|
||||
mvs_94xx_non_spec_ncq_error,
|
||||
};
|
||||
|
||||
|
@ -30,6 +30,14 @@
|
||||
|
||||
#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
|
||||
|
||||
enum VANIR_REVISION_ID {
|
||||
VANIR_A0_REV = 0xA0,
|
||||
VANIR_B0_REV = 0x01,
|
||||
VANIR_C0_REV = 0x02,
|
||||
VANIR_C1_REV = 0x03,
|
||||
VANIR_C2_REV = 0xC2,
|
||||
};
|
||||
|
||||
enum hw_registers {
|
||||
MVS_GBL_CTL = 0x04, /* global control */
|
||||
MVS_GBL_INT_STAT = 0x00, /* global irq status */
|
||||
@ -101,6 +109,7 @@ enum hw_registers {
|
||||
MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
|
||||
MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
|
||||
MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
|
||||
MVS_COMMAND_ACTIVE = 0x300,
|
||||
};
|
||||
|
||||
enum pci_cfg_registers {
|
||||
@ -112,26 +121,29 @@ enum pci_cfg_registers {
|
||||
|
||||
/* SAS/SATA Vendor Specific Port Registers */
|
||||
enum sas_sata_vsp_regs {
|
||||
VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
|
||||
VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
|
||||
VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
|
||||
VSR_PHY_MODE3 = 0x03 * 4, /* pll */
|
||||
VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
|
||||
VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
|
||||
VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
|
||||
VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
|
||||
VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
|
||||
VSR_PHY_MODE9 = 0x09 * 4, /* Test */
|
||||
VSR_PHY_MODE10 = 0x0A * 4, /* Power */
|
||||
VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
|
||||
VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */
|
||||
VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */
|
||||
VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
|
||||
VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
|
||||
VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
|
||||
VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
|
||||
VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
|
||||
VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
|
||||
VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
|
||||
VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
|
||||
VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
|
||||
VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
|
||||
VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
|
||||
VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
|
||||
VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
|
||||
|
||||
VSR_PHY_FFE_CONTROL = 0x10C,
|
||||
VSR_PHY_DFE_UPDATE_CRTL = 0x110,
|
||||
VSR_REF_CLOCK_CRTL = 0x1A0,
|
||||
};
|
||||
|
||||
enum chip_register_bits {
|
||||
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
|
||||
PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
|
||||
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
|
||||
PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
|
||||
PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
|
||||
PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
|
||||
(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
|
||||
};
|
||||
@ -169,22 +181,75 @@ enum pci_interrupt_cause {
|
||||
IRQ_PCIE_ERR = (1 << 31),
|
||||
};
|
||||
|
||||
union reg_phy_cfg {
|
||||
u32 v;
|
||||
struct {
|
||||
u32 phy_reset:1;
|
||||
u32 sas_support:1;
|
||||
u32 sata_support:1;
|
||||
u32 sata_host_mode:1;
|
||||
/*
|
||||
* bit 2: 6Gbps support
|
||||
* bit 1: 3Gbps support
|
||||
* bit 0: 1.5Gbps support
|
||||
*/
|
||||
u32 speed_support:3;
|
||||
u32 snw_3_support:1;
|
||||
u32 tx_lnk_parity:1;
|
||||
/*
|
||||
* bit 5: G1 (1.5Gbps) Without SSC
|
||||
* bit 4: G1 (1.5Gbps) with SSC
|
||||
* bit 3: G2 (3.0Gbps) Without SSC
|
||||
* bit 2: G2 (3.0Gbps) with SSC
|
||||
* bit 1: G3 (6.0Gbps) without SSC
|
||||
* bit 0: G3 (6.0Gbps) with SSC
|
||||
*/
|
||||
u32 tx_spt_phs_lnk_rate:6;
|
||||
/* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
|
||||
u32 tx_lgcl_lnk_rate:4;
|
||||
u32 tx_ssc_type:1;
|
||||
u32 sata_spin_up_spt:1;
|
||||
u32 sata_spin_up_en:1;
|
||||
u32 bypass_oob:1;
|
||||
u32 disable_phy:1;
|
||||
u32 rsvd:8;
|
||||
} u;
|
||||
};
|
||||
|
||||
#define MAX_SG_ENTRY 255
|
||||
|
||||
struct mvs_prd_imt {
|
||||
#ifndef __BIG_ENDIAN
|
||||
__le32 len:22;
|
||||
u8 _r_a:2;
|
||||
u8 misc_ctl:4;
|
||||
u8 inter_sel:4;
|
||||
#else
|
||||
u32 inter_sel:4;
|
||||
u32 misc_ctl:4;
|
||||
u32 _r_a:2;
|
||||
u32 len:22;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct mvs_prd {
|
||||
/* 64-bit buffer address */
|
||||
__le64 addr;
|
||||
/* 22-bit length */
|
||||
struct mvs_prd_imt im_len;
|
||||
__le32 im_len;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/*
|
||||
* these registers are accessed through port vendor
|
||||
* specific address/data registers
|
||||
*/
|
||||
enum sas_sata_phy_regs {
|
||||
GENERATION_1_SETTING = 0x118,
|
||||
GENERATION_1_2_SETTING = 0x11C,
|
||||
GENERATION_2_3_SETTING = 0x120,
|
||||
GENERATION_3_4_SETTING = 0x124,
|
||||
};
|
||||
|
||||
#define SPI_CTRL_REG_94XX 0xc800
|
||||
#define SPI_ADDR_REG_94XX 0xc804
|
||||
#define SPI_WR_DATA_REG_94XX 0xc808
|
||||
|
@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
/* workaround for SATA R-ERR, to ignore phy glitch */
|
||||
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
|
||||
tmp &= ~(1 << 9);
|
||||
tmp |= (1 << 10);
|
||||
@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
|
||||
tmp |= 0x3fff;
|
||||
mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
|
||||
|
||||
/* workaround for WDTIMEOUT , set to 550 ms */
|
||||
mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
|
||||
|
||||
/* not to halt for different port op during wideport link change */
|
||||
mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
|
||||
|
||||
/* workaround for Seagate disk not-found OOB sequence, recv
|
||||
* COMINIT before sending out COMWAKE */
|
||||
tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
|
||||
tmp &= 0x0000ffff;
|
||||
tmp |= 0x00fa0000;
|
||||
mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
|
||||
|
||||
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
|
||||
tmp &= 0x1fffffff;
|
||||
tmp |= (2U << 29); /* 8 ms retry */
|
||||
mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
|
||||
}
|
||||
|
||||
static inline void mvs_int_sata(struct mvs_info *mvi)
|
||||
@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
|
||||
mvs_int_port(mvi, i, tmp);
|
||||
}
|
||||
|
||||
if (stat & CINT_NON_SPEC_NCQ_ERROR)
|
||||
MVS_CHIP_DISP->non_spec_ncq_error(mvi);
|
||||
|
||||
if (stat & CINT_SRS)
|
||||
mvs_int_sata(mvi);
|
||||
|
||||
|
@ -43,7 +43,6 @@ enum chip_flavors {
|
||||
|
||||
/* driver compile-time configuration */
|
||||
enum driver_configuration {
|
||||
MVS_SLOTS = 512, /* command slots */
|
||||
MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
|
||||
MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
|
||||
/* software requires power-of-2
|
||||
@ -56,8 +55,7 @@ enum driver_configuration {
|
||||
MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
|
||||
MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
|
||||
MVS_OAF_SZ = 64, /* Open address frame buffer size */
|
||||
MVS_QUEUE_SIZE = 32, /* Support Queue depth */
|
||||
MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
|
||||
MVS_QUEUE_SIZE = 64, /* Support Queue depth */
|
||||
MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
|
||||
};
|
||||
|
||||
@ -144,6 +142,7 @@ enum hw_register_bits {
|
||||
CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
|
||||
CINT_MEM = (1U << 26), /* int mem parity err */
|
||||
CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
|
||||
CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
|
||||
CINT_SRS = (1U << 3), /* SRS event */
|
||||
CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
|
||||
CINT_DONE = (1U << 0), /* cmd completion */
|
||||
@ -161,7 +160,7 @@ enum hw_register_bits {
|
||||
TXQ_CMD_SSP = 1, /* SSP protocol */
|
||||
TXQ_CMD_SMP = 2, /* SMP protocol */
|
||||
TXQ_CMD_STP = 3, /* STP/SATA protocol */
|
||||
TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
|
||||
TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
|
||||
TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
|
||||
TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
|
||||
TXQ_MODE_TARGET = 0,
|
||||
@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
|
||||
};
|
||||
|
||||
enum mvs_info_flags {
|
||||
MVF_MSI = (1U << 0), /* MSI is enabled */
|
||||
MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
|
||||
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
|
||||
};
|
||||
|
||||
enum mvs_event_flags {
|
||||
PHY_PLUG_EVENT = (3U),
|
||||
PHY_PLUG_EVENT = (3U),
|
||||
PHY_PLUG_IN = (1U << 0), /* phy plug in */
|
||||
PHY_PLUG_OUT = (1U << 1), /* phy plug out */
|
||||
EXP_BRCT_CHG = (1U << 2), /* broadcast change */
|
||||
};
|
||||
|
||||
enum mvs_port_type {
|
||||
|
@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
|
||||
"\tThe mvsas SAS LLDD supports both modes.\n"
|
||||
"\tDefault: 1 (Direct Mode).\n");
|
||||
|
||||
int interrupt_coalescing = 0x80;
|
||||
|
||||
static struct scsi_transport_template *mvs_stt;
|
||||
struct kmem_cache *mvs_task_list_cache;
|
||||
static const struct mvs_chip_info mvs_chips[] = {
|
||||
[chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
|
||||
[chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
|
||||
[chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
|
||||
[chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
|
||||
[chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
|
||||
[chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
|
||||
[chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
|
||||
[chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
|
||||
[chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
|
||||
[chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
|
||||
[chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
|
||||
[chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
|
||||
[chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
|
||||
[chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
|
||||
[chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
|
||||
[chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
|
||||
[chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
|
||||
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
|
||||
};
|
||||
|
||||
struct device_attribute *mvst_host_attrs[];
|
||||
|
||||
#define SOC_SAS_NUM 2
|
||||
#define SG_MX 64
|
||||
|
||||
static struct scsi_host_template mvs_sht = {
|
||||
.module = THIS_MODULE,
|
||||
@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
|
||||
.can_queue = 1,
|
||||
.cmd_per_lun = 1,
|
||||
.this_id = -1,
|
||||
.sg_tablesize = SG_MX,
|
||||
.sg_tablesize = SG_ALL,
|
||||
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.eh_device_reset_handler = sas_eh_device_reset_handler,
|
||||
@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
|
||||
.slave_alloc = mvs_slave_alloc,
|
||||
.target_destroy = sas_target_destroy,
|
||||
.ioctl = sas_ioctl,
|
||||
.shost_attrs = mvst_host_attrs,
|
||||
};
|
||||
|
||||
static struct sas_domain_function_template mvs_transport_ops = {
|
||||
@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
|
||||
phy->mvi = mvi;
|
||||
phy->port = NULL;
|
||||
init_timer(&phy->timer);
|
||||
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
|
||||
sas_phy->class = SAS;
|
||||
@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
|
||||
if (mvi->flags & MVF_FLAG_SOC)
|
||||
slot_nr = MVS_SOC_SLOTS;
|
||||
else
|
||||
slot_nr = MVS_SLOTS;
|
||||
slot_nr = MVS_CHIP_SLOT_SZ;
|
||||
|
||||
if (mvi->dma_pool)
|
||||
pci_pool_destroy(mvi->dma_pool);
|
||||
@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
|
||||
dma_free_coherent(mvi->dev,
|
||||
sizeof(*mvi->slot) * slot_nr,
|
||||
mvi->slot, mvi->slot_dma);
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
|
||||
if (mvi->bulk_buffer)
|
||||
dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
|
||||
mvi->bulk_buffer, mvi->bulk_buffer_dma);
|
||||
#endif
|
||||
if (mvi->bulk_buffer1)
|
||||
dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
|
||||
mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
|
||||
|
||||
MVS_CHIP_DISP->chip_iounmap(mvi);
|
||||
if (mvi->shost)
|
||||
scsi_host_put(mvi->shost);
|
||||
list_for_each_entry(mwq, &mvi->wq_list, entry)
|
||||
cancel_delayed_work(&mwq->work_q);
|
||||
kfree(mvi->tags);
|
||||
kfree(mvi);
|
||||
}
|
||||
|
||||
#ifdef MVS_USE_TASKLET
|
||||
struct tasklet_struct mv_tasklet;
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
static void mvs_tasklet(unsigned long opaque)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 stat;
|
||||
u16 core_nr, i = 0;
|
||||
|
||||
@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
|
||||
if (unlikely(!mvi))
|
||||
BUG_ON(1);
|
||||
|
||||
stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
|
||||
if (!stat)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < core_nr; i++) {
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
|
||||
stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
|
||||
if (stat)
|
||||
MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
|
||||
MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
|
||||
}
|
||||
out:
|
||||
MVS_CHIP_DISP->interrupt_enable(mvi);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
static irqreturn_t mvs_interrupt(int irq, void *opaque)
|
||||
{
|
||||
u32 core_nr, i = 0;
|
||||
u32 core_nr;
|
||||
u32 stat;
|
||||
struct mvs_info *mvi;
|
||||
struct sas_ha_struct *sha = opaque;
|
||||
#ifndef CONFIG_SCSI_MVSAS_TASKLET
|
||||
u32 i;
|
||||
#endif
|
||||
|
||||
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
|
||||
|
||||
if (unlikely(!mvi))
|
||||
return IRQ_NONE;
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
MVS_CHIP_DISP->interrupt_disable(mvi);
|
||||
#endif
|
||||
|
||||
stat = MVS_CHIP_DISP->isr_status(mvi, irq);
|
||||
if (!stat)
|
||||
if (!stat) {
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
MVS_CHIP_DISP->interrupt_enable(mvi);
|
||||
#endif
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
#ifdef MVS_USE_TASKLET
|
||||
tasklet_schedule(&mv_tasklet);
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
|
||||
#else
|
||||
for (i = 0; i < core_nr; i++) {
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
|
||||
@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
|
||||
if (mvi->flags & MVF_FLAG_SOC)
|
||||
slot_nr = MVS_SOC_SLOTS;
|
||||
else
|
||||
slot_nr = MVS_SLOTS;
|
||||
slot_nr = MVS_CHIP_SLOT_SZ;
|
||||
|
||||
spin_lock_init(&mvi->lock);
|
||||
for (i = 0; i < mvi->chip->n_phy; i++) {
|
||||
@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
|
||||
goto err_out;
|
||||
memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
|
||||
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
|
||||
TRASH_BUCKET_SIZE,
|
||||
&mvi->bulk_buffer_dma, GFP_KERNEL);
|
||||
if (!mvi->bulk_buffer)
|
||||
goto err_out;
|
||||
#endif
|
||||
|
||||
mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
|
||||
TRASH_BUCKET_SIZE,
|
||||
&mvi->bulk_buffer_dma1, GFP_KERNEL);
|
||||
if (!mvi->bulk_buffer1)
|
||||
goto err_out;
|
||||
|
||||
sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
|
||||
mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
|
||||
if (!mvi->dma_pool) {
|
||||
@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent,
|
||||
struct Scsi_Host *shost, unsigned int id)
|
||||
{
|
||||
struct mvs_info *mvi;
|
||||
struct mvs_info *mvi = NULL;
|
||||
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
|
||||
|
||||
mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
|
||||
GFP_KERNEL);
|
||||
mvi = kzalloc(sizeof(*mvi) +
|
||||
(1L << mvs_chips[ent->driver_data].slot_width) *
|
||||
sizeof(struct mvs_slot_info), GFP_KERNEL);
|
||||
if (!mvi)
|
||||
return NULL;
|
||||
|
||||
@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
|
||||
mvi->chip_id = ent->driver_data;
|
||||
mvi->chip = &mvs_chips[mvi->chip_id];
|
||||
INIT_LIST_HEAD(&mvi->wq_list);
|
||||
mvi->irq = pdev->irq;
|
||||
|
||||
((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
|
||||
((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
|
||||
@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
|
||||
mvi->id = id;
|
||||
mvi->sas = sha;
|
||||
mvi->shost = shost;
|
||||
#ifdef MVS_USE_TASKLET
|
||||
tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
|
||||
#endif
|
||||
|
||||
mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
|
||||
if (!mvi->tags)
|
||||
goto err_out;
|
||||
|
||||
if (MVS_CHIP_DISP->chip_ioremap(mvi))
|
||||
goto err_out;
|
||||
@ -388,7 +414,6 @@ err_out:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* move to PCI layer or libata core? */
|
||||
static int pci_go_64(struct pci_dev *pdev)
|
||||
{
|
||||
int rc;
|
||||
@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
|
||||
((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
|
||||
|
||||
shost->transportt = mvs_stt;
|
||||
shost->max_id = 128;
|
||||
shost->max_id = MVS_MAX_DEVICES;
|
||||
shost->max_lun = ~0;
|
||||
shost->max_channel = 1;
|
||||
shost->max_cmd_len = 16;
|
||||
@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
|
||||
if (mvi->flags & MVF_FLAG_SOC)
|
||||
can_queue = MVS_SOC_CAN_QUEUE;
|
||||
else
|
||||
can_queue = MVS_CAN_QUEUE;
|
||||
can_queue = MVS_CHIP_SLOT_SZ;
|
||||
|
||||
sha->lldd_queue_size = can_queue;
|
||||
shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
|
||||
shost->can_queue = can_queue;
|
||||
mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
|
||||
mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
|
||||
sha->core.shost = mvi->shost;
|
||||
}
|
||||
|
||||
@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
|
||||
{
|
||||
unsigned int rc, nhost = 0;
|
||||
struct mvs_info *mvi;
|
||||
struct mvs_prv_info *mpi;
|
||||
irq_handler_t irq_handler = mvs_interrupt;
|
||||
struct Scsi_Host *shost = NULL;
|
||||
const struct mvs_chip_info *chip;
|
||||
@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
|
||||
goto err_out_regions;
|
||||
}
|
||||
|
||||
memset(&mvi->hba_info_param, 0xFF,
|
||||
sizeof(struct hba_info_page));
|
||||
|
||||
mvs_init_sas_add(mvi);
|
||||
|
||||
mvi->instance = nhost;
|
||||
@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
|
||||
}
|
||||
nhost++;
|
||||
} while (nhost < chip->n_host);
|
||||
#ifdef MVS_USE_TASKLET
|
||||
tasklet_init(&mv_tasklet, mvs_tasklet,
|
||||
mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
|
||||
(unsigned long)SHOST_TO_SAS_HA(shost));
|
||||
#endif
|
||||
|
||||
@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
|
||||
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
|
||||
|
||||
#ifdef MVS_USE_TASKLET
|
||||
tasklet_kill(&mv_tasklet);
|
||||
#ifdef CONFIG_SCSI_MVSAS_TASKLET
|
||||
tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
|
||||
#endif
|
||||
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
|
||||
scsi_remove_host(mvi->shost);
|
||||
|
||||
MVS_CHIP_DISP->interrupt_disable(mvi);
|
||||
free_irq(mvi->irq, sha);
|
||||
free_irq(mvi->pdev->irq, sha);
|
||||
for (i = 0; i < core_nr; i++) {
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
|
||||
mvs_free(mvi);
|
||||
@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
|
||||
.remove = __devexit_p(mvs_pci_remove),
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
mvs_show_driver_version(struct device *cdev,
|
||||
struct device_attribute *attr, char *buffer)
|
||||
{
|
||||
return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(driver_version,
|
||||
S_IRUGO,
|
||||
mvs_show_driver_version,
|
||||
NULL);
|
||||
|
||||
static ssize_t
|
||||
mvs_store_interrupt_coalescing(struct device *cdev,
|
||||
struct device_attribute *attr,
|
||||
const char *buffer, size_t size)
|
||||
{
|
||||
int val = 0;
|
||||
struct mvs_info *mvi = NULL;
|
||||
struct Scsi_Host *shost = class_to_shost(cdev);
|
||||
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
|
||||
u8 i, core_nr;
|
||||
if (buffer == NULL)
|
||||
return size;
|
||||
|
||||
if (sscanf(buffer, "%d", &val) != 1)
|
||||
return -EINVAL;
|
||||
|
||||
if (val >= 0x10000) {
|
||||
mv_dprintk("interrupt coalescing timer %d us is"
|
||||
"too long\n", val);
|
||||
return strlen(buffer);
|
||||
}
|
||||
|
||||
interrupt_coalescing = val;
|
||||
|
||||
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
|
||||
|
||||
if (unlikely(!mvi))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < core_nr; i++) {
|
||||
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
|
||||
if (MVS_CHIP_DISP->tune_interrupt)
|
||||
MVS_CHIP_DISP->tune_interrupt(mvi,
|
||||
interrupt_coalescing);
|
||||
}
|
||||
mv_dprintk("set interrupt coalescing time to %d us\n",
|
||||
interrupt_coalescing);
|
||||
return strlen(buffer);
|
||||
}
|
||||
|
||||
static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
|
||||
struct device_attribute *attr, char *buffer)
|
||||
{
|
||||
return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(interrupt_coalescing,
|
||||
S_IRUGO|S_IWUSR,
|
||||
mvs_show_interrupt_coalescing,
|
||||
mvs_store_interrupt_coalescing);
|
||||
|
||||
/* task handler */
|
||||
struct task_struct *mvs_th;
|
||||
static int __init mvs_init(void)
|
||||
@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
|
||||
kmem_cache_destroy(mvs_task_list_cache);
|
||||
}
|
||||
|
||||
struct device_attribute *mvst_host_attrs[] = {
|
||||
&dev_attr_driver_version,
|
||||
&dev_attr_interrupt_coalescing,
|
||||
NULL,
|
||||
};
|
||||
|
||||
module_init(mvs_init);
|
||||
module_exit(mvs_exit);
|
||||
|
||||
|
@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
|
||||
|
||||
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
|
||||
{
|
||||
void *bitmap = &mvi->tags;
|
||||
void *bitmap = mvi->tags;
|
||||
clear_bit(tag, bitmap);
|
||||
}
|
||||
|
||||
@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
|
||||
|
||||
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
|
||||
{
|
||||
void *bitmap = &mvi->tags;
|
||||
void *bitmap = mvi->tags;
|
||||
set_bit(tag, bitmap);
|
||||
}
|
||||
|
||||
inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
|
||||
{
|
||||
unsigned int index, tag;
|
||||
void *bitmap = &mvi->tags;
|
||||
void *bitmap = mvi->tags;
|
||||
|
||||
index = find_first_zero_bit(bitmap, mvi->tags_num);
|
||||
tag = index;
|
||||
@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
|
||||
mvs_tag_clear(mvi, i);
|
||||
}
|
||||
|
||||
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
|
||||
{
|
||||
u32 i;
|
||||
u32 run;
|
||||
u32 offset;
|
||||
|
||||
offset = 0;
|
||||
while (size) {
|
||||
printk(KERN_DEBUG"%08X : ", baseaddr + offset);
|
||||
if (size >= 16)
|
||||
run = 16;
|
||||
else
|
||||
run = size;
|
||||
size -= run;
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (i < run)
|
||||
printk(KERN_DEBUG"%02X ", (u32)data[i]);
|
||||
else
|
||||
printk(KERN_DEBUG" ");
|
||||
}
|
||||
printk(KERN_DEBUG": ");
|
||||
for (i = 0; i < run; i++)
|
||||
printk(KERN_DEBUG"%c",
|
||||
isalnum(data[i]) ? data[i] : '.');
|
||||
printk(KERN_DEBUG"\n");
|
||||
data = &data[16];
|
||||
offset += run;
|
||||
}
|
||||
printk(KERN_DEBUG"\n");
|
||||
}
|
||||
|
||||
#if (_MV_DUMP > 1)
|
||||
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
|
||||
enum sas_protocol proto)
|
||||
{
|
||||
u32 offset;
|
||||
struct mvs_slot_info *slot = &mvi->slot_info[tag];
|
||||
|
||||
offset = slot->cmd_size + MVS_OAF_SZ +
|
||||
MVS_CHIP_DISP->prd_size() * slot->n_elem;
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
|
||||
tag);
|
||||
mvs_hexdump(32, (u8 *) slot->response,
|
||||
(u32) slot->buf_dma + offset);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
|
||||
enum sas_protocol proto)
|
||||
{
|
||||
#if (_MV_DUMP > 1)
|
||||
u32 sz, w_ptr;
|
||||
u64 addr;
|
||||
struct mvs_slot_info *slot = &mvi->slot_info[tag];
|
||||
|
||||
/*Delivery Queue */
|
||||
sz = MVS_CHIP_SLOT_SZ;
|
||||
w_ptr = slot->tx;
|
||||
addr = mvi->tx_dma;
|
||||
dev_printk(KERN_DEBUG, mvi->dev,
|
||||
"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
|
||||
dev_printk(KERN_DEBUG, mvi->dev,
|
||||
"Delivery Queue Base Address=0x%llX (PA)"
|
||||
"(tx_dma=0x%llX), Entry=%04d\n",
|
||||
addr, (unsigned long long)mvi->tx_dma, w_ptr);
|
||||
mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
|
||||
(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
|
||||
/*Command List */
|
||||
addr = mvi->slot_dma;
|
||||
dev_printk(KERN_DEBUG, mvi->dev,
|
||||
"Command List Base Address=0x%llX (PA)"
|
||||
"(slot_dma=0x%llX), Header=%03d\n",
|
||||
addr, (unsigned long long)slot->buf_dma, tag);
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
|
||||
/*mvs_cmd_hdr */
|
||||
mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
|
||||
(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
|
||||
/*1.command table area */
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
|
||||
mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
|
||||
/*2.open address frame area */
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
|
||||
mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
|
||||
(u32) slot->buf_dma + slot->cmd_size);
|
||||
/*3.status buffer */
|
||||
mvs_hba_sb_dump(mvi, tag, proto);
|
||||
/*4.PRD table */
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
|
||||
mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
|
||||
(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
|
||||
(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void mvs_hba_cq_dump(struct mvs_info *mvi)
|
||||
{
|
||||
#if (_MV_DUMP > 2)
|
||||
u64 addr;
|
||||
void __iomem *regs = mvi->regs;
|
||||
u32 entry = mvi->rx_cons + 1;
|
||||
u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
|
||||
|
||||
/*Completion Queue */
|
||||
addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
|
||||
dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
|
||||
mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
|
||||
dev_printk(KERN_DEBUG, mvi->dev,
|
||||
"Completion List Base Address=0x%llX (PA), "
|
||||
"CQ_Entry=%04d, CQ_WP=0x%08X\n",
|
||||
addr, entry - 1, mvi->rx[0]);
|
||||
mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
|
||||
mvi->rx_dma + sizeof(u32) * entry);
|
||||
#endif
|
||||
}
|
||||
|
||||
void mvs_get_sas_addr(void *buf, u32 buflen)
|
||||
{
|
||||
/*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
|
||||
}
|
||||
|
||||
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
|
||||
{
|
||||
unsigned long i = 0, j = 0, hi = 0;
|
||||
@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
|
||||
|
||||
}
|
||||
|
||||
/* FIXME */
|
||||
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
|
||||
{
|
||||
unsigned long i = 0, j = 0, n = 0, num = 0;
|
||||
@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
|
||||
return num;
|
||||
}
|
||||
|
||||
struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
|
||||
u8 reg_set)
|
||||
{
|
||||
u32 dev_no;
|
||||
for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
|
||||
if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
|
||||
continue;
|
||||
|
||||
if (mvi->devices[dev_no].taskfileset == reg_set)
|
||||
return &mvi->devices[dev_no];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void mvs_free_reg_set(struct mvs_info *mvi,
|
||||
struct mvs_device *dev)
|
||||
{
|
||||
@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
|
||||
}
|
||||
}
|
||||
|
||||
/* FIXME: locking? */
|
||||
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
|
||||
void *funcdata)
|
||||
{
|
||||
@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
|
||||
tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
|
||||
if (tmp & PHY_RST_HARD)
|
||||
break;
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
|
||||
break;
|
||||
|
||||
case PHY_FUNC_LINK_RESET:
|
||||
MVS_CHIP_DISP->phy_enable(mvi, phy_id);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
|
||||
break;
|
||||
|
||||
case PHY_FUNC_DISABLE:
|
||||
@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
if (dev_is_sata(dev)) {
|
||||
/* may set PIO mode */
|
||||
#if MV_DISABLE_NCQ
|
||||
struct ata_port *ap = dev->sata_dev.ap;
|
||||
struct ata_device *adev = ap->link.device;
|
||||
adev->flags |= ATA_DFLAG_NCQ_OFF;
|
||||
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
|
||||
#endif
|
||||
if (!dev_is_sata(dev)) {
|
||||
sas_change_queue_depth(sdev,
|
||||
MVS_QUEUE_SIZE,
|
||||
SCSI_QDEPTH_DEFAULT);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
|
||||
unsigned short core_nr;
|
||||
struct mvs_info *mvi;
|
||||
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
|
||||
struct mvs_prv_info *mvs_prv = sha->lldd_ha;
|
||||
|
||||
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
|
||||
|
||||
@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
|
||||
for (i = 0; i < mvi->chip->n_phy; ++i)
|
||||
mvs_bytes_dmaed(mvi, i);
|
||||
}
|
||||
mvs_prv->scan_finished = 1;
|
||||
}
|
||||
|
||||
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
|
||||
{
|
||||
/* give the phy enabling interrupt event time to come in (1s
|
||||
* is empirically about all it takes) */
|
||||
if (time < HZ)
|
||||
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
|
||||
struct mvs_prv_info *mvs_prv = sha->lldd_ha;
|
||||
|
||||
if (mvs_prv->scan_finished == 0)
|
||||
return 0;
|
||||
/* Wait for discovery to finish */
|
||||
|
||||
scsi_flush_work(shost);
|
||||
return 1;
|
||||
}
|
||||
@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
|
||||
void *buf_prd;
|
||||
struct mvs_slot_info *slot = &mvi->slot_info[tag];
|
||||
u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
|
||||
#if _MV_DUMP
|
||||
u8 *buf_cmd;
|
||||
void *from;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DMA-map SMP request, response buffers
|
||||
*/
|
||||
@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
|
||||
buf_tmp = slot->buf;
|
||||
buf_tmp_dma = slot->buf_dma;
|
||||
|
||||
#if _MV_DUMP
|
||||
buf_cmd = buf_tmp;
|
||||
hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
|
||||
buf_tmp += req_len;
|
||||
buf_tmp_dma += req_len;
|
||||
slot->cmd_size = req_len;
|
||||
#else
|
||||
hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
|
||||
#endif
|
||||
|
||||
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
|
||||
buf_oaf = buf_tmp;
|
||||
@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
|
||||
/* fill in PRD (scatter/gather) table, if any */
|
||||
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
|
||||
|
||||
#if _MV_DUMP
|
||||
/* copy cmd table */
|
||||
from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
|
||||
memcpy(buf_cmd, from + sg_req->offset, req_len);
|
||||
kunmap_atomic(from, KM_IRQ0);
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
err_out_2:
|
||||
@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
||||
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
|
||||
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
|
||||
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
if (task->data_dir == DMA_FROM_DEVICE)
|
||||
flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
|
||||
else
|
||||
flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
|
||||
#else
|
||||
flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
|
||||
#endif
|
||||
|
||||
if (task->ata_task.use_ncq)
|
||||
flags |= MCH_FPDMA;
|
||||
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
|
||||
@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
||||
flags |= MCH_ATAPI;
|
||||
}
|
||||
|
||||
/* FIXME: fill in port multiplier number */
|
||||
|
||||
hdr->flags = cpu_to_le32(flags);
|
||||
|
||||
/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
|
||||
if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
|
||||
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
|
||||
else
|
||||
@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
||||
|
||||
buf_tmp += MVS_ATA_CMD_SZ;
|
||||
buf_tmp_dma += MVS_ATA_CMD_SZ;
|
||||
#if _MV_DUMP
|
||||
slot->cmd_size = MVS_ATA_CMD_SZ;
|
||||
#endif
|
||||
|
||||
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
|
||||
/* used for STP. unused for SATA? */
|
||||
@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
||||
buf_tmp_dma += i;
|
||||
|
||||
/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
|
||||
/* FIXME: probably unused, for SATA. kept here just in case
|
||||
* we get a STP/SATA error information record
|
||||
*/
|
||||
slot->response = buf_tmp;
|
||||
hdr->status_buf = cpu_to_le64(buf_tmp_dma);
|
||||
if (mvi->flags & MVF_FLAG_SOC)
|
||||
@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
|
||||
|
||||
/* fill in PRD (scatter/gather) table, if any */
|
||||
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
|
||||
if (task->data_dir == DMA_FROM_DEVICE)
|
||||
MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
|
||||
MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
|
||||
TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
|
||||
}
|
||||
if (is_tmf)
|
||||
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
|
||||
else
|
||||
flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
|
||||
|
||||
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
|
||||
hdr->tags = cpu_to_le32(tag);
|
||||
hdr->data_len = cpu_to_le32(task->total_xfer_len);
|
||||
@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
|
||||
|
||||
buf_tmp += MVS_SSP_CMD_SZ;
|
||||
buf_tmp_dma += MVS_SSP_CMD_SZ;
|
||||
#if _MV_DUMP
|
||||
slot->cmd_size = MVS_SSP_CMD_SZ;
|
||||
#endif
|
||||
|
||||
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
|
||||
buf_oaf = buf_tmp;
|
||||
@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
|
||||
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
|
||||
spin_unlock(&task->task_state_lock);
|
||||
|
||||
mvs_hba_memory_dump(mvi, tag, task->task_proto);
|
||||
mvi_dev->running_req++;
|
||||
++(*pass);
|
||||
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
|
||||
@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
|
||||
mvs_slot_free(mvi, slot_idx);
|
||||
}
|
||||
|
||||
static void mvs_update_wideport(struct mvs_info *mvi, int i)
|
||||
static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
|
||||
{
|
||||
struct mvs_phy *phy = &mvi->phy[i];
|
||||
struct mvs_phy *phy = &mvi->phy[phy_no];
|
||||
struct mvs_port *port = phy->port;
|
||||
int j, no;
|
||||
|
||||
@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
|
||||
return NULL;
|
||||
|
||||
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
|
||||
s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
|
||||
s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
|
||||
|
||||
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
|
||||
s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
|
||||
s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
|
||||
|
||||
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
|
||||
s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
|
||||
s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
|
||||
|
||||
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
|
||||
s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
|
||||
s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
|
||||
|
||||
/* Workaround: take some ATAPI devices for ATA */
|
||||
if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
|
||||
s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
|
||||
|
||||
@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
|
||||
return irq_status & PHYEV_SIG_FIS;
|
||||
}
|
||||
|
||||
static void mvs_sig_remove_timer(struct mvs_phy *phy)
|
||||
{
|
||||
if (phy->timer.function)
|
||||
del_timer(&phy->timer);
|
||||
phy->timer.function = NULL;
|
||||
}
|
||||
|
||||
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
|
||||
{
|
||||
struct mvs_phy *phy = &mvi->phy[i];
|
||||
@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
|
||||
if (phy->phy_type & PORT_TYPE_SATA) {
|
||||
phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
|
||||
if (mvs_is_sig_fis_received(phy->irq_status)) {
|
||||
mvs_sig_remove_timer(phy);
|
||||
phy->phy_attached = 1;
|
||||
phy->att_dev_sas_addr =
|
||||
i + mvi->id * mvi->chip->n_phy;
|
||||
@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
|
||||
tmp | PHYEV_SIG_FIS);
|
||||
phy->phy_attached = 0;
|
||||
phy->phy_type &= ~PORT_TYPE_SATA;
|
||||
MVS_CHIP_DISP->phy_reset(mvi, i, 0);
|
||||
goto out_done;
|
||||
}
|
||||
} else if (phy->phy_type & PORT_TYPE_SAS
|
||||
@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
|
||||
if (MVS_CHIP_DISP->phy_work_around)
|
||||
MVS_CHIP_DISP->phy_work_around(mvi, i);
|
||||
}
|
||||
mv_dprintk("port %d attach dev info is %x\n",
|
||||
mv_dprintk("phy %d attach dev info is %x\n",
|
||||
i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
|
||||
mv_dprintk("port %d attach sas addr is %llx\n",
|
||||
mv_dprintk("phy %d attach sas addr is %llx\n",
|
||||
i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
|
||||
out_done:
|
||||
if (get_st)
|
||||
@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
|
||||
}
|
||||
hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
|
||||
mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
|
||||
if (sas_port->id >= mvi->chip->n_phy)
|
||||
port = &mvi->port[sas_port->id - mvi->chip->n_phy];
|
||||
if (i >= mvi->chip->n_phy)
|
||||
port = &mvi->port[i - mvi->chip->n_phy];
|
||||
else
|
||||
port = &mvi->port[sas_port->id];
|
||||
port = &mvi->port[i];
|
||||
if (lock)
|
||||
spin_lock_irqsave(&mvi->lock, flags);
|
||||
port->port_attached = 1;
|
||||
@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
|
||||
return;
|
||||
}
|
||||
list_for_each_entry(dev, &port->dev_list, dev_list_node)
|
||||
mvs_do_release_task(phy->mvi, phy_no, NULL);
|
||||
mvs_do_release_task(phy->mvi, phy_no, dev);
|
||||
|
||||
}
|
||||
|
||||
@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
|
||||
mvi_device->dev_status = MVS_DEV_NORMAL;
|
||||
mvi_device->dev_type = dev->dev_type;
|
||||
mvi_device->mvi_info = mvi;
|
||||
mvi_device->sas_device = dev;
|
||||
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
|
||||
int phy_id;
|
||||
u8 phy_num = parent_dev->ex_dev.num_phys;
|
||||
@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
|
||||
mv_dprintk("found dev has gone.\n");
|
||||
}
|
||||
dev->lldd_dev = NULL;
|
||||
mvi_dev->sas_device = NULL;
|
||||
|
||||
spin_unlock_irqrestore(&mvi->lock, flags);
|
||||
}
|
||||
@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
|
||||
complete(&task->completion);
|
||||
}
|
||||
|
||||
/* XXX */
|
||||
#define MVS_TASK_TIMEOUT 20
|
||||
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
|
||||
void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
|
||||
@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
|
||||
}
|
||||
|
||||
wait_for_completion(&task->completion);
|
||||
res = -TMF_RESP_FUNC_FAILED;
|
||||
res = TMF_RESP_FUNC_FAILED;
|
||||
/* Even TMF timed out, return direct. */
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
|
||||
@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
|
||||
u8 *lun, struct mvs_tmf_task *tmf)
|
||||
{
|
||||
struct sas_ssp_task ssp_task;
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
if (!(dev->tproto & SAS_PROTOCOL_SSP))
|
||||
return TMF_RESP_FUNC_ESUPP;
|
||||
|
||||
strncpy((u8 *)&ssp_task.LUN, lun, 8);
|
||||
memcpy(ssp_task.LUN, lun, 8);
|
||||
|
||||
return mvs_exec_internal_tmf_task(dev, &ssp_task,
|
||||
sizeof(ssp_task), tmf);
|
||||
@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
|
||||
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
|
||||
{
|
||||
unsigned long flags;
|
||||
int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
|
||||
int rc = TMF_RESP_FUNC_FAILED;
|
||||
struct mvs_tmf_task tmf_task;
|
||||
struct mvs_device * mvi_dev = dev->lldd_dev;
|
||||
struct mvs_info *mvi = mvi_dev->mvi_info;
|
||||
@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
|
||||
mvi_dev->dev_status = MVS_DEV_EH;
|
||||
rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
|
||||
if (rc == TMF_RESP_FUNC_COMPLETE) {
|
||||
num = mvs_find_dev_phyno(dev, phyno);
|
||||
spin_lock_irqsave(&mvi->lock, flags);
|
||||
for (i = 0; i < num; i++)
|
||||
mvs_release_task(mvi, dev);
|
||||
mvs_release_task(mvi, dev);
|
||||
spin_unlock_irqrestore(&mvi->lock, flags);
|
||||
}
|
||||
/* If failed, fall-through I_T_Nexus reset */
|
||||
@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
|
||||
|
||||
if (mvi_dev->dev_status != MVS_DEV_EH)
|
||||
return TMF_RESP_FUNC_COMPLETE;
|
||||
else
|
||||
mvi_dev->dev_status = MVS_DEV_NORMAL;
|
||||
rc = mvs_debug_I_T_nexus_reset(dev);
|
||||
mv_printk("%s for device[%x]:rc= %d\n",
|
||||
__func__, mvi_dev->device_id, rc);
|
||||
|
||||
/* housekeeper */
|
||||
spin_lock_irqsave(&mvi->lock, flags);
|
||||
mvs_release_task(mvi, dev);
|
||||
spin_unlock_irqrestore(&mvi->lock, flags);
|
||||
@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
|
||||
case TMF_RESP_FUNC_FAILED:
|
||||
case TMF_RESP_FUNC_COMPLETE:
|
||||
break;
|
||||
default:
|
||||
rc = TMF_RESP_FUNC_COMPLETE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
mv_printk("%s:rc= %d\n", __func__, rc);
|
||||
@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
|
||||
u32 tag;
|
||||
|
||||
if (!mvi_dev) {
|
||||
mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
|
||||
rc = TMF_RESP_FUNC_FAILED;
|
||||
mv_printk("Device has removed\n");
|
||||
return TMF_RESP_FUNC_FAILED;
|
||||
}
|
||||
|
||||
mvi = mvi_dev->mvi_info;
|
||||
@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
|
||||
|
||||
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
|
||||
task->task_proto & SAS_PROTOCOL_STP) {
|
||||
/* to do free register_set */
|
||||
if (SATA_DEV == dev->dev_type) {
|
||||
struct mvs_slot_info *slot = task->lldd_task;
|
||||
struct task_status_struct *tstat;
|
||||
u32 slot_idx = (u32)(slot - mvi->slot_info);
|
||||
tstat = &task->task_status;
|
||||
mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
|
||||
mv_dprintk("mvs_abort_task() mvi=%p task=%p "
|
||||
"slot=%p slot_idx=x%x\n",
|
||||
mvi, task, slot, slot_idx);
|
||||
tstat->stat = SAS_ABORTED_TASK;
|
||||
if (mvi_dev && mvi_dev->running_req)
|
||||
mvi_dev->running_req--;
|
||||
if (sas_protocol_ata(task->task_proto))
|
||||
mvs_free_reg_set(mvi, mvi_dev);
|
||||
mvs_tmf_timedout((unsigned long)task);
|
||||
mvs_slot_task_free(mvi, task, slot, slot_idx);
|
||||
return -1;
|
||||
rc = TMF_RESP_FUNC_COMPLETE;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
/* SMP */
|
||||
|
||||
}
|
||||
out:
|
||||
@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
|
||||
return stat;
|
||||
}
|
||||
|
||||
void mvs_set_sense(u8 *buffer, int len, int d_sense,
|
||||
int key, int asc, int ascq)
|
||||
{
|
||||
memset(buffer, 0, len);
|
||||
|
||||
if (d_sense) {
|
||||
/* Descriptor format */
|
||||
if (len < 4) {
|
||||
mv_printk("Length %d of sense buffer too small to "
|
||||
"fit sense %x:%x:%x", len, key, asc, ascq);
|
||||
}
|
||||
|
||||
buffer[0] = 0x72; /* Response Code */
|
||||
if (len > 1)
|
||||
buffer[1] = key; /* Sense Key */
|
||||
if (len > 2)
|
||||
buffer[2] = asc; /* ASC */
|
||||
if (len > 3)
|
||||
buffer[3] = ascq; /* ASCQ */
|
||||
} else {
|
||||
if (len < 14) {
|
||||
mv_printk("Length %d of sense buffer too small to "
|
||||
"fit sense %x:%x:%x", len, key, asc, ascq);
|
||||
}
|
||||
|
||||
buffer[0] = 0x70; /* Response Code */
|
||||
if (len > 2)
|
||||
buffer[2] = key; /* Sense Key */
|
||||
if (len > 7)
|
||||
buffer[7] = 0x0a; /* Additional Sense Length */
|
||||
if (len > 12)
|
||||
buffer[12] = asc; /* ASC */
|
||||
if (len > 13)
|
||||
buffer[13] = ascq; /* ASCQ */
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
|
||||
u8 key, u8 asc, u8 asc_q)
|
||||
{
|
||||
iu->datapres = 2;
|
||||
iu->response_data_len = 0;
|
||||
iu->sense_data_len = 17;
|
||||
iu->status = 02;
|
||||
mvs_set_sense(iu->sense_data, 17, 0,
|
||||
key, asc, asc_q);
|
||||
}
|
||||
|
||||
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
|
||||
u32 slot_idx)
|
||||
{
|
||||
struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
|
||||
int stat;
|
||||
u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
|
||||
u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
|
||||
u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
|
||||
u32 tfs = 0;
|
||||
enum mvs_port_type type = PORT_TYPE_SAS;
|
||||
|
||||
@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
|
||||
stat = SAM_STAT_CHECK_CONDITION;
|
||||
switch (task->task_proto) {
|
||||
case SAS_PROTOCOL_SSP:
|
||||
{
|
||||
stat = SAS_ABORTED_TASK;
|
||||
if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
|
||||
struct ssp_response_iu *iu = slot->response +
|
||||
sizeof(struct mvs_err_info);
|
||||
mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
|
||||
sas_ssp_task_response(mvi->dev, task, iu);
|
||||
stat = SAM_STAT_CHECK_CONDITION;
|
||||
}
|
||||
if (err_dw1 & bit(31))
|
||||
mv_printk("reuse same slot, retry command.\n");
|
||||
break;
|
||||
}
|
||||
case SAS_PROTOCOL_SMP:
|
||||
stat = SAM_STAT_CHECK_CONDITION;
|
||||
break;
|
||||
@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
|
||||
case SAS_PROTOCOL_STP:
|
||||
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
|
||||
{
|
||||
if (err_dw0 == 0x80400002)
|
||||
mv_printk("find reserved error, why?\n");
|
||||
|
||||
task->ata_task.use_ncq = 0;
|
||||
stat = SAS_PROTO_RESPONSE;
|
||||
mvs_sata_done(mvi, task, slot_idx, err_dw0);
|
||||
}
|
||||
break;
|
||||
@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
|
||||
void *to;
|
||||
enum exec_status sts;
|
||||
|
||||
if (mvi->exp_req)
|
||||
mvi->exp_req--;
|
||||
if (unlikely(!task || !task->lldd_task || !task->dev))
|
||||
return -1;
|
||||
|
||||
@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
|
||||
dev = task->dev;
|
||||
mvi_dev = dev->lldd_dev;
|
||||
|
||||
mvs_hba_cq_dump(mvi);
|
||||
|
||||
spin_lock(&task->task_state_lock);
|
||||
task->task_state_flags &=
|
||||
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
|
||||
@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* when no device attaching, go ahead and complete by error handling*/
|
||||
if (unlikely(!mvi_dev || flags)) {
|
||||
if (!mvi_dev)
|
||||
mv_dprintk("port has not device.\n");
|
||||
@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
|
||||
|
||||
/* error info record present */
|
||||
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
|
||||
mv_dprintk("port %d slot %d rx_desc %X has error info"
|
||||
"%016llX.\n", slot->port->sas_port.id, slot_idx,
|
||||
rx_desc, (u64)(*(u64 *)slot->response));
|
||||
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
|
||||
tstat->resp = SAS_TASK_COMPLETE;
|
||||
goto out;
|
||||
@ -2048,8 +1963,7 @@ out:
|
||||
spin_unlock(&mvi->lock);
|
||||
if (task->task_done)
|
||||
task->task_done(task);
|
||||
else
|
||||
mv_dprintk("why has not task_done.\n");
|
||||
|
||||
spin_lock(&mvi->lock);
|
||||
|
||||
return sts;
|
||||
@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
|
||||
struct domain_device *dev)
|
||||
{
|
||||
int i, phyno[WIDE_PORT_MAX_PHY], num;
|
||||
/* housekeeper */
|
||||
num = mvs_find_dev_phyno(dev, phyno);
|
||||
for (i = 0; i < num; i++)
|
||||
mvs_do_release_task(mvi, phyno[i], dev);
|
||||
@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
|
||||
struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
|
||||
struct mvs_info *mvi = mwq->mvi;
|
||||
unsigned long flags;
|
||||
u32 phy_no = (unsigned long) mwq->data;
|
||||
struct sas_ha_struct *sas_ha = mvi->sas;
|
||||
struct mvs_phy *phy = &mvi->phy[phy_no];
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
|
||||
spin_lock_irqsave(&mvi->lock, flags);
|
||||
if (mwq->handler & PHY_PLUG_EVENT) {
|
||||
u32 phy_no = (unsigned long) mwq->data;
|
||||
struct sas_ha_struct *sas_ha = mvi->sas;
|
||||
struct mvs_phy *phy = &mvi->phy[phy_no];
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
|
||||
if (phy->phy_event & PHY_PLUG_OUT) {
|
||||
u32 tmp;
|
||||
@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
|
||||
mv_dprintk("phy%d Attached Device\n", phy_no);
|
||||
}
|
||||
}
|
||||
} else if (mwq->handler & EXP_BRCT_CHG) {
|
||||
phy->phy_event &= ~EXP_BRCT_CHG;
|
||||
sas_ha->notify_port_event(sas_phy,
|
||||
PORTE_BROADCAST_RCVD);
|
||||
mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
|
||||
}
|
||||
list_del(&mwq->entry);
|
||||
spin_unlock_irqrestore(&mvi->lock, flags);
|
||||
@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
|
||||
if (&mvi->phy[phy_no] == phy) {
|
||||
mv_dprintk("Get signature time out, reset phy %d\n",
|
||||
phy_no+mvi->id*mvi->chip->n_phy);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void mvs_sig_remove_timer(struct mvs_phy *phy)
|
||||
{
|
||||
if (phy->timer.function)
|
||||
del_timer(&phy->timer);
|
||||
phy->timer.function = NULL;
|
||||
}
|
||||
|
||||
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
{
|
||||
u32 tmp;
|
||||
struct sas_ha_struct *sas_ha = mvi->sas;
|
||||
struct mvs_phy *phy = &mvi->phy[phy_no];
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
|
||||
phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
|
||||
mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
|
||||
MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
|
||||
mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
|
||||
MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
|
||||
mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
|
||||
mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
|
||||
phy->irq_status);
|
||||
|
||||
/*
|
||||
@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
*/
|
||||
|
||||
if (phy->irq_status & PHYEV_DCDR_ERR) {
|
||||
mv_dprintk("port %d STP decoding error.\n",
|
||||
mv_dprintk("phy %d STP decoding error.\n",
|
||||
phy_no + mvi->id*mvi->chip->n_phy);
|
||||
}
|
||||
|
||||
if (phy->irq_status & PHYEV_POOF) {
|
||||
mdelay(500);
|
||||
if (!(phy->phy_event & PHY_PLUG_OUT)) {
|
||||
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
|
||||
int ready;
|
||||
@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
(void *)(unsigned long)phy_no,
|
||||
PHY_PLUG_EVENT);
|
||||
ready = mvs_is_phy_ready(mvi, phy_no);
|
||||
if (!ready)
|
||||
mv_dprintk("phy%d Unplug Notice\n",
|
||||
phy_no +
|
||||
mvi->id * mvi->chip->n_phy);
|
||||
if (ready || dev_sata) {
|
||||
if (MVS_CHIP_DISP->stp_reset)
|
||||
MVS_CHIP_DISP->stp_reset(mvi,
|
||||
phy_no);
|
||||
else
|
||||
MVS_CHIP_DISP->phy_reset(mvi,
|
||||
phy_no, 0);
|
||||
phy_no, MVS_SOFT_RESET);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
if (phy->timer.function == NULL) {
|
||||
phy->timer.data = (unsigned long)phy;
|
||||
phy->timer.function = mvs_sig_time_out;
|
||||
phy->timer.expires = jiffies + 10*HZ;
|
||||
phy->timer.expires = jiffies + 5*HZ;
|
||||
add_timer(&phy->timer);
|
||||
}
|
||||
}
|
||||
if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
|
||||
phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
|
||||
mvs_sig_remove_timer(phy);
|
||||
mv_dprintk("notify plug in on phy[%d]\n", phy_no);
|
||||
if (phy->phy_status) {
|
||||
mdelay(10);
|
||||
@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
}
|
||||
mvs_update_phyinfo(mvi, phy_no, 0);
|
||||
if (phy->phy_type & PORT_TYPE_SAS) {
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
|
||||
MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
|
||||
mdelay(10);
|
||||
}
|
||||
|
||||
mvs_bytes_dmaed(mvi, phy_no);
|
||||
/* whether driver is going to handle hot plug */
|
||||
if (phy->phy_event & PHY_PLUG_OUT) {
|
||||
mvs_port_notify_formed(sas_phy, 0);
|
||||
mvs_port_notify_formed(&phy->sas_phy, 0);
|
||||
phy->phy_event &= ~PHY_PLUG_OUT;
|
||||
}
|
||||
} else {
|
||||
@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
|
||||
phy_no + mvi->id*mvi->chip->n_phy);
|
||||
}
|
||||
} else if (phy->irq_status & PHYEV_BROAD_CH) {
|
||||
mv_dprintk("port %d broadcast change.\n",
|
||||
mv_dprintk("phy %d broadcast change.\n",
|
||||
phy_no + mvi->id*mvi->chip->n_phy);
|
||||
/* exception for Samsung disk drive*/
|
||||
mdelay(1000);
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
|
||||
EXP_BRCT_CHG);
|
||||
}
|
||||
MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
|
||||
}
|
||||
|
||||
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
|
||||
|
@ -48,12 +48,8 @@
|
||||
|
||||
#define DRV_NAME "mvsas"
|
||||
#define DRV_VERSION "0.8.2"
|
||||
#define _MV_DUMP 0
|
||||
#define MVS_ID_NOT_MAPPED 0x7f
|
||||
/* #define DISABLE_HOTPLUG_DMA_FIX */
|
||||
// #define MAX_EXP_RUNNING_REQ 2
|
||||
#define WIDE_PORT_MAX_PHY 4
|
||||
#define MV_DISABLE_NCQ 0
|
||||
#define mv_printk(fmt, arg ...) \
|
||||
printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
|
||||
#ifdef MV_DEBUG
|
||||
@ -64,6 +60,7 @@
|
||||
#endif
|
||||
#define MV_MAX_U32 0xffffffff
|
||||
|
||||
extern int interrupt_coalescing;
|
||||
extern struct mvs_tgt_initiator mvs_tgt;
|
||||
extern struct mvs_info *tgt_mvi;
|
||||
extern const struct mvs_dispatch mvs_64xx_dispatch;
|
||||
@ -99,6 +96,11 @@ enum dev_status {
|
||||
MVS_DEV_EH = 0x1,
|
||||
};
|
||||
|
||||
enum dev_reset {
|
||||
MVS_SOFT_RESET = 0,
|
||||
MVS_HARD_RESET = 1,
|
||||
MVS_PHY_TUNE = 2,
|
||||
};
|
||||
|
||||
struct mvs_info;
|
||||
|
||||
@ -130,7 +132,6 @@ struct mvs_dispatch {
|
||||
u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
|
||||
void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
|
||||
|
||||
void (*get_sas_addr)(void *buf, u32 buflen);
|
||||
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
|
||||
void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
|
||||
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
|
||||
@ -167,9 +168,10 @@ struct mvs_dispatch {
|
||||
);
|
||||
int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
|
||||
int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
|
||||
#endif
|
||||
void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
|
||||
int buf_len, int from, void *prd);
|
||||
void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
|
||||
void (*non_spec_ncq_error)(struct mvs_info *mvi);
|
||||
|
||||
};
|
||||
|
||||
@ -179,9 +181,11 @@ struct mvs_chip_info {
|
||||
u32 fis_offs;
|
||||
u32 fis_count;
|
||||
u32 srs_sz;
|
||||
u32 sg_width;
|
||||
u32 slot_width;
|
||||
const struct mvs_dispatch *dispatch;
|
||||
};
|
||||
#define MVS_MAX_SG (1U << mvi->chip->sg_width)
|
||||
#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
|
||||
#define MVS_RX_FISL_SZ \
|
||||
(mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
|
||||
@ -249,6 +253,73 @@ struct mvs_device {
|
||||
u16 reserved;
|
||||
};
|
||||
|
||||
/* Generate PHY tunning parameters */
|
||||
struct phy_tuning {
|
||||
/* 1 bit, transmitter emphasis enable */
|
||||
u8 trans_emp_en:1;
|
||||
/* 4 bits, transmitter emphasis amplitude */
|
||||
u8 trans_emp_amp:4;
|
||||
/* 3 bits, reserved space */
|
||||
u8 Reserved_2bit_1:3;
|
||||
/* 5 bits, transmitter amplitude */
|
||||
u8 trans_amp:5;
|
||||
/* 2 bits, transmitter amplitude adjust */
|
||||
u8 trans_amp_adj:2;
|
||||
/* 1 bit, reserved space */
|
||||
u8 resv_2bit_2:1;
|
||||
/* 2 bytes, reserved space */
|
||||
u8 reserved[2];
|
||||
};
|
||||
|
||||
struct ffe_control {
|
||||
/* 4 bits, FFE Capacitor Select (value range 0~F) */
|
||||
u8 ffe_cap_sel:4;
|
||||
/* 3 bits, FFE Resistor Select (value range 0~7) */
|
||||
u8 ffe_rss_sel:3;
|
||||
/* 1 bit reserve*/
|
||||
u8 reserved:1;
|
||||
};
|
||||
|
||||
/*
|
||||
* HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
|
||||
* The data area is valid only Signature="MRVL".
|
||||
* If any member fills with 0xFF, the member is invalid.
|
||||
*/
|
||||
struct hba_info_page {
|
||||
/* Dword 0 */
|
||||
/* 4 bytes, structure signature,should be "MRVL" at first initial */
|
||||
u8 signature[4];
|
||||
|
||||
/* Dword 1-13 */
|
||||
u32 reserved1[13];
|
||||
|
||||
/* Dword 14-29 */
|
||||
/* 64 bytes, SAS address for each port */
|
||||
u64 sas_addr[8];
|
||||
|
||||
/* Dword 30-31 */
|
||||
/* 8 bytes for vanir 8 port PHY FFE seeting
|
||||
* BIT 0~3 : FFE Capacitor select(value range 0~F)
|
||||
* BIT 4~6 : FFE Resistor select(value range 0~7)
|
||||
* BIT 7: reserve.
|
||||
*/
|
||||
|
||||
struct ffe_control ffe_ctl[8];
|
||||
/* Dword 32 -43 */
|
||||
u32 reserved2[12];
|
||||
|
||||
/* Dword 44-45 */
|
||||
/* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
|
||||
u8 phy_rate[8];
|
||||
|
||||
/* Dword 46-53 */
|
||||
/* 32 bytes, PHY tuning parameters for each PHY*/
|
||||
struct phy_tuning phy_tuning[8];
|
||||
|
||||
/* Dword 54-63 */
|
||||
u32 reserved3[10];
|
||||
}; /* total 256 bytes */
|
||||
|
||||
struct mvs_slot_info {
|
||||
struct list_head entry;
|
||||
union {
|
||||
@ -264,9 +335,6 @@ struct mvs_slot_info {
|
||||
*/
|
||||
void *buf;
|
||||
dma_addr_t buf_dma;
|
||||
#if _MV_DUMP
|
||||
u32 cmd_size;
|
||||
#endif
|
||||
void *response;
|
||||
struct mvs_port *port;
|
||||
struct mvs_device *device;
|
||||
@ -320,12 +388,10 @@ struct mvs_info {
|
||||
const struct mvs_chip_info *chip;
|
||||
|
||||
int tags_num;
|
||||
DECLARE_BITMAP(tags, MVS_SLOTS);
|
||||
unsigned long *tags;
|
||||
/* further per-slot information */
|
||||
struct mvs_phy phy[MVS_MAX_PHYS];
|
||||
struct mvs_port port[MVS_MAX_PHYS];
|
||||
u32 irq;
|
||||
u32 exp_req;
|
||||
u32 id;
|
||||
u64 sata_reg_set;
|
||||
struct list_head *hba_list;
|
||||
@ -337,12 +403,13 @@ struct mvs_info {
|
||||
u32 flashsectSize;
|
||||
|
||||
void *addon;
|
||||
struct hba_info_page hba_info_param;
|
||||
struct mvs_device devices[MVS_MAX_DEVICES];
|
||||
#ifndef DISABLE_HOTPLUG_DMA_FIX
|
||||
void *bulk_buffer;
|
||||
dma_addr_t bulk_buffer_dma;
|
||||
void *bulk_buffer1;
|
||||
dma_addr_t bulk_buffer_dma1;
|
||||
#define TRASH_BUCKET_SIZE 0x20000
|
||||
#endif
|
||||
void *dma_pool;
|
||||
struct mvs_slot_info slot_info[0];
|
||||
};
|
||||
@ -350,8 +417,10 @@ struct mvs_info {
|
||||
struct mvs_prv_info{
|
||||
u8 n_host;
|
||||
u8 n_phy;
|
||||
u16 reserve;
|
||||
u8 scan_finished;
|
||||
u8 reserve;
|
||||
struct mvs_info *mvi[2];
|
||||
struct tasklet_struct mv_tasklet;
|
||||
};
|
||||
|
||||
struct mvs_wq {
|
||||
@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
|
||||
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
|
||||
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
|
||||
int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
|
||||
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
|
||||
struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
|
||||
#endif
|
||||
|
||||
|
@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough(
|
||||
pmcraid_err("couldn't build passthrough ioadls\n");
|
||||
goto out_free_buffer;
|
||||
}
|
||||
} else if (request_size < 0) {
|
||||
rc = -EINVAL;
|
||||
goto out_free_buffer;
|
||||
}
|
||||
|
||||
/* If data is being written into the device, copy the data from user
|
||||
|
@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
|
||||
int reading;
|
||||
|
||||
if (IS_QLA82XX(ha)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Firmware dump not supported for ISP82xx\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x705b,
|
||||
"Firmware dump not supported for ISP82xx\n");
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
|
||||
if (!ha->fw_dump_reading)
|
||||
break;
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
ql_log(ql_log_info, vha, 0x705d,
|
||||
"Firmware dump cleared on (%ld).\n", vha->host_no);
|
||||
|
||||
ha->fw_dump_reading = 0;
|
||||
@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
|
||||
if (ha->fw_dumped && !ha->fw_dump_reading) {
|
||||
ha->fw_dump_reading = 1;
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
ql_log(ql_log_info, vha, 0x705e,
|
||||
"Raw firmware dump ready for read on (%ld).\n",
|
||||
vha->host_no);
|
||||
}
|
||||
@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
|
||||
}
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x705f,
|
||||
"HBA not online, failing NVRAM update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
|
||||
count);
|
||||
|
||||
ql_dbg(ql_dbg_user, vha, 0x7060,
|
||||
"Setting ISP_ABORT_NEEDED\n");
|
||||
/* NVRAM settings take effect immediately. */
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x7061,
|
||||
"Freeing flash region allocation -- 0x%x bytes.\n",
|
||||
ha->optrom_region_size));
|
||||
ha->optrom_region_size);
|
||||
|
||||
vfree(ha->optrom_buffer);
|
||||
ha->optrom_buffer = NULL;
|
||||
@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
ha->optrom_state = QLA_SREADING;
|
||||
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
|
||||
if (ha->optrom_buffer == NULL) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7062,
|
||||
"Unable to allocate memory for optrom retrieval "
|
||||
"(%x).\n", ha->optrom_region_size);
|
||||
|
||||
@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
}
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"HBA not online, failing NVRAM update.\n");
|
||||
ql_log(ql_log_warn, vha, 0x7063,
|
||||
"HBA not online, failing NVRAM update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x7064,
|
||||
"Reading flash region -- 0x%x/0x%x.\n",
|
||||
ha->optrom_region_start, ha->optrom_region_size));
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
|
||||
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
|
||||
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
|
||||
@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
|
||||
valid = 1;
|
||||
if (!valid) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7065,
|
||||
"Invalid start region 0x%x/0x%x.\n", start, size);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
ha->optrom_state = QLA_SWRITING;
|
||||
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
|
||||
if (ha->optrom_buffer == NULL) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7066,
|
||||
"Unable to allocate memory for optrom update "
|
||||
"(%x).\n", ha->optrom_region_size);
|
||||
"(%x)\n", ha->optrom_region_size);
|
||||
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
return count;
|
||||
}
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x7067,
|
||||
"Staging flash region write -- 0x%x/0x%x.\n",
|
||||
ha->optrom_region_start, ha->optrom_region_size));
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
|
||||
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
|
||||
break;
|
||||
@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
|
||||
break;
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7068,
|
||||
"HBA not online, failing flash update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x7069,
|
||||
"Writing flash region -- 0x%x/0x%x.\n",
|
||||
ha->optrom_region_start, ha->optrom_region_size));
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
|
||||
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
|
||||
return 0;
|
||||
|
||||
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x706a,
|
||||
"HBA not online, failing VPD update.\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
|
||||
|
||||
tmp_data = vmalloc(256);
|
||||
if (!tmp_data) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x706b,
|
||||
"Unable to allocate memory for VPD information update.\n");
|
||||
goto done;
|
||||
}
|
||||
@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
|
||||
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->sfp_data_dma);
|
||||
if (!ha->sfp_data) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x706c,
|
||||
"Unable to allocate memory for SFP read-data.\n");
|
||||
return 0;
|
||||
}
|
||||
@ -499,9 +501,10 @@ do_read:
|
||||
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
|
||||
addr, offset, SFP_BLOCK_SIZE, 0);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x706d,
|
||||
"Unable to read SFP data (%x/%x/%x).\n", rval,
|
||||
addr, offset);
|
||||
|
||||
count = 0;
|
||||
break;
|
||||
}
|
||||
@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
|
||||
type = simple_strtol(buf, NULL, 10);
|
||||
switch (type) {
|
||||
case 0x2025c:
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Issuing ISP reset on (%ld).\n", vha->host_no);
|
||||
ql_log(ql_log_info, vha, 0x706e,
|
||||
"Issuing ISP reset.\n");
|
||||
|
||||
scsi_block_requests(vha->host);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
|
||||
if (!IS_QLA81XX(ha))
|
||||
break;
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Issuing MPI reset on (%ld).\n", vha->host_no);
|
||||
ql_log(ql_log_info, vha, 0x706f,
|
||||
"Issuing MPI reset.\n");
|
||||
|
||||
/* Make sure FC side is not in reset */
|
||||
qla2x00_wait_for_hba_online(vha);
|
||||
@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
|
||||
/* Issue MPI reset */
|
||||
scsi_block_requests(vha->host);
|
||||
if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"MPI reset failed on (%ld).\n", vha->host_no);
|
||||
ql_log(ql_log_warn, vha, 0x7070,
|
||||
"MPI reset failed.\n");
|
||||
scsi_unblock_requests(vha->host);
|
||||
break;
|
||||
case 0x2025e:
|
||||
if (!IS_QLA82XX(ha) || vha != base_vha) {
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"FCoE ctx reset not supported for host%ld.\n",
|
||||
vha->host_no);
|
||||
ql_log(ql_log_info, vha, 0x7071,
|
||||
"FCoE ctx reset no supported.\n");
|
||||
return count;
|
||||
}
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
|
||||
ql_log(ql_log_info, vha, 0x7072,
|
||||
"Issuing FCoE ctx reset.\n");
|
||||
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_fcoe_ctx_reset(vha);
|
||||
@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
|
||||
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->edc_data_dma);
|
||||
if (!ha->edc_data) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to allocate memory for EDC write.\n"));
|
||||
ql_log(ql_log_warn, vha, 0x7073,
|
||||
"Unable to allocate memory for EDC write.\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
|
||||
rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
|
||||
dev, adr, len, opt);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
|
||||
rval, dev, adr, opt, len, buf[8]));
|
||||
ql_log(ql_log_warn, vha, 0x7074,
|
||||
"Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
|
||||
rval, dev, adr, opt, len, buf[8]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
|
||||
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
|
||||
&ha->edc_data_dma);
|
||||
if (!ha->edc_data) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to allocate memory for EDC status.\n"));
|
||||
ql_log(ql_log_warn, vha, 0x708c,
|
||||
"Unable to allocate memory for EDC status.\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
|
||||
rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
|
||||
dev, adr, len, opt);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
|
||||
rval, dev, adr, opt, len));
|
||||
ql_log(ql_log_info, vha, 0x7075,
|
||||
"Unable to write EDC status (%x) %02x:%04x:%02x.\n",
|
||||
rval, dev, adr, opt, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
|
||||
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
|
||||
&ha->xgmac_data_dma, GFP_KERNEL);
|
||||
if (!ha->xgmac_data) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7076,
|
||||
"Unable to allocate memory for XGMAC read-data.\n");
|
||||
return 0;
|
||||
}
|
||||
@ -761,7 +763,7 @@ do_read:
|
||||
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
|
||||
XGMAC_DATA_SIZE, &actual_size);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7077,
|
||||
"Unable to read XGMAC data (%x).\n", rval);
|
||||
count = 0;
|
||||
}
|
||||
@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
|
||||
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
|
||||
&ha->dcbx_tlv_dma, GFP_KERNEL);
|
||||
if (!ha->dcbx_tlv) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7078,
|
||||
"Unable to allocate memory for DCBX TLV read-data.\n");
|
||||
return 0;
|
||||
}
|
||||
@ -813,8 +815,8 @@ do_read:
|
||||
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
|
||||
DCBX_TLV_DATA_SIZE);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to read DCBX TLV data (%x).\n", rval);
|
||||
ql_log(ql_log_warn, vha, 0x7079,
|
||||
"Unable to read DCBX TLV (%x).\n", rval);
|
||||
count = 0;
|
||||
}
|
||||
|
||||
@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
|
||||
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
|
||||
iter->attr);
|
||||
if (ret)
|
||||
qla_printk(KERN_INFO, vha->hw,
|
||||
"Unable to create sysfs %s binary attribute "
|
||||
"(%d).\n", iter->name, ret);
|
||||
ql_log(ql_log_warn, vha, 0x00f3,
|
||||
"Unable to create sysfs %s binary attribute (%d).\n",
|
||||
iter->name, ret);
|
||||
else
|
||||
ql_dbg(ql_dbg_init, vha, 0x00f4,
|
||||
"Successfully created sysfs %s binary attribure.\n",
|
||||
iter->name);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
|
||||
return -EPERM;
|
||||
|
||||
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x707a,
|
||||
"Abort ISP active -- ignoring beacon request.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
|
||||
temp = frac = 0;
|
||||
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
|
||||
DEBUG2_3_11(printk(KERN_WARNING
|
||||
"%s(%ld): isp reset in progress.\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x707b,
|
||||
"ISP reset active.\n");
|
||||
else if (!vha->hw->flags.eeh_busy)
|
||||
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
|
||||
if (rval != QLA_SUCCESS)
|
||||
@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
|
||||
|
||||
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
|
||||
DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x707c,
|
||||
"ISP reset active.\n");
|
||||
else if (!vha->hw->flags.eeh_busy)
|
||||
rval = qla2x00_get_firmware_state(vha, state);
|
||||
if (rval != QLA_SUCCESS)
|
||||
@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
|
||||
|
||||
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
|
||||
if (stats == NULL) {
|
||||
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
|
||||
__func__, base_vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x707d,
|
||||
"Failed to allocate memory for stats.\n");
|
||||
goto done;
|
||||
}
|
||||
memset(stats, 0, DMA_POOL_SIZE);
|
||||
@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
|
||||
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
|
||||
if (ret) {
|
||||
DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
|
||||
"status %x\n", ret));
|
||||
ql_log(ql_log_warn, vha, 0x707e,
|
||||
"Vport sanity check failed, status %x\n", ret);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
vha = qla24xx_create_vhost(fc_vport);
|
||||
if (vha == NULL) {
|
||||
DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
|
||||
vha));
|
||||
ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
|
||||
return FC_VPORT_FAILED;
|
||||
}
|
||||
if (disable) {
|
||||
@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
atomic_set(&vha->vp_state, VP_FAILED);
|
||||
|
||||
/* ready to create vport */
|
||||
qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
|
||||
vha->vp_idx);
|
||||
ql_log(ql_log_info, vha, 0x7080,
|
||||
"VP entry id %d assigned.\n", vha->vp_idx);
|
||||
|
||||
/* initialized vport states */
|
||||
atomic_set(&vha->loop_state, LOOP_DOWN);
|
||||
@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
|
||||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
|
||||
/* Don't retry or attempt login of this virtual port */
|
||||
DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
|
||||
base_vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7081,
|
||||
"Vport loop state is not UP.\n");
|
||||
atomic_set(&vha->loop_state, LOOP_DEAD);
|
||||
if (!disable)
|
||||
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
|
||||
@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
|
||||
if (ha->fw_attributes & BIT_4) {
|
||||
vha->flags.difdix_supported = 1;
|
||||
DEBUG18(qla_printk(KERN_INFO, ha,
|
||||
"Registering for DIF/DIX type 1 and 3"
|
||||
" protection.\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7082,
|
||||
"Registered for DIF/DIX type 1 and 3 protection.\n");
|
||||
scsi_host_set_prot(vha->host,
|
||||
SHOST_DIF_TYPE1_PROTECTION
|
||||
| SHOST_DIF_TYPE2_PROTECTION
|
||||
@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
|
||||
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
|
||||
&ha->pdev->dev)) {
|
||||
DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
|
||||
vha->host_no, vha->vp_idx));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7083,
|
||||
"scsi_add_host failure for VP[%d].\n", vha->vp_idx);
|
||||
goto vport_create_failed_2;
|
||||
}
|
||||
|
||||
@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
|
||||
if (ha->flags.cpu_affinity_enabled) {
|
||||
req = ha->req_q_map[1];
|
||||
ql_dbg(ql_dbg_multiq, vha, 0xc000,
|
||||
"Request queue %p attached with "
|
||||
"VP[%d], cpu affinity =%d\n",
|
||||
req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
|
||||
goto vport_queue;
|
||||
} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
|
||||
goto vport_queue;
|
||||
@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
|
||||
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
|
||||
qos);
|
||||
if (!ret)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Can't create request queue for vp_idx:%d\n",
|
||||
vha->vp_idx);
|
||||
ql_log(ql_log_warn, vha, 0x7084,
|
||||
"Can't create request queue for VP[%d]\n",
|
||||
vha->vp_idx);
|
||||
else {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Request Que:%d (QoS: %d) created for vp_idx:%d\n",
|
||||
ret, qos, vha->vp_idx));
|
||||
ql_dbg(ql_dbg_multiq, vha, 0xc001,
|
||||
"Request Que:%d Q0s: %d) created for VP[%d]\n",
|
||||
ret, qos, vha->vp_idx);
|
||||
ql_dbg(ql_dbg_user, vha, 0x7085,
|
||||
"Request Que:%d Q0s: %d) created for VP[%d]\n",
|
||||
ret, qos, vha->vp_idx);
|
||||
req = ha->req_q_map[ret];
|
||||
}
|
||||
}
|
||||
@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
|
||||
|
||||
if (vha->timer_active) {
|
||||
qla2x00_vp_stop_timer(vha);
|
||||
DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
|
||||
" = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7086,
|
||||
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
|
||||
}
|
||||
|
||||
/* No pending activities shall be there on the vha now */
|
||||
DEBUG(msleep(random32()%10)); /* Just to see if something falls on
|
||||
if (ql2xextended_error_logging & ql_dbg_user)
|
||||
msleep(random32()%10); /* Just to see if something falls on
|
||||
* the net we have placed below */
|
||||
|
||||
BUG_ON(atomic_read(&vha->vref_count));
|
||||
@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
|
||||
|
||||
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
|
||||
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Queue delete failed.\n");
|
||||
ql_log(ql_log_warn, vha, 0x7087,
|
||||
"Queue delete failed.\n");
|
||||
}
|
||||
|
||||
scsi_host_put(vha->host);
|
||||
qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
|
||||
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,8 @@ done:
|
||||
}
|
||||
|
||||
int
|
||||
qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
|
||||
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
|
||||
struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
|
||||
{
|
||||
int i, ret, num_valid;
|
||||
uint8_t *bcode;
|
||||
@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
|
||||
|
||||
if (bcode_val == 0xFFFFFFFF) {
|
||||
/* No FCP Priority config data in flash */
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"%s: No FCP priority config data.\n",
|
||||
__func__));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7051,
|
||||
"No FCP Priority config data.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
|
||||
bcode[3] != 'S') {
|
||||
/* Invalid FCP priority data header*/
|
||||
DEBUG2(printk(KERN_ERR
|
||||
"%s: Invalid FCP Priority data header. bcode=0x%x\n",
|
||||
__func__, bcode_val));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7052,
|
||||
"Invalid FCP Priority data header. bcode=0x%x.\n",
|
||||
bcode_val);
|
||||
return 0;
|
||||
}
|
||||
if (flag != 1)
|
||||
@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
|
||||
|
||||
if (num_valid == 0) {
|
||||
/* No valid FCP priority data entries */
|
||||
DEBUG2(printk(KERN_ERR
|
||||
"%s: No valid FCP Priority data entries.\n",
|
||||
__func__));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7053,
|
||||
"No valid FCP Priority data entries.\n");
|
||||
ret = 0;
|
||||
} else {
|
||||
/* FCP priority data is valid */
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"%s: Valid FCP priority data. num entries = %d\n",
|
||||
__func__, num_valid));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7054,
|
||||
"Valid FCP priority data. num entries = %d.\n",
|
||||
num_valid);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
|
||||
if (!ha->fcp_prio_cfg) {
|
||||
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
|
||||
if (!ha->fcp_prio_cfg) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to allocate memory "
|
||||
"for fcp prio config data (%x).\n",
|
||||
FCP_PRIO_CFG_SIZE);
|
||||
ql_log(ql_log_warn, vha, 0x7050,
|
||||
"Unable to allocate memory for fcp prio "
|
||||
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
ret = -ENOMEM;
|
||||
goto exit_fcp_prio_cfg;
|
||||
@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
|
||||
FCP_PRIO_CFG_SIZE);
|
||||
|
||||
/* validate fcp priority data */
|
||||
if (!qla24xx_fcp_prio_cfg_valid(
|
||||
(struct qla_fcp_prio_cfg *)
|
||||
ha->fcp_prio_cfg, 1)) {
|
||||
|
||||
if (!qla24xx_fcp_prio_cfg_valid(vha,
|
||||
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
ret = -EINVAL;
|
||||
/* If buffer was invalidatic int
|
||||
@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
|
||||
/* pass through is supported only for ISP 4Gb or higher */
|
||||
if (!IS_FWI2_CAPABLE(ha)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld):ELS passthru not supported for ISP23xx based "
|
||||
"adapters\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7001,
|
||||
"ELS passthru not supported for ISP23xx based adapters.\n");
|
||||
rval = -EPERM;
|
||||
goto done;
|
||||
}
|
||||
@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
/* Multiple SG's are not supported for ELS requests */
|
||||
if (bsg_job->request_payload.sg_cnt > 1 ||
|
||||
bsg_job->reply_payload.sg_cnt > 1) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"multiple SG's are not supported for ELS requests"
|
||||
" [request_sg_cnt: %x reply_sg_cnt: %x]\n",
|
||||
bsg_job->request_payload.sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7002,
|
||||
"Multiple SG's are not suppored for ELS requests, "
|
||||
"request_sg_cnt=%x reply_sg_cnt=%x.\n",
|
||||
bsg_job->request_payload.sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt);
|
||||
rval = -EPERM;
|
||||
goto done;
|
||||
}
|
||||
@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
* if not perform fabric login
|
||||
*/
|
||||
if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"failed to login port %06X for ELS passthru\n",
|
||||
fcport->d_id.b24));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7003,
|
||||
"Failed to login port %06X for ELS passthru.\n",
|
||||
fcport->d_id.b24);
|
||||
rval = -EIO;
|
||||
goto done;
|
||||
}
|
||||
@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
|
||||
if (!vha->flags.online) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"host not online\n"));
|
||||
ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
|
||||
rval = -EIO;
|
||||
goto done;
|
||||
}
|
||||
@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
|
||||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"dma mapping resulted in different sg counts \
|
||||
[request_sg_cnt: %x dma_request_sg_cnt: %x\
|
||||
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
|
||||
bsg_job->request_payload.sg_cnt, req_sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x7008,
|
||||
"dma mapping resulted in different sg counts, "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
|
||||
"dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
|
||||
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
|
||||
"bsg_els_rpt" : "bsg_els_hst");
|
||||
els->u.bsg_job = bsg_job;
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
|
||||
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
|
||||
bsg_job->request->rqst_data.h_els.command_code,
|
||||
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa));
|
||||
ql_dbg(ql_dbg_user, vha, 0x700a,
|
||||
"bsg rqst type: %s els type: %x - loop-id=%x "
|
||||
"portid=%-2x%02x%02x.\n", type,
|
||||
bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
|
||||
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
|
||||
|
||||
rval = qla2x00_start_sp(sp);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_log(ql_log_warn, vha, 0x700e,
|
||||
"qla2x00_start_sp failed = %d\n", rval);
|
||||
kfree(sp->ctx);
|
||||
mempool_free(sp, ha->srb_mempool);
|
||||
rval = -EIO;
|
||||
@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
|
||||
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
|
||||
if (!req_sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x700f,
|
||||
"dma_map_sg return %d for request\n", req_sg_cnt);
|
||||
rval = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
|
||||
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
|
||||
if (!rsp_sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x7010,
|
||||
"dma_map_sg return %d for reply\n", rsp_sg_cnt);
|
||||
rval = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
|
||||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"[request_sg_cnt: %x dma_request_sg_cnt: %x\
|
||||
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
|
||||
bsg_job->request_payload.sg_cnt, req_sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x7011,
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
|
||||
"dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
|
||||
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
|
||||
if (!vha->flags.online) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"host not online\n"));
|
||||
ql_log(ql_log_warn, vha, 0x7012,
|
||||
"Host is not online.\n");
|
||||
rval = -EIO;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
loop_id = vha->mgmt_svr_loop_id;
|
||||
break;
|
||||
default:
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Unknown loop id: %x\n", loop_id));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7013,
|
||||
"Unknown loop id: %x.\n", loop_id);
|
||||
rval = -EINVAL;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
*/
|
||||
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
|
||||
if (!fcport) {
|
||||
ql_log(ql_log_warn, vha, 0x7014,
|
||||
"Failed to allocate fcport.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
/* Alloc SRB structure */
|
||||
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
|
||||
if (!sp) {
|
||||
ql_log(ql_log_warn, vha, 0x7015,
|
||||
"qla2x00_get_ctx_bsg_sp failed.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_free_fcport;
|
||||
}
|
||||
@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
|
||||
ct->name = "bsg_ct";
|
||||
ct->u.bsg_job = bsg_job;
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
|
||||
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
|
||||
(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
|
||||
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7016,
|
||||
"bsg rqst type: %s else type: %x - "
|
||||
"loop-id=%x portid=%02x%02x%02x.\n", type,
|
||||
(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
|
||||
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa);
|
||||
|
||||
rval = qla2x00_start_sp(sp);
|
||||
if (rval != QLA_SUCCESS) {
|
||||
ql_log(ql_log_warn, vha, 0x7017,
|
||||
"qla2x00_start_sp failed=%d.\n", rval);
|
||||
kfree(sp->ctx);
|
||||
mempool_free(sp, ha->srb_mempool);
|
||||
rval = -EIO;
|
||||
@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
|
||||
ha->notify_dcbx_comp = 1;
|
||||
ret = qla81xx_set_port_config(vha, new_config);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG2(printk(KERN_ERR
|
||||
"%s(%lu): Set port config failed\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7021,
|
||||
"set port config failed.\n");
|
||||
ha->notify_dcbx_comp = 0;
|
||||
rval = -EINVAL;
|
||||
goto done_set_internal;
|
||||
@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
|
||||
|
||||
/* Wait for DCBX complete event */
|
||||
if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"State change notificaition not received.\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7022,
|
||||
"State change notification not received.\n");
|
||||
} else
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"State change RECEIVED\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7023,
|
||||
"State change received.\n");
|
||||
|
||||
ha->notify_dcbx_comp = 0;
|
||||
|
||||
@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
|
||||
ha->notify_dcbx_comp = wait;
|
||||
ret = qla81xx_set_port_config(vha, new_config);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG2(printk(KERN_ERR
|
||||
"%s(%lu): Set port config failed\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7025,
|
||||
"Set port config failed.\n");
|
||||
ha->notify_dcbx_comp = 0;
|
||||
rval = -EINVAL;
|
||||
goto done_reset_internal;
|
||||
@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
|
||||
/* Wait for DCBX complete event */
|
||||
if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
|
||||
(20 * HZ))) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha,
|
||||
"State change notificaition not received.\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7026,
|
||||
"State change notification not received.\n");
|
||||
ha->notify_dcbx_comp = 0;
|
||||
rval = -EINVAL;
|
||||
goto done_reset_internal;
|
||||
} else
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"State change RECEIVED\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7027,
|
||||
"State change received.\n");
|
||||
|
||||
ha->notify_dcbx_comp = 0;
|
||||
}
|
||||
@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
|
||||
ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!vha->flags.online) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
|
||||
ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (!elreq.req_sg_cnt)
|
||||
if (!elreq.req_sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x701a,
|
||||
"dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
|
||||
bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (!elreq.rsp_sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x701b,
|
||||
"dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_req_sg;
|
||||
}
|
||||
|
||||
if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
|
||||
(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"dma mapping resulted in different sg counts "
|
||||
"[request_sg_cnt: %x dma_request_sg_cnt: %x "
|
||||
"reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
|
||||
bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x701c,
|
||||
"dma mapping resulted in different sg counts, "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x "
|
||||
"reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
|
||||
bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
|
||||
bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
|
||||
&req_data_dma, GFP_KERNEL);
|
||||
if (!req_data) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
|
||||
"failed for host=%lu\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x701d,
|
||||
"dma alloc failed for req_data.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
|
||||
&rsp_data_dma, GFP_KERNEL);
|
||||
if (!rsp_data) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
|
||||
"failed for host=%lu\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7004,
|
||||
"dma alloc failed for rsp_data.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_free_dma_req;
|
||||
}
|
||||
@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
|
||||
elreq.options == EXTERNAL_LOOPBACK) {
|
||||
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
|
||||
ql_dbg(ql_dbg_user, vha, 0x701e,
|
||||
"BSG request type: %s.\n", type);
|
||||
command_sent = INT_DEF_LB_ECHO_CMD;
|
||||
rval = qla2x00_echo_test(vha, &elreq, response);
|
||||
} else {
|
||||
@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
memset(config, 0, sizeof(config));
|
||||
memset(new_config, 0, sizeof(new_config));
|
||||
if (qla81xx_get_port_config(vha, config)) {
|
||||
DEBUG2(printk(KERN_ERR
|
||||
"%s(%lu): Get port config failed\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x701f,
|
||||
"Get port config failed.\n");
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
rval = -EPERM;
|
||||
@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
|
||||
if (elreq.options != EXTERNAL_LOOPBACK) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"Internal: current port config = %x\n",
|
||||
config[0]));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7020,
|
||||
"Internal: curent port config = %x\n",
|
||||
config[0]);
|
||||
if (qla81xx_set_internal_loopback(vha, config,
|
||||
new_config)) {
|
||||
ql_log(ql_log_warn, vha, 0x7024,
|
||||
"Internal loopback failed.\n");
|
||||
bsg_job->reply->reply_payload_rcv_len =
|
||||
0;
|
||||
bsg_job->reply->result =
|
||||
@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
|
||||
type = "FC_BSG_HST_VENDOR_LOOPBACK";
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld) bsg rqst type: %s\n",
|
||||
vha->host_no, type));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7028,
|
||||
"BSG request type: %s.\n", type);
|
||||
|
||||
command_sent = INT_DEF_LB_LOOPBACK_CMD;
|
||||
rval = qla2x00_loopback_test(vha, &elreq, response);
|
||||
@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if (response[0] == MBS_COMMAND_ERROR &&
|
||||
response[1] == MBS_LB_RESET) {
|
||||
DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
|
||||
"ISP\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7029,
|
||||
"MBX command error, Aborting ISP.\n");
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
qla2xxx_wake_dpc(vha);
|
||||
qla2x00_wait_for_chip_reset(vha);
|
||||
/* Also reset the MPI */
|
||||
if (qla81xx_restart_mpi_firmware(vha) !=
|
||||
QLA_SUCCESS) {
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"MPI reset failed for host%ld.\n",
|
||||
vha->host_no);
|
||||
ql_log(ql_log_warn, vha, 0x702a,
|
||||
"MPI reset failed.\n");
|
||||
}
|
||||
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
} else {
|
||||
type = "FC_BSG_HST_VENDOR_LOOPBACK";
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld) bsg rqst type: %s\n",
|
||||
vha->host_no, type));
|
||||
ql_dbg(ql_dbg_user, vha, 0x702b,
|
||||
"BSG request type: %s.\n", type);
|
||||
command_sent = INT_DEF_LB_LOOPBACK_CMD;
|
||||
rval = qla2x00_loopback_test(vha, &elreq, response);
|
||||
}
|
||||
}
|
||||
|
||||
if (rval) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request %s failed\n", vha->host_no, type));
|
||||
ql_log(ql_log_warn, vha, 0x702c,
|
||||
"Vendor request %s failed.\n", type);
|
||||
|
||||
fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
|
||||
sizeof(struct fc_bsg_reply);
|
||||
@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
} else {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request %s completed\n", vha->host_no, type));
|
||||
ql_dbg(ql_dbg_user, vha, 0x702d,
|
||||
"Vendor request %s completed.\n", type);
|
||||
|
||||
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
|
||||
sizeof(response) + sizeof(uint8_t);
|
||||
@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
|
||||
ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!IS_QLA84XX(ha)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
|
||||
"exiting.\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
|
||||
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
|
||||
|
||||
if (rval) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx reset failed\n", vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7030,
|
||||
"Vendor request 84xx reset failed.\n");
|
||||
rval = bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
|
||||
} else {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx reset completed\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7031,
|
||||
"Vendor request 84xx reset completed.\n");
|
||||
bsg_job->reply->result = DID_OK;
|
||||
}
|
||||
|
||||
@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
|
||||
return -EBUSY;
|
||||
|
||||
if (!IS_QLA84XX(ha)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
|
||||
"exiting.\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7032,
|
||||
"Not 84xx, exiting.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
|
||||
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
|
||||
if (!sg_cnt)
|
||||
if (!sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x7033,
|
||||
"dma_map_sg returned %d for request.\n", sg_cnt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"dma mapping resulted in different sg counts "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x ",
|
||||
bsg_job->request_payload.sg_cnt, sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x7034,
|
||||
"DMA mapping resulted in different sg counts, "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
|
||||
bsg_job->request_payload.sg_cnt, sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
|
||||
fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
|
||||
&fw_dma, GFP_KERNEL);
|
||||
if (!fw_buf) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
|
||||
"failed for host=%lu\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7035,
|
||||
"DMA alloc failed for fw_buf.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
|
||||
|
||||
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
|
||||
if (!mn) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
|
||||
"failed for host=%lu\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7036,
|
||||
"DMA alloc failed for fw buffer.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_free_fw_buf;
|
||||
}
|
||||
@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
|
||||
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
|
||||
|
||||
if (rval) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx updatefw failed\n", vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7037,
|
||||
"Vendor request 84xx updatefw failed.\n");
|
||||
|
||||
rval = bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
|
||||
} else {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx updatefw completed\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7038,
|
||||
"Vendor request 84xx updatefw completed.\n");
|
||||
|
||||
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
|
||||
bsg_job->reply->result = DID_OK;
|
||||
@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
|
||||
ql_log(ql_log_warn, vha, 0x7039,
|
||||
"Abort active or needed.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!IS_QLA84XX(ha)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
|
||||
"exiting.\n", vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x703a,
|
||||
"Not 84xx, exiting.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
|
||||
sizeof(struct fc_bsg_request));
|
||||
if (!ql84_mgmt) {
|
||||
DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x703b,
|
||||
"MGMT header not provided, exiting.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
|
||||
if (!mn) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
|
||||
"failed for host=%lu\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x703c,
|
||||
"DMA alloc failed for fw buffer.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
bsg_job->reply_payload.sg_list,
|
||||
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
|
||||
if (!sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x703d,
|
||||
"dma_map_sg returned %d for reply.\n", sg_cnt);
|
||||
rval = -ENOMEM;
|
||||
goto exit_mgmt;
|
||||
}
|
||||
@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
dma_direction = DMA_FROM_DEVICE;
|
||||
|
||||
if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"dma mapping resulted in different sg counts "
|
||||
"reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
|
||||
bsg_job->reply_payload.sg_cnt, sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x703e,
|
||||
"DMA mapping resulted in different sg counts, "
|
||||
"reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
|
||||
bsg_job->reply_payload.sg_cnt, sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
|
||||
&mgmt_dma, GFP_KERNEL);
|
||||
if (!mgmt_b) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
|
||||
"failed for host=%lu\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x703f,
|
||||
"DMA alloc failed for mgmt_b.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
|
||||
|
||||
if (!sg_cnt) {
|
||||
ql_log(ql_log_warn, vha, 0x7040,
|
||||
"dma_map_sg returned %d.\n", sg_cnt);
|
||||
rval = -ENOMEM;
|
||||
goto exit_mgmt;
|
||||
}
|
||||
@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
dma_direction = DMA_TO_DEVICE;
|
||||
|
||||
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
|
||||
DEBUG2(printk(KERN_INFO
|
||||
"dma mapping resulted in different sg counts "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x ",
|
||||
bsg_job->request_payload.sg_cnt, sg_cnt));
|
||||
ql_log(ql_log_warn, vha, 0x7041,
|
||||
"DMA mapping resulted in different sg counts, "
|
||||
"request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
|
||||
bsg_job->request_payload.sg_cnt, sg_cnt);
|
||||
rval = -EAGAIN;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
|
||||
&mgmt_dma, GFP_KERNEL);
|
||||
if (!mgmt_b) {
|
||||
DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
|
||||
"failed for host=%lu\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7042,
|
||||
"DMA alloc failed for mgmt_b.\n");
|
||||
rval = -ENOMEM;
|
||||
goto done_unmap_sg;
|
||||
}
|
||||
@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
|
||||
rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
|
||||
|
||||
if (rval) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx mgmt failed\n", vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7043,
|
||||
"Vendor request 84xx mgmt failed.\n");
|
||||
|
||||
rval = bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
|
||||
} else {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
|
||||
"request 84xx mgmt completed\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7044,
|
||||
"Vendor request 84xx mgmt completed.\n");
|
||||
|
||||
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
|
||||
bsg_job->reply->result = DID_OK;
|
||||
@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
struct Scsi_Host *host = bsg_job->shost;
|
||||
scsi_qla_host_t *vha = shost_priv(host);
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int rval = 0;
|
||||
struct qla_port_param *port_param = NULL;
|
||||
fc_port_t *fcport = NULL;
|
||||
@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
|
||||
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
|
||||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
|
||||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
|
||||
ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!IS_IIDMA_CAPABLE(vha->hw)) {
|
||||
DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
|
||||
"supported\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
port_param = (struct qla_port_param *)((char *)bsg_job->request +
|
||||
sizeof(struct fc_bsg_request));
|
||||
if (!port_param) {
|
||||
DEBUG2(printk("%s(%ld): port_param header not provided, "
|
||||
"exiting.\n", __func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7047,
|
||||
"port_param header not provided.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
|
||||
DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7048,
|
||||
"Invalid destination type.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
|
||||
if (!fcport) {
|
||||
DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7049,
|
||||
"Failed to find port.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (atomic_read(&fcport->state) != FCS_ONLINE) {
|
||||
DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
|
||||
__func__, vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x704a,
|
||||
"Port is not online.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (fcport->flags & FCF_LOGIN_NEEDED) {
|
||||
DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
|
||||
"flags = 0x%x\n",
|
||||
__func__, vha->host_no, fcport->flags));
|
||||
ql_log(ql_log_warn, vha, 0x704b,
|
||||
"Remote port not logged in flags = 0x%x.\n", fcport->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
&port_param->speed, mb);
|
||||
|
||||
if (rval) {
|
||||
DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x -- "
|
||||
"%04x %x %04x %04x.\n",
|
||||
vha->host_no, fcport->port_name[0],
|
||||
fcport->port_name[1],
|
||||
fcport->port_name[2], fcport->port_name[3],
|
||||
fcport->port_name[4], fcport->port_name[5],
|
||||
fcport->port_name[6], fcport->port_name[7], rval,
|
||||
fcport->fp_speed, mb[0], mb[1]));
|
||||
ql_log(ql_log_warn, vha, 0x704c,
|
||||
"iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
|
||||
"%04x %x %04x %04x.\n", fcport->port_name[0],
|
||||
fcport->port_name[1], fcport->port_name[2],
|
||||
fcport->port_name[3], fcport->port_name[4],
|
||||
fcport->port_name[5], fcport->port_name[6],
|
||||
fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
|
||||
rval = 0;
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
|
||||
@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
|
||||
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
|
||||
uint8_t is_update)
|
||||
{
|
||||
uint32_t start = 0;
|
||||
int valid = 0;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
|
||||
@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
|
||||
return -EINVAL;
|
||||
|
||||
start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
|
||||
if (start > ha->optrom_size)
|
||||
if (start > ha->optrom_size) {
|
||||
ql_log(ql_log_warn, vha, 0x7055,
|
||||
"start %d > optrom_size %d.\n", start, ha->optrom_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ha->optrom_state != QLA_SWAITING)
|
||||
if (ha->optrom_state != QLA_SWAITING) {
|
||||
ql_log(ql_log_info, vha, 0x7056,
|
||||
"optrom_state %d.\n", ha->optrom_state);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
ha->optrom_region_start = start;
|
||||
|
||||
ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
|
||||
if (is_update) {
|
||||
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
|
||||
valid = 1;
|
||||
@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
|
||||
IS_QLA8XXX_TYPE(ha))
|
||||
valid = 1;
|
||||
if (!valid) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Invalid start region 0x%x/0x%x.\n",
|
||||
start, bsg_job->request_payload.payload_len);
|
||||
ql_log(ql_log_warn, vha, 0x7058,
|
||||
"Invalid start region 0x%x/0x%x.\n", start,
|
||||
bsg_job->request_payload.payload_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
|
||||
|
||||
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
|
||||
if (!ha->optrom_buffer) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7059,
|
||||
"Read: Unable to allocate memory for optrom retrieval "
|
||||
"(%x).\n", ha->optrom_region_size);
|
||||
"(%x)\n", ha->optrom_region_size);
|
||||
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
return -ENOMEM;
|
||||
@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int rval = 0;
|
||||
|
||||
rval = qla2x00_optrom_setup(bsg_job, ha, 0);
|
||||
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
|
||||
if (rval)
|
||||
return rval;
|
||||
|
||||
@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int rval = 0;
|
||||
|
||||
rval = qla2x00_optrom_setup(bsg_job, ha, 1);
|
||||
rval = qla2x00_optrom_setup(bsg_job, vha, 1);
|
||||
if (rval)
|
||||
return rval;
|
||||
|
||||
@ -1464,6 +1485,23 @@ int
|
||||
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct fc_rport *rport;
|
||||
fc_port_t *fcport = NULL;
|
||||
struct Scsi_Host *host;
|
||||
scsi_qla_host_t *vha;
|
||||
|
||||
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
|
||||
rport = bsg_job->rport;
|
||||
fcport = *(fc_port_t **) rport->dd_data;
|
||||
host = rport_to_shost(rport);
|
||||
vha = shost_priv(host);
|
||||
} else {
|
||||
host = bsg_job->shost;
|
||||
vha = shost_priv(host);
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_user, vha, 0x7000,
|
||||
"Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
|
||||
|
||||
switch (bsg_job->request->msgcode) {
|
||||
case FC_BSG_RPT_ELS:
|
||||
@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
|
||||
case FC_BSG_HST_DEL_RPORT:
|
||||
case FC_BSG_RPT_CT:
|
||||
default:
|
||||
DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
|
||||
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
|
||||
&& (sp_bsg->u.bsg_job == bsg_job)) {
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
if (ha->isp_ops->abort_command(sp)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld): mbx "
|
||||
"abort_command failed\n",
|
||||
vha->host_no));
|
||||
ql_log(ql_log_warn, vha, 0x7089,
|
||||
"mbx abort_command "
|
||||
"failed.\n");
|
||||
bsg_job->req->errors =
|
||||
bsg_job->reply->result = -EIO;
|
||||
} else {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld): mbx "
|
||||
"abort_command success\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_user, vha, 0x708a,
|
||||
"mbx abort_command "
|
||||
"success.\n");
|
||||
bsg_job->req->errors =
|
||||
bsg_job->reply->result = 0;
|
||||
}
|
||||
@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld) SRB not found to abort\n", vha->host_no));
|
||||
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
|
||||
bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
|
||||
return 0;
|
||||
|
||||
|
@ -4,10 +4,36 @@
|
||||
*
|
||||
* See LICENSE.qla2xxx for copyright and licensing details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Table for showing the current message id in use for particular level
|
||||
* Change this table for addition of log/debug messages.
|
||||
* -----------------------------------------------------
|
||||
* | Level | Last Value Used |
|
||||
* -----------------------------------------------------
|
||||
* | Module Init and Probe | 0x0116 |
|
||||
* | Mailbox commands | 0x111e |
|
||||
* | Device Discovery | 0x2083 |
|
||||
* | Queue Command and IO tracing | 0x302e |
|
||||
* | DPC Thread | 0x401c |
|
||||
* | Async Events | 0x5059 |
|
||||
* | Timer Routines | 0x600d |
|
||||
* | User Space Interactions | 0x709c |
|
||||
* | Task Management | 0x8043 |
|
||||
* | AER/EEH | 0x900f |
|
||||
* | Virtual Port | 0xa007 |
|
||||
* | ISP82XX Specific | 0xb027 |
|
||||
* | MultiQ | 0xc00b |
|
||||
* | Misc | 0xd00b |
|
||||
* -----------------------------------------------------
|
||||
*/
|
||||
|
||||
#include "qla_def.h"
|
||||
|
||||
#include <linux/delay.h>
|
||||
|
||||
static uint32_t ql_dbg_offset = 0x800;
|
||||
|
||||
static inline void
|
||||
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
|
||||
{
|
||||
@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Failed to dump firmware (%x)!!!\n", rval);
|
||||
ql_log(ql_log_warn, vha, 0xd000,
|
||||
"Failed to dump firmware (%x).\n", rval);
|
||||
ha->fw_dumped = 0;
|
||||
} else {
|
||||
qla_printk(KERN_INFO, ha,
|
||||
ql_log(ql_log_info, vha, 0xd001,
|
||||
"Firmware dump saved to temp buffer (%ld/%p).\n",
|
||||
vha->host_no, ha->fw_dump);
|
||||
ha->fw_dumped = 1;
|
||||
@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
ql_log(ql_log_warn, vha, 0xd002,
|
||||
"No buffer available for dump.\n");
|
||||
goto qla2300_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
ql_log(ql_log_warn, vha, 0xd003,
|
||||
"Firmware has been previously dumped (%p) "
|
||||
"-- ignoring request.\n",
|
||||
ha->fw_dump);
|
||||
goto qla2300_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp23;
|
||||
@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
ql_log(ql_log_warn, vha, 0xd004,
|
||||
"No buffer available for dump.\n");
|
||||
goto qla2100_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
ql_log(ql_log_warn, vha, 0xd005,
|
||||
"Firmware has been previously dumped (%p) "
|
||||
"-- ignoring request.\n",
|
||||
ha->fw_dump);
|
||||
goto qla2100_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp21;
|
||||
@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
ql_log(ql_log_warn, vha, 0xd006,
|
||||
"No buffer available for dump.\n");
|
||||
goto qla24xx_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
ql_log(ql_log_warn, vha, 0xd007,
|
||||
"Firmware has been previously dumped (%p) "
|
||||
"-- ignoring request.\n",
|
||||
ha->fw_dump);
|
||||
goto qla24xx_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp24;
|
||||
@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
ql_log(ql_log_warn, vha, 0xd008,
|
||||
"No buffer available for dump.\n");
|
||||
goto qla25xx_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
ql_log(ql_log_warn, vha, 0xd009,
|
||||
"Firmware has been previously dumped (%p) "
|
||||
"-- ignoring request.\n",
|
||||
ha->fw_dump);
|
||||
goto qla25xx_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp25;
|
||||
@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
ql_log(ql_log_warn, vha, 0xd00a,
|
||||
"No buffer available for dump.\n");
|
||||
goto qla81xx_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
ql_log(ql_log_warn, vha, 0xd00b,
|
||||
"Firmware has been previously dumped (%p) "
|
||||
"-- ignoring request.\n",
|
||||
ha->fw_dump);
|
||||
goto qla81xx_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp81;
|
||||
@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
|
||||
/****************************************************************************/
|
||||
/* Driver Debug Functions. */
|
||||
/****************************************************************************/
|
||||
/*
|
||||
* This function is for formatting and logging debug information.
|
||||
* It is to be used when vha is available. It formats the message
|
||||
* and logs it to the messages file.
|
||||
* parameters:
|
||||
* level: The level of the debug messages to be printed.
|
||||
* If ql2xextended_error_logging value is correctly set,
|
||||
* this message will appear in the messages file.
|
||||
* vha: Pointer to the scsi_qla_host_t.
|
||||
* id: This is a unique identifier for the level. It identifies the
|
||||
* part of the code from where the message originated.
|
||||
* msg: The message to be displayed.
|
||||
*/
|
||||
void
|
||||
ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
|
||||
|
||||
char pbuf[QL_DBG_BUF_LEN];
|
||||
va_list ap;
|
||||
uint32_t len;
|
||||
struct pci_dev *pdev = NULL;
|
||||
|
||||
memset(pbuf, 0, QL_DBG_BUF_LEN);
|
||||
|
||||
va_start(ap, msg);
|
||||
|
||||
if ((level & ql2xextended_error_logging) == level) {
|
||||
if (vha != NULL) {
|
||||
pdev = vha->hw->pdev;
|
||||
/* <module-name> <pci-name> <msg-id>:<host> Message */
|
||||
sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
|
||||
dev_name(&(pdev->dev)), id + ql_dbg_offset,
|
||||
vha->host_no);
|
||||
} else
|
||||
sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
|
||||
"0000:00:00.0", id + ql_dbg_offset);
|
||||
|
||||
len = strlen(pbuf);
|
||||
vsprintf(pbuf+len, msg, ap);
|
||||
pr_warning("%s", pbuf);
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is for formatting and logging debug information.
|
||||
* It is to be used when vha is not available and pci is availble,
|
||||
* i.e., before host allocation. It formats the message and logs it
|
||||
* to the messages file.
|
||||
* parameters:
|
||||
* level: The level of the debug messages to be printed.
|
||||
* If ql2xextended_error_logging value is correctly set,
|
||||
* this message will appear in the messages file.
|
||||
* pdev: Pointer to the struct pci_dev.
|
||||
* id: This is a unique id for the level. It identifies the part
|
||||
* of the code from where the message originated.
|
||||
* msg: The message to be displayed.
|
||||
*/
|
||||
void
|
||||
ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
|
||||
|
||||
char pbuf[QL_DBG_BUF_LEN];
|
||||
va_list ap;
|
||||
uint32_t len;
|
||||
|
||||
if (pdev == NULL)
|
||||
return;
|
||||
|
||||
memset(pbuf, 0, QL_DBG_BUF_LEN);
|
||||
|
||||
va_start(ap, msg);
|
||||
|
||||
if ((level & ql2xextended_error_logging) == level) {
|
||||
/* <module-name> <dev-name>:<msg-id> Message */
|
||||
sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
|
||||
dev_name(&(pdev->dev)), id + ql_dbg_offset);
|
||||
|
||||
len = strlen(pbuf);
|
||||
vsprintf(pbuf+len, msg, ap);
|
||||
pr_warning("%s", pbuf);
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is for formatting and logging log messages.
|
||||
* It is to be used when vha is available. It formats the message
|
||||
* and logs it to the messages file. All the messages will be logged
|
||||
* irrespective of value of ql2xextended_error_logging.
|
||||
* parameters:
|
||||
* level: The level of the log messages to be printed in the
|
||||
* messages file.
|
||||
* vha: Pointer to the scsi_qla_host_t
|
||||
* id: This is a unique id for the level. It identifies the
|
||||
* part of the code from where the message originated.
|
||||
* msg: The message to be displayed.
|
||||
*/
|
||||
void
|
||||
ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
|
||||
|
||||
char pbuf[QL_DBG_BUF_LEN];
|
||||
va_list ap;
|
||||
uint32_t len;
|
||||
struct pci_dev *pdev = NULL;
|
||||
|
||||
memset(pbuf, 0, QL_DBG_BUF_LEN);
|
||||
|
||||
va_start(ap, msg);
|
||||
|
||||
if (level <= ql_errlev) {
|
||||
if (vha != NULL) {
|
||||
pdev = vha->hw->pdev;
|
||||
/* <module-name> <msg-id>:<host> Message */
|
||||
sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
|
||||
dev_name(&(pdev->dev)), id, vha->host_no);
|
||||
} else
|
||||
sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
|
||||
"0000:00:00.0", id);
|
||||
|
||||
len = strlen(pbuf);
|
||||
vsprintf(pbuf+len, msg, ap);
|
||||
|
||||
switch (level) {
|
||||
case 0: /* FATAL LOG */
|
||||
pr_crit("%s", pbuf);
|
||||
break;
|
||||
case 1:
|
||||
pr_err("%s", pbuf);
|
||||
break;
|
||||
case 2:
|
||||
pr_warn("%s", pbuf);
|
||||
break;
|
||||
default:
|
||||
pr_info("%s", pbuf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is for formatting and logging log messages.
|
||||
* It is to be used when vha is not available and pci is availble,
|
||||
* i.e., before host allocation. It formats the message and logs
|
||||
* it to the messages file. All the messages are logged irrespective
|
||||
* of the value of ql2xextended_error_logging.
|
||||
* parameters:
|
||||
* level: The level of the log messages to be printed in the
|
||||
* messages file.
|
||||
* pdev: Pointer to the struct pci_dev.
|
||||
* id: This is a unique id for the level. It identifies the
|
||||
* part of the code from where the message originated.
|
||||
* msg: The message to be displayed.
|
||||
*/
|
||||
void
|
||||
ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
|
||||
|
||||
char pbuf[QL_DBG_BUF_LEN];
|
||||
va_list ap;
|
||||
uint32_t len;
|
||||
|
||||
if (pdev == NULL)
|
||||
return;
|
||||
|
||||
memset(pbuf, 0, QL_DBG_BUF_LEN);
|
||||
|
||||
va_start(ap, msg);
|
||||
|
||||
if (level <= ql_errlev) {
|
||||
/* <module-name> <dev-name>:<msg-id> Message */
|
||||
sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
|
||||
dev_name(&(pdev->dev)), id);
|
||||
|
||||
len = strlen(pbuf);
|
||||
vsprintf(pbuf+len, msg, ap);
|
||||
switch (level) {
|
||||
case 0: /* FATAL LOG */
|
||||
pr_crit("%s", pbuf);
|
||||
break;
|
||||
case 1:
|
||||
pr_err("%s", pbuf);
|
||||
break;
|
||||
case 2:
|
||||
pr_warn("%s", pbuf);
|
||||
break;
|
||||
default:
|
||||
pr_info("%s", pbuf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
void
|
||||
qla2x00_dump_regs(scsi_qla_host_t *vha)
|
||||
ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
|
||||
{
|
||||
int i;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
|
||||
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
|
||||
uint16_t __iomem *mbx_reg;
|
||||
|
||||
mbx_reg = IS_FWI2_CAPABLE(ha) ? ®24->mailbox0:
|
||||
MAILBOX_REG(ha, reg, 0);
|
||||
if ((level & ql2xextended_error_logging) == level) {
|
||||
|
||||
printk("Mailbox registers:\n");
|
||||
for (i = 0; i < 6; i++)
|
||||
printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
|
||||
RD_REG_WORD(mbx_reg++));
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
qla2x00_dump_buffer(uint8_t * b, uint32_t size)
|
||||
{
|
||||
uint32_t cnt;
|
||||
uint8_t c;
|
||||
|
||||
printk(" 0 1 2 3 4 5 6 7 8 9 "
|
||||
"Ah Bh Ch Dh Eh Fh\n");
|
||||
printk("----------------------------------------"
|
||||
"----------------------\n");
|
||||
|
||||
for (cnt = 0; cnt < size;) {
|
||||
c = *b++;
|
||||
printk("%02x",(uint32_t) c);
|
||||
cnt++;
|
||||
if (!(cnt % 16))
|
||||
printk("\n");
|
||||
if (IS_QLA82XX(ha))
|
||||
mbx_reg = ®82->mailbox_in[0];
|
||||
else if (IS_FWI2_CAPABLE(ha))
|
||||
mbx_reg = ®24->mailbox0;
|
||||
else
|
||||
printk(" ");
|
||||
mbx_reg = MAILBOX_REG(ha, reg, 0);
|
||||
|
||||
ql_dbg(level, vha, id, "Mailbox registers:\n");
|
||||
for (i = 0; i < 6; i++)
|
||||
ql_dbg(level, vha, id,
|
||||
"mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
|
||||
}
|
||||
if (cnt % 16)
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
|
||||
ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
|
||||
uint8_t *b, uint32_t size)
|
||||
{
|
||||
uint32_t cnt;
|
||||
uint8_t c;
|
||||
uint8_t last16[16], cur16[16];
|
||||
uint32_t lc = 0, num_same16 = 0, j;
|
||||
if ((level & ql2xextended_error_logging) == level) {
|
||||
|
||||
printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
|
||||
"Ah Bh Ch Dh Eh Fh\n");
|
||||
printk(KERN_DEBUG "----------------------------------------"
|
||||
"----------------------\n");
|
||||
ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
|
||||
"9 Ah Bh Ch Dh Eh Fh\n");
|
||||
ql_dbg(level, vha, id, "----------------------------------"
|
||||
"----------------------------\n");
|
||||
|
||||
for (cnt = 0; cnt < size;) {
|
||||
c = *b++;
|
||||
|
||||
cur16[lc++] = c;
|
||||
|
||||
cnt++;
|
||||
ql_dbg(level, vha, id, "");
|
||||
for (cnt = 0; cnt < size;) {
|
||||
c = *b++;
|
||||
printk("%02x", (uint32_t) c);
|
||||
cnt++;
|
||||
if (!(cnt % 16))
|
||||
printk("\n");
|
||||
else
|
||||
printk(" ");
|
||||
}
|
||||
if (cnt % 16)
|
||||
continue;
|
||||
|
||||
/* We have 16 now */
|
||||
lc = 0;
|
||||
if (num_same16 == 0) {
|
||||
memcpy(last16, cur16, 16);
|
||||
num_same16++;
|
||||
continue;
|
||||
}
|
||||
if (memcmp(cur16, last16, 16) == 0) {
|
||||
num_same16++;
|
||||
continue;
|
||||
}
|
||||
for (j = 0; j < 16; j++)
|
||||
printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
|
||||
printk(KERN_DEBUG "\n");
|
||||
|
||||
if (num_same16 > 1)
|
||||
printk(KERN_DEBUG "> prev pattern repeats (%u)"
|
||||
"more times\n", num_same16-1);
|
||||
memcpy(last16, cur16, 16);
|
||||
num_same16 = 1;
|
||||
}
|
||||
|
||||
if (num_same16) {
|
||||
for (j = 0; j < 16; j++)
|
||||
printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
|
||||
printk(KERN_DEBUG "\n");
|
||||
|
||||
if (num_same16 > 1)
|
||||
printk(KERN_DEBUG "> prev pattern repeats (%u)"
|
||||
"more times\n", num_same16-1);
|
||||
}
|
||||
if (lc) {
|
||||
for (j = 0; j < lc; j++)
|
||||
printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
|
||||
printk(KERN_DEBUG "\n");
|
||||
ql_dbg(level, vha, id, "\n");
|
||||
}
|
||||
}
|
||||
|
@ -7,146 +7,6 @@
|
||||
|
||||
#include "qla_def.h"
|
||||
|
||||
/*
|
||||
* Driver debug definitions.
|
||||
*/
|
||||
/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
|
||||
/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
|
||||
/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
|
||||
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
|
||||
/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
|
||||
|
||||
/*
|
||||
* Macros use for debugging the driver.
|
||||
*/
|
||||
|
||||
#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_1)
|
||||
#define DEBUG1(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG1(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_3)
|
||||
#define DEBUG3(x) do {x;} while (0)
|
||||
#define DEBUG3_11(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG3(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_4)
|
||||
#define DEBUG4(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG4(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_5)
|
||||
#define DEBUG5(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG5(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_7)
|
||||
#define DEBUG7(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG7(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_9)
|
||||
#define DEBUG9(x) do {x;} while (0)
|
||||
#define DEBUG9_10(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG9(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_10)
|
||||
#define DEBUG10(x) do {x;} while (0)
|
||||
#define DEBUG9_10(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG10(x) do {} while (0)
|
||||
#if !defined(DEBUG9_10)
|
||||
#define DEBUG9_10(x) do {} while (0)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_11)
|
||||
#define DEBUG11(x) do{x;} while(0)
|
||||
#if !defined(DEBUG3_11)
|
||||
#define DEBUG3_11(x) do{x;} while(0)
|
||||
#endif
|
||||
#else
|
||||
#define DEBUG11(x) do{} while(0)
|
||||
#if !defined(QL_DEBUG_LEVEL_3)
|
||||
#define DEBUG3_11(x) do{} while(0)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_12)
|
||||
#define DEBUG12(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG12(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_13)
|
||||
#define DEBUG13(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG13(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_14)
|
||||
#define DEBUG14(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG14(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_15)
|
||||
#define DEBUG15(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG15(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_16)
|
||||
#define DEBUG16(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG16(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_17)
|
||||
#define DEBUG17(x) do {x;} while (0)
|
||||
#else
|
||||
#define DEBUG17(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
#if defined(QL_DEBUG_LEVEL_18)
|
||||
#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
|
||||
#else
|
||||
#define DEBUG18(x) do {} while (0)
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Firmware Dump structure definition
|
||||
*/
|
||||
@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
|
||||
struct qla81xx_fw_dump isp81;
|
||||
} isp;
|
||||
};
|
||||
|
||||
#define QL_MSGHDR "qla2xxx"
|
||||
|
||||
#define ql_log_fatal 0 /* display fatal errors */
|
||||
#define ql_log_warn 1 /* display critical errors */
|
||||
#define ql_log_info 2 /* display all recovered errors */
|
||||
#define ql_log_all 3 /* This value is only used by ql_errlev.
|
||||
* No messages will use this value.
|
||||
* This should be always highest value
|
||||
* as compared to other log levels.
|
||||
*/
|
||||
|
||||
extern int ql_errlev;
|
||||
|
||||
void
|
||||
ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
|
||||
void
|
||||
ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
|
||||
|
||||
void
|
||||
ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
|
||||
void
|
||||
ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
|
||||
|
||||
/* Debug Levels */
|
||||
/* The 0x40000000 is the max value any debug level can have
|
||||
* as ql2xextended_error_logging is of type signed int
|
||||
*/
|
||||
#define ql_dbg_init 0x40000000 /* Init Debug */
|
||||
#define ql_dbg_mbx 0x20000000 /* MBX Debug */
|
||||
#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
|
||||
#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
|
||||
#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */
|
||||
#define ql_dbg_async 0x02000000 /* Async events Debug */
|
||||
#define ql_dbg_timer 0x01000000 /* Timer Debug */
|
||||
#define ql_dbg_user 0x00800000 /* User Space Interations Debug */
|
||||
#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
|
||||
#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
|
||||
#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
|
||||
#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
|
||||
#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
|
||||
#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
|
||||
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
|
||||
* not covered by upper categories
|
||||
*/
|
||||
|
||||
#define QL_DBG_BUF_LEN 512
|
||||
|
@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
|
||||
/* Pause tracing to flush FCE buffers. */
|
||||
rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
|
||||
if (rval)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x705c,
|
||||
"DebugFS: Unable to disable FCE (%d).\n", rval);
|
||||
|
||||
ha->flags.fce_enabled = 0;
|
||||
@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
|
||||
rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
|
||||
ha->fce_mb, &ha->fce_bufs);
|
||||
if (rval) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x700d,
|
||||
"DebugFS: Unable to reinitialize FCE (%d).\n", rval);
|
||||
ha->flags.fce_enabled = 0;
|
||||
}
|
||||
@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
|
||||
atomic_set(&qla2x00_dfs_root_count, 0);
|
||||
qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
|
||||
if (!qla2x00_dfs_root) {
|
||||
qla_printk(KERN_NOTICE, ha,
|
||||
"DebugFS: Unable to create root directory.\n");
|
||||
ql_log(ql_log_warn, vha, 0x00f7,
|
||||
"Unable to create debugfs root directory.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -137,8 +137,8 @@ create_dir:
|
||||
mutex_init(&ha->fce_mutex);
|
||||
ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
|
||||
if (!ha->dfs_dir) {
|
||||
qla_printk(KERN_NOTICE, ha,
|
||||
"DebugFS: Unable to create ha directory.\n");
|
||||
ql_log(ql_log_warn, vha, 0x00f8,
|
||||
"Unable to create debugfs ha directory.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -148,8 +148,8 @@ create_nodes:
|
||||
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
|
||||
&dfs_fce_ops);
|
||||
if (!ha->dfs_fce) {
|
||||
qla_printk(KERN_NOTICE, ha,
|
||||
"DebugFS: Unable to fce node.\n");
|
||||
ql_log(ql_log_warn, vha, 0x00f9,
|
||||
"Unable to create debugfs fce node.\n");
|
||||
goto out;
|
||||
}
|
||||
out:
|
||||
|
@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
|
||||
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
|
||||
uint16_t, uint16_t, uint8_t);
|
||||
extern int qla2x00_start_sp(srb_t *);
|
||||
extern uint16_t qla24xx_calc_iocbs(uint16_t);
|
||||
extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
|
||||
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
|
||||
extern int qla24xx_dif_start_scsi(srb_t *);
|
||||
|
||||
@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla2x00_dump_regs(scsi_qla_host_t *);
|
||||
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
|
||||
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
|
||||
extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
|
||||
extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
|
||||
uint8_t *, uint32_t);
|
||||
|
||||
/*
|
||||
* Global Function Prototypes in qla_gs.c source file.
|
||||
@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
|
||||
extern int qla2x00_echo_test(scsi_qla_host_t *,
|
||||
struct msg_echo_lb *, uint16_t *);
|
||||
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
|
||||
extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
|
||||
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
|
||||
struct qla_fcp_prio_cfg *, uint8_t);
|
||||
|
||||
/*
|
||||
* Global Function Prototypes in qla_dfs.c source file.
|
||||
|
@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
|
||||
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
if (ms_pkt->entry_status != 0) {
|
||||
DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
|
||||
"(%x) on port_id: %02x%02x%02x.\n",
|
||||
vha->host_no, routine, ms_pkt->entry_status,
|
||||
vha->d_id.b.domain, vha->d_id.b.area,
|
||||
vha->d_id.b.al_pa));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2031,
|
||||
"%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
|
||||
routine, ms_pkt->entry_status, vha->d_id.b.domain,
|
||||
vha->d_id.b.area, vha->d_id.b.al_pa);
|
||||
} else {
|
||||
if (IS_FWI2_CAPABLE(ha))
|
||||
comp_status = le16_to_cpu(
|
||||
@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
|
||||
case CS_DATA_OVERRUN: /* Overrun? */
|
||||
if (ct_rsp->header.response !=
|
||||
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
|
||||
DEBUG2_3(printk("scsi(%ld): %s failed, "
|
||||
"rejected request on port_id: %02x%02x%02x\n",
|
||||
vha->host_no, routine,
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
|
||||
"%s failed rejected request on port_id: "
|
||||
"%02x%02x%02x.\n", routine,
|
||||
vha->d_id.b.domain, vha->d_id.b.area,
|
||||
vha->d_id.b.al_pa));
|
||||
DEBUG2_3(qla2x00_dump_buffer(
|
||||
(uint8_t *)&ct_rsp->header,
|
||||
sizeof(struct ct_rsp_hdr)));
|
||||
vha->d_id.b.al_pa);
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
|
||||
0x2078, (uint8_t *)&ct_rsp->header,
|
||||
sizeof(struct ct_rsp_hdr));
|
||||
rval = QLA_INVALID_COMMAND;
|
||||
} else
|
||||
rval = QLA_SUCCESS;
|
||||
break;
|
||||
default:
|
||||
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
|
||||
"status (%x) on port_id: %02x%02x%02x.\n",
|
||||
vha->host_no, routine, comp_status,
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2033,
|
||||
"%s failed, completion status (%x) on port_id: "
|
||||
"%02x%02x%02x.\n", routine, comp_status,
|
||||
vha->d_id.b.domain, vha->d_id.b.area,
|
||||
vha->d_id.b.al_pa));
|
||||
vha->d_id.b.al_pa);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2062,
|
||||
"GA_NXT issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
|
||||
fcport->d_id.b.domain = 0xf0;
|
||||
|
||||
DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
|
||||
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2063,
|
||||
"GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"portid=%02x%02x%02x.\n",
|
||||
vha->host_no,
|
||||
"port_id=%02x%02x%02x.\n",
|
||||
fcport->node_name[0], fcport->node_name[1],
|
||||
fcport->node_name[2], fcport->node_name[3],
|
||||
fcport->node_name[4], fcport->node_name[5],
|
||||
@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
fcport->port_name[4], fcport->port_name[5],
|
||||
fcport->port_name[6], fcport->port_name[7],
|
||||
fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa));
|
||||
fcport->d_id.b.al_pa);
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2055,
|
||||
"GID_PT issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
|
||||
"(%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2056,
|
||||
"GPN_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
|
||||
"GPN_ID") != QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
|
||||
"(%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2057,
|
||||
"GNN_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
|
||||
"GNN_ID") != QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
memcpy(list[i].node_name,
|
||||
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
|
||||
|
||||
DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
|
||||
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2058,
|
||||
"GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
|
||||
"pn %02x%02x%02x%02x%02x%02x%02X%02x "
|
||||
"portid=%02x%02x%02x.\n",
|
||||
vha->host_no,
|
||||
list[i].node_name[0], list[i].node_name[1],
|
||||
list[i].node_name[2], list[i].node_name[3],
|
||||
list[i].node_name[4], list[i].node_name[5],
|
||||
@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
list[i].port_name[4], list[i].port_name[5],
|
||||
list[i].port_name[6], list[i].port_name[7],
|
||||
list[i].d_id.b.domain, list[i].d_id.b.area,
|
||||
list[i].d_id.b.al_pa));
|
||||
list[i].d_id.b.al_pa);
|
||||
}
|
||||
|
||||
/* Last device exit. */
|
||||
@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2043,
|
||||
"RFT_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2044,
|
||||
"RFT_ID exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
|
||||
struct ct_sns_rsp *ct_rsp;
|
||||
|
||||
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
|
||||
DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
|
||||
"ISP2100/ISP2200.\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2046,
|
||||
"RFF_ID call not supported on ISP2100/ISP2200.\n");
|
||||
return (QLA_SUCCESS);
|
||||
}
|
||||
|
||||
@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2047,
|
||||
"RFF_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2048,
|
||||
"RFF_ID exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x204d,
|
||||
"RNN_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x204e,
|
||||
"RNN_ID exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
|
||||
struct ct_sns_rsp *ct_rsp;
|
||||
|
||||
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
|
||||
DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
|
||||
"ISP2100/ISP2200.\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2050,
|
||||
"RSNN_ID call unsupported on ISP2100/ISP2200.\n");
|
||||
return (QLA_SUCCESS);
|
||||
}
|
||||
|
||||
@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2051,
|
||||
"RSNN_NN issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2052,
|
||||
"RSNN_NN exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x205f,
|
||||
"GA_NXT Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
|
||||
sns_cmd->p.gan_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
|
||||
"ga_nxt_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
|
||||
"GA_NXT failed, rejected request ga_nxt_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
|
||||
sns_cmd->p.gan_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
/* Populate fc_port_t entry. */
|
||||
@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
|
||||
fcport->d_id.b.domain = 0xf0;
|
||||
|
||||
DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
|
||||
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2061,
|
||||
"GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"portid=%02x%02x%02x.\n",
|
||||
vha->host_no,
|
||||
"port_id=%02x%02x%02x.\n",
|
||||
fcport->node_name[0], fcport->node_name[1],
|
||||
fcport->node_name[2], fcport->node_name[3],
|
||||
fcport->node_name[4], fcport->node_name[5],
|
||||
@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
fcport->port_name[4], fcport->port_name[5],
|
||||
fcport->port_name[6], fcport->port_name[7],
|
||||
fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa));
|
||||
fcport->d_id.b.al_pa);
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x206d,
|
||||
"GID_PT Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.gid_data[8] != 0x80 ||
|
||||
sns_cmd->p.gid_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
|
||||
"gid_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x202f,
|
||||
"GID_PT failed, rejected request, gid_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
|
||||
sns_cmd->p.gid_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
/* Set port IDs in switch info list. */
|
||||
@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
|
||||
"(%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2032,
|
||||
"GPN_ID Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
|
||||
sns_cmd->p.gpn_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
|
||||
"request, gpn_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
|
||||
"GPN_ID failed, rejected request, gpn_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
|
||||
sns_cmd->p.gpn_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
/* Save portname */
|
||||
@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
|
||||
"(%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203f,
|
||||
"GNN_ID Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
|
||||
sns_cmd->p.gnn_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
|
||||
"request, gnn_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
|
||||
"GNN_ID failed, rejected request, gnn_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
|
||||
sns_cmd->p.gnn_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
/* Save nodename */
|
||||
memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
|
||||
WWN_SIZE);
|
||||
|
||||
DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
|
||||
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
ql_dbg(ql_dbg_disc, vha, 0x206e,
|
||||
"GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
|
||||
"portid=%02x%02x%02x.\n",
|
||||
vha->host_no,
|
||||
"port_id=%02x%02x%02x.\n",
|
||||
list[i].node_name[0], list[i].node_name[1],
|
||||
list[i].node_name[2], list[i].node_name[3],
|
||||
list[i].node_name[4], list[i].node_name[5],
|
||||
@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
list[i].port_name[4], list[i].port_name[5],
|
||||
list[i].port_name[6], list[i].port_name[7],
|
||||
list[i].d_id.b.domain, list[i].d_id.b.area,
|
||||
list[i].d_id.b.al_pa));
|
||||
list[i].d_id.b.al_pa);
|
||||
}
|
||||
|
||||
/* Last device exit. */
|
||||
@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
|
||||
sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2060,
|
||||
"RFT_ID Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.rft_data[8] != 0x80 ||
|
||||
sns_cmd->p.rft_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
|
||||
"rft_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
|
||||
"RFT_ID failed, rejected request rft_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
|
||||
sns_cmd->p.rft_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2073,
|
||||
"RFT_ID exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
|
||||
sizeof(struct sns_cmd_pkt));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x204a,
|
||||
"RNN_ID Send SNS failed (%d).\n", rval);
|
||||
} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
|
||||
sns_cmd->p.rnn_data[9] != 0x02) {
|
||||
DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
|
||||
"rnn_rsp:\n", vha->host_no));
|
||||
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
|
||||
"RNN_ID failed, rejected request, rnn_rsp:\n");
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
|
||||
sns_cmd->p.rnn_data, 16);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x204c,
|
||||
"RNN_ID exiting normally.\n");
|
||||
}
|
||||
|
||||
return (rval);
|
||||
@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
|
||||
ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
|
||||
mb, BIT_1|BIT_0);
|
||||
if (mb[0] != MBS_COMMAND_COMPLETE) {
|
||||
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
|
||||
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
|
||||
__func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
|
||||
mb[2], mb[6], mb[7]));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2024,
|
||||
"Failed management_server login: loopid=%x mb[0]=%x "
|
||||
"mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
|
||||
vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
} else
|
||||
vha->flags.management_server_logged_in = 1;
|
||||
@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
|
||||
size += 4 + WWN_SIZE;
|
||||
|
||||
DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
|
||||
__func__, vha->host_no,
|
||||
eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
|
||||
eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
|
||||
eiter->a.node_name[6], eiter->a.node_name[7]));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2025,
|
||||
"NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
|
||||
eiter->a.node_name[0], eiter->a.node_name[1],
|
||||
eiter->a.node_name[2], eiter->a.node_name[3],
|
||||
eiter->a.node_name[4], eiter->a.node_name[5],
|
||||
eiter->a.node_name[6], eiter->a.node_name[7]);
|
||||
|
||||
/* Manufacturer. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.manufacturer));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2026,
|
||||
"Manufacturer = %s.\n", eiter->a.manufacturer);
|
||||
|
||||
/* Serial number. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.serial_num));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2027,
|
||||
"Serial no. = %s.\n", eiter->a.serial_num);
|
||||
|
||||
/* Model name. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.model));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2028,
|
||||
"Model Name = %s.\n", eiter->a.model);
|
||||
|
||||
/* Model description. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.model_desc));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2029,
|
||||
"Model Desc = %s.\n", eiter->a.model_desc);
|
||||
|
||||
/* Hardware version. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.hw_version));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x202a,
|
||||
"Hardware ver = %s.\n", eiter->a.hw_version);
|
||||
|
||||
/* Driver version. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.driver_version));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x202b,
|
||||
"Driver ver = %s.\n", eiter->a.driver_version);
|
||||
|
||||
/* Option ROM version. */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.orom_version));
|
||||
ql_dbg(ql_dbg_disc, vha , 0x202c,
|
||||
"Optrom vers = %s.\n", eiter->a.orom_version);
|
||||
|
||||
/* Firmware version */
|
||||
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
|
||||
@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.fw_version));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x202d,
|
||||
"Firmware vers = %s.\n", eiter->a.fw_version);
|
||||
|
||||
/* Update MS request size. */
|
||||
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
|
||||
|
||||
DEBUG13(printk("%s(%ld): RHBA identifier="
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
|
||||
vha->host_no, ct_req->req.rhba.hba_identifier[0],
|
||||
ql_dbg(ql_dbg_disc, vha, 0x202e,
|
||||
"RHBA identifier = "
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
|
||||
ct_req->req.rhba.hba_identifier[0],
|
||||
ct_req->req.rhba.hba_identifier[1],
|
||||
ct_req->req.rhba.hba_identifier[2],
|
||||
ct_req->req.rhba.hba_identifier[3],
|
||||
ct_req->req.rhba.hba_identifier[4],
|
||||
ct_req->req.rhba.hba_identifier[5],
|
||||
ct_req->req.rhba.hba_identifier[6],
|
||||
ct_req->req.rhba.hba_identifier[7], size));
|
||||
DEBUG13(qla2x00_dump_buffer(entries, size));
|
||||
ct_req->req.rhba.hba_identifier[7], size);
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
|
||||
entries, size);
|
||||
|
||||
/* Execute MS IOCB */
|
||||
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2030,
|
||||
"RHBA issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
|
||||
ct_rsp->header.explanation_code ==
|
||||
CT_EXPL_ALREADY_REGISTERED) {
|
||||
DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
|
||||
__func__, vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2034,
|
||||
"HBA already registered.\n");
|
||||
rval = QLA_ALREADY_REGISTERED;
|
||||
}
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2035,
|
||||
"RHBA exiting normally.\n");
|
||||
}
|
||||
|
||||
return rval;
|
||||
@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
|
||||
/* Prepare FDMI command arguments -- portname. */
|
||||
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
|
||||
|
||||
DEBUG13(printk("%s(%ld): DHBA portname="
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2036,
|
||||
"DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
|
||||
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
|
||||
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
|
||||
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
|
||||
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
|
||||
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
|
||||
|
||||
/* Execute MS IOCB */
|
||||
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2037,
|
||||
"DHBA issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2038,
|
||||
"DHBA exiting normally.\n");
|
||||
}
|
||||
|
||||
return rval;
|
||||
@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter->a.fc4_types[2] = 0x01;
|
||||
size += 4 + 32;
|
||||
|
||||
DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
|
||||
vha->host_no, eiter->a.fc4_types[2],
|
||||
eiter->a.fc4_types[1]));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2039,
|
||||
"FC4_TYPES=%02x %02x.\n",
|
||||
eiter->a.fc4_types[2],
|
||||
eiter->a.fc4_types[1]);
|
||||
|
||||
/* Supported speed. */
|
||||
eiter = (struct ct_fdmi_port_attr *) (entries + size);
|
||||
@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
FDMI_PORT_SPEED_1GB);
|
||||
size += 4 + 4;
|
||||
|
||||
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
|
||||
eiter->a.sup_speed));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203a,
|
||||
"Supported_Speed=%x.\n", eiter->a.sup_speed);
|
||||
|
||||
/* Current speed. */
|
||||
eiter = (struct ct_fdmi_port_attr *) (entries + size);
|
||||
@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
}
|
||||
size += 4 + 4;
|
||||
|
||||
DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
|
||||
eiter->a.cur_speed));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203b,
|
||||
"Current_Speed=%x.\n", eiter->a.cur_speed);
|
||||
|
||||
/* Max frame size. */
|
||||
eiter = (struct ct_fdmi_port_attr *) (entries + size);
|
||||
@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
|
||||
size += 4 + 4;
|
||||
|
||||
DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
|
||||
eiter->a.max_frame_size));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203c,
|
||||
"Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
|
||||
|
||||
/* OS device name. */
|
||||
eiter = (struct ct_fdmi_port_attr *) (entries + size);
|
||||
@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
|
||||
eiter->a.os_dev_name));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x204b,
|
||||
"OS_Device_Name=%s.\n", eiter->a.os_dev_name);
|
||||
|
||||
/* Hostname. */
|
||||
if (strlen(fc_host_system_hostname(vha->host))) {
|
||||
@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter->len = cpu_to_be16(4 + alen);
|
||||
size += 4 + alen;
|
||||
|
||||
DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
|
||||
vha->host_no, eiter->a.host_name));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203d,
|
||||
"HostName=%s.\n", eiter->a.host_name);
|
||||
}
|
||||
|
||||
/* Update MS request size. */
|
||||
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
|
||||
|
||||
DEBUG13(printk("%s(%ld): RPA portname="
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
|
||||
vha->host_no, ct_req->req.rpa.port_name[0],
|
||||
ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
|
||||
ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
|
||||
ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
|
||||
ct_req->req.rpa.port_name[7], size));
|
||||
DEBUG13(qla2x00_dump_buffer(entries, size));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x203e,
|
||||
"RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
|
||||
ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
|
||||
ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
|
||||
ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
|
||||
ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
|
||||
size);
|
||||
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
|
||||
entries, size);
|
||||
|
||||
/* Execute MS IOCB */
|
||||
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
|
||||
vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2040,
|
||||
"RPA issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
|
||||
QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else {
|
||||
DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2041,
|
||||
"RPA exiting nornally.\n");
|
||||
}
|
||||
|
||||
return rval;
|
||||
@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
|
||||
"failed (%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2023,
|
||||
"GFPN_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
|
||||
"GFPN_ID") != QLA_SUCCESS) {
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
|
||||
"failed (%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2059,
|
||||
"GPSC issue IOCB failed (%d).\n", rval);
|
||||
} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
|
||||
"GPSC")) != QLA_SUCCESS) {
|
||||
/* FM command unsupported? */
|
||||
@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
CT_REASON_INVALID_COMMAND_CODE ||
|
||||
ct_rsp->header.reason_code ==
|
||||
CT_REASON_COMMAND_UNSUPPORTED)) {
|
||||
DEBUG2(printk("scsi(%ld): GPSC command "
|
||||
"unsupported, disabling query...\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x205a,
|
||||
"GPSC command unsupported, disabling "
|
||||
"query.\n");
|
||||
ha->flags.gpsc_supported = 0;
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
break;
|
||||
}
|
||||
|
||||
DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
|
||||
"fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
|
||||
"speed=%04x.\n", vha->host_no,
|
||||
ql_dbg(ql_dbg_disc, vha, 0x205b,
|
||||
"GPSC ext entry - fpn "
|
||||
"%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
|
||||
"speed=%04x.\n",
|
||||
list[i].fabric_port_name[0],
|
||||
list[i].fabric_port_name[1],
|
||||
list[i].fabric_port_name[2],
|
||||
@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
list[i].fabric_port_name[6],
|
||||
list[i].fabric_port_name[7],
|
||||
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
|
||||
be16_to_cpu(ct_rsp->rsp.gpsc.speed)));
|
||||
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
|
||||
}
|
||||
|
||||
/* Last device exit. */
|
||||
@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
|
||||
sizeof(ms_iocb_entry_t));
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
DEBUG2_3(printk(KERN_INFO
|
||||
"scsi(%ld): GFF_ID issue IOCB failed "
|
||||
"(%d).\n", vha->host_no, rval));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x205c,
|
||||
"GFF_ID issue IOCB failed (%d).\n", rval);
|
||||
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
|
||||
"GFF_ID") != QLA_SUCCESS) {
|
||||
DEBUG2_3(printk(KERN_INFO
|
||||
"scsi(%ld): GFF_ID IOCB status had a "
|
||||
"failure status code\n", vha->host_no));
|
||||
ql_dbg(ql_dbg_disc, vha, 0x205d,
|
||||
"GFF_ID IOCB status had a failure status code.\n");
|
||||
} else {
|
||||
fcp_scsi_features =
|
||||
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
|
||||
|
||||
/* Don't print state transitions during initial allocation of fcport */
|
||||
if (old_state && old_state != state) {
|
||||
DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
|
||||
"scsi(%ld): FCPort state transitioned from %s to %s - "
|
||||
"portid=%02x%02x%02x.\n", fcport->vha->host_no,
|
||||
ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
|
||||
"FCPort state transitioned from %s to %s - "
|
||||
"portid=%02x%02x%02x.\n",
|
||||
port_state_str[old_state], port_state_str[state],
|
||||
fcport->d_id.b.domain, fcport->d_id.b.area,
|
||||
fcport->d_id.b.al_pa));
|
||||
fcport->d_id.b.al_pa);
|
||||
}
|
||||
}
|
||||
|
@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
|
||||
|
||||
/* We only support T10 DIF right now */
|
||||
if (guard != SHOST_DIX_GUARD_CRC) {
|
||||
DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
|
||||
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
|
||||
"Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
|
||||
|
||||
/* Send marker if required */
|
||||
if (vha->marker_needed != 0) {
|
||||
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
|
||||
!= QLA_SUCCESS)
|
||||
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
|
||||
QLA_SUCCESS) {
|
||||
return (QLA_FUNCTION_FAILED);
|
||||
}
|
||||
vha->marker_needed = 0;
|
||||
}
|
||||
|
||||
@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
|
||||
mrk24 = NULL;
|
||||
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
|
||||
if (mrk == NULL) {
|
||||
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
|
||||
__func__, base_vha->host_no));
|
||||
ql_log(ql_log_warn, base_vha, 0x3026,
|
||||
"Failed to allocate Marker IOCB.\n");
|
||||
|
||||
return (QLA_FUNCTION_FAILED);
|
||||
}
|
||||
@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
|
||||
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
|
||||
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
|
||||
|
||||
DEBUG5(printk("%s(): IOCB data:\n", __func__));
|
||||
DEBUG5(qla2x00_dump_buffer(
|
||||
(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
|
||||
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
|
||||
"IOCB data:\n");
|
||||
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
|
||||
(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
|
||||
|
||||
/* Adjust ring index. */
|
||||
req->ring_index++;
|
||||
@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
|
||||
* Returns the number of IOCB entries needed to store @dsds.
|
||||
*/
|
||||
inline uint16_t
|
||||
qla24xx_calc_iocbs(uint16_t dsds)
|
||||
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
|
||||
{
|
||||
uint16_t iocbs;
|
||||
|
||||
@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
|
||||
if ((dsds - 1) % 5)
|
||||
iocbs++;
|
||||
}
|
||||
DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
|
||||
__func__, iocbs));
|
||||
return iocbs;
|
||||
}
|
||||
|
||||
@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
||||
unsigned int protcnt)
|
||||
{
|
||||
struct sd_dif_tuple *spt;
|
||||
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
||||
unsigned char op = scsi_get_prot_op(cmd);
|
||||
|
||||
switch (scsi_get_prot_type(cmd)) {
|
||||
@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
||||
op == SCSI_PROT_WRITE_PASS)) {
|
||||
spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
|
||||
scsi_prot_sglist(cmd)[0].offset;
|
||||
DEBUG18(printk(KERN_DEBUG
|
||||
"%s(): LBA from user %p, lba = 0x%x\n",
|
||||
__func__, spt, (int)spt->ref_tag));
|
||||
ql_dbg(ql_dbg_io, vha, 0x3008,
|
||||
"LBA from user %p, lba = 0x%x for cmd=%p.\n",
|
||||
spt, (int)spt->ref_tag, cmd);
|
||||
pkt->ref_tag = swab32(spt->ref_tag);
|
||||
pkt->app_tag_mask[0] = 0x0;
|
||||
pkt->app_tag_mask[1] = 0x0;
|
||||
@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
||||
break;
|
||||
}
|
||||
|
||||
DEBUG18(printk(KERN_DEBUG
|
||||
"%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
|
||||
" app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
|
||||
" prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
|
||||
(int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
|
||||
ql_dbg(ql_dbg_io, vha, 0x3009,
|
||||
"Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
|
||||
"prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
|
||||
pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
|
||||
scsi_get_prot_type(cmd), cmd);
|
||||
}
|
||||
|
||||
|
||||
@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
uint32_t *cur_dsd = dsd;
|
||||
int i;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
|
||||
|
||||
uint8_t *cp;
|
||||
|
||||
@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
cur_dsd = (uint32_t *)next_dsd;
|
||||
}
|
||||
sle_dma = sg_dma_address(sg);
|
||||
DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
|
||||
" len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
|
||||
MSD(sle_dma), sg_dma_len(sg)));
|
||||
ql_dbg(ql_dbg_io, vha, 0x300a,
|
||||
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
|
||||
cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
|
||||
sp->cmd);
|
||||
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
|
||||
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
|
||||
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
|
||||
@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
|
||||
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
|
||||
cp = page_address(sg_page(sg)) + sg->offset;
|
||||
DEBUG18(printk("%s(): User Data buffer= %p:\n",
|
||||
__func__ , cp));
|
||||
ql_dbg(ql_dbg_io, vha, 0x300b,
|
||||
"User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
|
||||
}
|
||||
}
|
||||
/* Null termination */
|
||||
@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
|
||||
struct scsi_cmnd *cmd;
|
||||
uint32_t *cur_dsd = dsd;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
uint8_t *cp;
|
||||
|
||||
|
||||
@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
|
||||
}
|
||||
sle_dma = sg_dma_address(sg);
|
||||
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
|
||||
DEBUG18(printk(KERN_DEBUG
|
||||
"%s(): %p, sg entry %d - addr =0x%x"
|
||||
"0x%x, len =%d\n", __func__ , cur_dsd, i,
|
||||
LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
|
||||
ql_dbg(ql_dbg_io, vha, 0x3027,
|
||||
"%s(): %p, sg_entry %d - "
|
||||
"addr=0x%x0x%x, len=%d.\n",
|
||||
__func__, cur_dsd, i,
|
||||
LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
|
||||
}
|
||||
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
|
||||
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
|
||||
@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
|
||||
|
||||
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
|
||||
cp = page_address(sg_page(sg)) + sg->offset;
|
||||
DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
|
||||
__func__ , cp));
|
||||
ql_dbg(ql_dbg_io, vha, 0x3028,
|
||||
"%s(): Protection Data buffer = %p.\n", __func__,
|
||||
cp);
|
||||
}
|
||||
avail_dsds--;
|
||||
}
|
||||
@ -996,21 +1002,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
|
||||
|
||||
/* No data transfer */
|
||||
data_bytes = scsi_bufflen(cmd);
|
||||
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
|
||||
DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
|
||||
__func__, data_bytes));
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
vha = sp->fcport->vha;
|
||||
ha = vha->hw;
|
||||
|
||||
DEBUG18(printk(KERN_DEBUG
|
||||
"%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
|
||||
vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
|
||||
/* No data transfer */
|
||||
data_bytes = scsi_bufflen(cmd);
|
||||
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
cmd_pkt->vp_index = sp->fcport->vp_idx;
|
||||
|
||||
@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
|
||||
/* Determine SCSI command length -- align to 4 byte boundary */
|
||||
if (cmd->cmd_len > 16) {
|
||||
DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
|
||||
__func__));
|
||||
additional_fcpcdb_len = cmd->cmd_len - 16;
|
||||
if ((cmd->cmd_len % 4) != 0) {
|
||||
/* SCSI cmd > 16 bytes must be multiple of 4 */
|
||||
@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
|
||||
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
|
||||
|
||||
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
|
||||
"entries %d, data bytes %d, Protection entries %d\n",
|
||||
__func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
|
||||
data_bytes, tot_prot_dsds));
|
||||
|
||||
/* Compute dif len and adjust data len to incude protection */
|
||||
total_bytes = data_bytes;
|
||||
dif_bytes = 0;
|
||||
@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
additional_fcpcdb_len);
|
||||
*fcp_dl = htonl(total_bytes);
|
||||
|
||||
DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
|
||||
" = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
|
||||
vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
|
||||
crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
|
||||
|
||||
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
|
||||
DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
|
||||
__func__, data_bytes));
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
return QLA_SUCCESS;
|
||||
|
||||
crc_queuing_error:
|
||||
DEBUG18(qla_printk(KERN_INFO, ha,
|
||||
"CMD sent FAILED crc_q error:sp = %p\n", sp));
|
||||
/* Cleanup will be performed by the caller */
|
||||
|
||||
return QLA_FUNCTION_FAILED;
|
||||
@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
|
||||
/* Send marker if required */
|
||||
if (vha->marker_needed != 0) {
|
||||
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
|
||||
!= QLA_SUCCESS)
|
||||
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
|
||||
QLA_SUCCESS)
|
||||
return QLA_FUNCTION_FAILED;
|
||||
vha->marker_needed = 0;
|
||||
}
|
||||
@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
if (!req->outstanding_cmds[handle])
|
||||
break;
|
||||
}
|
||||
if (index == MAX_OUTSTANDING_COMMANDS)
|
||||
if (index == MAX_OUTSTANDING_COMMANDS) {
|
||||
goto queuing_error;
|
||||
}
|
||||
|
||||
/* Map the sg table so we have an accurate count of sg entries needed */
|
||||
if (scsi_sg_count(cmd)) {
|
||||
@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
nseg = 0;
|
||||
|
||||
tot_dsds = nseg;
|
||||
|
||||
req_cnt = qla24xx_calc_iocbs(tot_dsds);
|
||||
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
|
||||
if (req->cnt < (req_cnt + 2)) {
|
||||
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
|
||||
|
||||
@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
/* Specify response queue number where completion should happen */
|
||||
cmd_pkt->entry_status = (uint8_t) rsp->id;
|
||||
wmb();
|
||||
|
||||
/* Adjust ring index. */
|
||||
req->ring_index++;
|
||||
if (req->ring_index == req->length) {
|
||||
@ -1534,9 +1517,6 @@ queuing_error:
|
||||
/* Cleanup will be performed by the caller (queuecommand) */
|
||||
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
DEBUG18(qla_printk(KERN_INFO, ha,
|
||||
"CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
|
||||
if (!req->outstanding_cmds[handle])
|
||||
break;
|
||||
}
|
||||
if (index == MAX_OUTSTANDING_COMMANDS)
|
||||
if (index == MAX_OUTSTANDING_COMMANDS) {
|
||||
ql_log(ql_log_warn, vha, 0x700b,
|
||||
"No room on oustanding cmd array.\n");
|
||||
goto queuing_error;
|
||||
}
|
||||
|
||||
/* Prep command array. */
|
||||
req->current_outstanding_cmd = handle;
|
||||
@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
|
||||
if (!pkt)
|
||||
if (!pkt) {
|
||||
ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
|
||||
"qla2x00_alloc_iocbs failed.\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
switch (ctx->type) {
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
|
||||
mutex_lock(&ha->vport_lock);
|
||||
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
|
||||
if (vp_id > ha->max_npiv_vports) {
|
||||
DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
|
||||
vp_id, ha->max_npiv_vports));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa000,
|
||||
"vp_id %d is bigger than max-supported %d.\n",
|
||||
vp_id, ha->max_npiv_vports);
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
return vp_id;
|
||||
}
|
||||
@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
|
||||
fc_port_t *fcport;
|
||||
|
||||
list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
||||
DEBUG15(printk("scsi(%ld): Marking port dead, "
|
||||
"loop_id=0x%04x :%x\n",
|
||||
vha->host_no, fcport->loop_id, fcport->vp_idx));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa001,
|
||||
"Marking port dead, loop_id=0x%04x : %x.\n",
|
||||
fcport->loop_id, fcport->vp_idx);
|
||||
|
||||
qla2x00_mark_device_lost(vha, fcport, 0, 0);
|
||||
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
|
||||
@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
|
||||
goto enable_failed;
|
||||
}
|
||||
|
||||
DEBUG15(qla_printk(KERN_INFO, ha,
|
||||
"Virtual port with id: %d - Enabled\n", vha->vp_idx));
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x801a,
|
||||
"Virtual port with id: %d - Enabled.\n", vha->vp_idx);
|
||||
return 0;
|
||||
|
||||
enable_failed:
|
||||
DEBUG15(qla_printk(KERN_INFO, ha,
|
||||
"Virtual port with id: %d - Disabled\n", vha->vp_idx));
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x801b,
|
||||
"Virtual port with id: %d - Disabled.\n", vha->vp_idx);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
|
||||
|
||||
fc_vport = vha->fc_vport;
|
||||
|
||||
DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
|
||||
vha->host_no, __func__));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa002,
|
||||
"%s: change request #3.\n", __func__);
|
||||
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
|
||||
"receiving of RSCN requests: 0x%x\n", ret));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
|
||||
"receiving of RSCN requests: 0x%x.\n", ret);
|
||||
return;
|
||||
} else {
|
||||
/* Corresponds to SCR enabled */
|
||||
@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
|
||||
case MBA_CHG_IN_CONNECTION:
|
||||
case MBA_PORT_UPDATE:
|
||||
case MBA_RSCN_UPDATE:
|
||||
DEBUG15(printk("scsi(%ld)%s: Async_event for"
|
||||
" VP[%d], mb = 0x%x, vha=%p\n",
|
||||
vha->host_no, __func__, i, *mb, vha));
|
||||
ql_dbg(ql_dbg_async, vha, 0x5024,
|
||||
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
|
||||
i, *mb, vha);
|
||||
qla2x00_async_event(vha, rsp, mb);
|
||||
break;
|
||||
}
|
||||
@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
|
||||
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
|
||||
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
|
||||
|
||||
DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
|
||||
vha->host_no, vha->vp_idx));
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x801d,
|
||||
"Scheduling enable of Vport %d.\n", vha->vp_idx);
|
||||
return qla24xx_enable_vp(vha);
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
|
||||
{
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4012,
|
||||
"Entering %s.\n", __func__);
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4013,
|
||||
"vp_flags: 0x%lx.\n", vha->vp_flags);
|
||||
|
||||
qla2x00_do_work(vha);
|
||||
|
||||
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
|
||||
/* VP acquired. complete port configuration */
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4014,
|
||||
"Configure VP scheduled.\n");
|
||||
qla24xx_configure_vp(vha);
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4015,
|
||||
"Configure VP end.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4016,
|
||||
"FCPort update scheduled.\n");
|
||||
qla2x00_update_fcports(vha);
|
||||
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4017,
|
||||
"FCPort update end.\n");
|
||||
}
|
||||
|
||||
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
|
||||
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
|
||||
atomic_read(&vha->loop_state) != LOOP_DOWN) {
|
||||
|
||||
DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4018,
|
||||
"Relogin needed scheduled.\n");
|
||||
qla2x00_relogin(vha);
|
||||
|
||||
DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x4019,
|
||||
"Relogin needed end.\n");
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
|
||||
@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
|
||||
|
||||
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
|
||||
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x401a,
|
||||
"Loop resync scheduled.\n");
|
||||
qla2x00_loop_resync(vha);
|
||||
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x401b,
|
||||
"Loop resync end.\n");
|
||||
}
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_dpc, vha, 0x401c,
|
||||
"Exiting %s.\n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
|
||||
|
||||
/* Check up max-npiv-supports */
|
||||
if (ha->num_vhosts > ha->max_npiv_vports) {
|
||||
DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
|
||||
"max_npv_vports %ud.\n", base_vha->host_no,
|
||||
ha->num_vhosts, ha->max_npiv_vports));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa004,
|
||||
"num_vhosts %ud is bigger "
|
||||
"than max_npiv_vports %ud.\n",
|
||||
ha->num_vhosts, ha->max_npiv_vports);
|
||||
return VPCERR_UNSUPPORTED;
|
||||
}
|
||||
return 0;
|
||||
@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
|
||||
|
||||
vha = qla2x00_create_host(sht, ha);
|
||||
if (!vha) {
|
||||
DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
|
||||
ql_log(ql_log_warn, vha, 0xa005,
|
||||
"scsi_host_alloc() failed for vport.\n");
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
|
||||
vha->device_flags = 0;
|
||||
vha->vp_idx = qla24xx_allocate_vp_id(vha);
|
||||
if (vha->vp_idx > ha->max_npiv_vports) {
|
||||
DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
|
||||
vha->host_no));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa006,
|
||||
"Couldn't allocate vp_id.\n");
|
||||
goto create_vhost_failed;
|
||||
}
|
||||
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
|
||||
@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
|
||||
host->max_id = MAX_TARGETS_2200;
|
||||
host->transportt = qla2xxx_transport_vport_template;
|
||||
|
||||
DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
|
||||
vha->host_no, vha));
|
||||
ql_dbg(ql_dbg_vport, vha, 0xa007,
|
||||
"Detect vport hba %ld at address = %p.\n",
|
||||
vha->host_no, vha);
|
||||
|
||||
vha->flags.init_done = 1;
|
||||
|
||||
@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
|
||||
if (req) {
|
||||
ret = qla25xx_delete_req_que(vha, req);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Couldn't delete req que %d\n",
|
||||
req->id);
|
||||
ql_log(ql_log_warn, vha, 0x00ea,
|
||||
"Couldn't delete req que %d.\n",
|
||||
req->id);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
|
||||
if (rsp) {
|
||||
ret = qla25xx_delete_rsp_que(vha, rsp);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Couldn't delete rsp que %d\n",
|
||||
rsp->id);
|
||||
ql_log(ql_log_warn, vha, 0x00eb,
|
||||
"Couldn't delete rsp que %d.\n",
|
||||
rsp->id);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
|
||||
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
|
||||
if (req == NULL) {
|
||||
qla_printk(KERN_WARNING, ha, "could not allocate memory"
|
||||
"for request que\n");
|
||||
ql_log(ql_log_fatal, base_vha, 0x00d9,
|
||||
"Failed to allocate memory for request queue.\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
(req->length + 1) * sizeof(request_t),
|
||||
&req->dma, GFP_KERNEL);
|
||||
if (req->ring == NULL) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Memory Allocation failed - request_ring\n");
|
||||
ql_log(ql_log_fatal, base_vha, 0x00da,
|
||||
"Failed to allocte memory for request_ring.\n");
|
||||
goto que_failed;
|
||||
}
|
||||
|
||||
@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
|
||||
if (que_id >= ha->max_req_queues) {
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
qla_printk(KERN_INFO, ha, "No resources to create "
|
||||
"additional request queue\n");
|
||||
ql_log(ql_log_warn, base_vha, 0x00db,
|
||||
"No resources to create additional request queue.\n");
|
||||
goto que_failed;
|
||||
}
|
||||
set_bit(que_id, ha->req_qid_map);
|
||||
@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
req->vp_idx = vp_idx;
|
||||
req->qos = qos;
|
||||
|
||||
ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
|
||||
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
|
||||
que_id, req->rid, req->vp_idx, req->qos);
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00dc,
|
||||
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
|
||||
que_id, req->rid, req->vp_idx, req->qos);
|
||||
if (rsp_que < 0)
|
||||
req->rsp = NULL;
|
||||
else
|
||||
@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
options |= BIT_5;
|
||||
req->options = options;
|
||||
|
||||
ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
|
||||
"options=0x%x.\n", req->options);
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
|
||||
"options=0x%x.\n", req->options);
|
||||
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
|
||||
req->outstanding_cmds[cnt] = NULL;
|
||||
req->current_outstanding_cmd = 1;
|
||||
@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
reg = ISP_QUE_REG(ha, que_id);
|
||||
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
|
||||
"ring_ptr=%p ring_index=%d, "
|
||||
"cnt=%d id=%d max_q_depth=%d.\n",
|
||||
req->ring_ptr, req->ring_index,
|
||||
req->cnt, req->id, req->max_q_depth);
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00de,
|
||||
"ring_ptr=%p ring_index=%d, "
|
||||
"cnt=%d id=%d max_q_depth=%d.\n",
|
||||
req->ring_ptr, req->ring_index, req->cnt,
|
||||
req->id, req->max_q_depth);
|
||||
|
||||
ret = qla25xx_init_req_que(base_vha, req);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
|
||||
ql_log(ql_log_fatal, base_vha, 0x00df,
|
||||
"%s failed.\n", __func__);
|
||||
mutex_lock(&ha->vport_lock);
|
||||
clear_bit(que_id, ha->req_qid_map);
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
|
||||
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
|
||||
if (rsp == NULL) {
|
||||
qla_printk(KERN_WARNING, ha, "could not allocate memory for"
|
||||
" response que\n");
|
||||
ql_log(ql_log_warn, base_vha, 0x0066,
|
||||
"Failed to allocate memory for response queue.\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
(rsp->length + 1) * sizeof(response_t),
|
||||
&rsp->dma, GFP_KERNEL);
|
||||
if (rsp->ring == NULL) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Memory Allocation failed - response_ring\n");
|
||||
ql_log(ql_log_warn, base_vha, 0x00e1,
|
||||
"Failed to allocate memory for response ring.\n");
|
||||
goto que_failed;
|
||||
}
|
||||
|
||||
@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
|
||||
if (que_id >= ha->max_rsp_queues) {
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
qla_printk(KERN_INFO, ha, "No resources to create "
|
||||
"additional response queue\n");
|
||||
ql_log(ql_log_warn, base_vha, 0x00e2,
|
||||
"No resources to create additional request queue.\n");
|
||||
goto que_failed;
|
||||
}
|
||||
set_bit(que_id, ha->rsp_qid_map);
|
||||
@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
if (ha->flags.msix_enabled)
|
||||
rsp->msix = &ha->msix_entries[que_id + 1];
|
||||
else
|
||||
qla_printk(KERN_WARNING, ha, "msix not enabled\n");
|
||||
ql_log(ql_log_warn, base_vha, 0x00e3,
|
||||
"MSIX not enalbled.\n");
|
||||
|
||||
ha->rsp_q_map[que_id] = rsp;
|
||||
rsp->rid = rid;
|
||||
rsp->vp_idx = vp_idx;
|
||||
rsp->hw = ha;
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
|
||||
"queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
|
||||
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
|
||||
/* Use alternate PCI bus number */
|
||||
if (MSB(rsp->rid))
|
||||
options |= BIT_4;
|
||||
@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
rsp->rsp_q_in = ®->isp25mq.rsp_q_in;
|
||||
rsp->rsp_q_out = ®->isp25mq.rsp_q_out;
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
|
||||
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
|
||||
rsp->options, rsp->id, rsp->rsp_q_in,
|
||||
rsp->rsp_q_out);
|
||||
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
|
||||
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
|
||||
rsp->options, rsp->id, rsp->rsp_q_in,
|
||||
rsp->rsp_q_out);
|
||||
|
||||
ret = qla25xx_request_irq(rsp);
|
||||
if (ret)
|
||||
@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
|
||||
|
||||
ret = qla25xx_init_rsp_que(base_vha, rsp);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
|
||||
ql_log(ql_log_fatal, base_vha, 0x00e7,
|
||||
"%s failed.\n", __func__);
|
||||
mutex_lock(&ha->vport_lock);
|
||||
clear_bit(que_id, ha->rsp_qid_map);
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
|
||||
uint16_t word;
|
||||
uint32_t nv_cmd, wait_cnt;
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
qla2x00_nv_write(ha, NVR_DATA_OUT);
|
||||
qla2x00_nv_write(ha, 0);
|
||||
@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
|
||||
wait_cnt = NVR_WAIT_CNT;
|
||||
do {
|
||||
if (!--wait_cnt) {
|
||||
DEBUG9_10(qla_printk(KERN_WARNING, ha,
|
||||
"NVRAM didn't go ready...\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x708d,
|
||||
"NVRAM didn't go ready...\n");
|
||||
break;
|
||||
}
|
||||
NVRAM_DELAY();
|
||||
@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
uint32_t word, wait_cnt;
|
||||
uint16_t wprot, wprot_old;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
/* Clear NVRAM write protection. */
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
|
||||
wait_cnt = NVR_WAIT_CNT;
|
||||
do {
|
||||
if (!--wait_cnt) {
|
||||
DEBUG9_10(qla_printk(KERN_WARNING, ha,
|
||||
"NVRAM didn't go ready...\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x708e,
|
||||
"NVRAM didn't go ready...\n");
|
||||
break;
|
||||
}
|
||||
NVRAM_DELAY();
|
||||
@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
|
||||
{
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
uint32_t word, wait_cnt;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
if (stat != QLA_SUCCESS)
|
||||
return;
|
||||
@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
|
||||
wait_cnt = NVR_WAIT_CNT;
|
||||
do {
|
||||
if (!--wait_cnt) {
|
||||
DEBUG9_10(qla_printk(KERN_WARNING, ha,
|
||||
"NVRAM didn't go ready...\n"));
|
||||
ql_dbg(ql_dbg_user, vha, 0x708f,
|
||||
"NVRAM didn't go ready...\n");
|
||||
break;
|
||||
}
|
||||
NVRAM_DELAY();
|
||||
@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
|
||||
for (chksum = 0; cnt; cnt--)
|
||||
chksum += le16_to_cpu(*wptr++);
|
||||
if (chksum) {
|
||||
qla_printk(KERN_ERR, ha,
|
||||
ql_log(ql_log_fatal, vha, 0x0045,
|
||||
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
|
||||
qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
|
||||
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
|
||||
buf, sizeof(struct qla_flt_location));
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
|
||||
*start = (le16_to_cpu(fltl->start_hi) << 16 |
|
||||
le16_to_cpu(fltl->start_lo)) >> 2;
|
||||
end:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
|
||||
ql_dbg(ql_dbg_init, vha, 0x0046,
|
||||
"FLTL[%s] = 0x%x.\n",
|
||||
loc, *start);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
if (*wptr == __constant_cpu_to_le16(0xffff))
|
||||
goto no_flash_data;
|
||||
if (flt->version != __constant_cpu_to_le16(1)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
|
||||
"version=0x%x length=0x%x checksum=0x%x.\n",
|
||||
ql_log(ql_log_warn, vha, 0x0047,
|
||||
"Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
|
||||
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
|
||||
le16_to_cpu(flt->checksum)));
|
||||
le16_to_cpu(flt->checksum));
|
||||
goto no_flash_data;
|
||||
}
|
||||
|
||||
@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
for (chksum = 0; cnt; cnt--)
|
||||
chksum += le16_to_cpu(*wptr++);
|
||||
if (chksum) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
|
||||
"version=0x%x length=0x%x checksum=0x%x.\n",
|
||||
ql_log(ql_log_fatal, vha, 0x0048,
|
||||
"Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
|
||||
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
|
||||
chksum));
|
||||
le16_to_cpu(flt->checksum));
|
||||
goto no_flash_data;
|
||||
}
|
||||
|
||||
@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
for ( ; cnt; cnt--, region++) {
|
||||
/* Store addresses as DWORD offsets. */
|
||||
start = le32_to_cpu(region->start) >> 2;
|
||||
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
|
||||
"end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
|
||||
le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
|
||||
ql_dbg(ql_dbg_init, vha, 0x0049,
|
||||
"FLT[%02x]: start=0x%x "
|
||||
"end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
|
||||
start, le32_to_cpu(region->end) >> 2,
|
||||
le32_to_cpu(region->size));
|
||||
|
||||
switch (le32_to_cpu(region->code) & 0xff) {
|
||||
case FLT_REG_FW:
|
||||
@ -796,12 +803,16 @@ no_flash_data:
|
||||
ha->flt_region_npiv_conf = ha->flags.port0 ?
|
||||
def_npiv_conf0[def] : def_npiv_conf1[def];
|
||||
done:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
|
||||
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
|
||||
"npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot,
|
||||
ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd,
|
||||
ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt,
|
||||
ha->flt_region_npiv_conf, ha->flt_region_fcp_prio));
|
||||
ql_dbg(ql_dbg_init, vha, 0x004a,
|
||||
"FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
|
||||
loc, ha->flt_region_boot,
|
||||
ha->flt_region_fw, ha->flt_region_vpd_nvram,
|
||||
ha->flt_region_vpd);
|
||||
ql_dbg(ql_dbg_init, vha, 0x004b,
|
||||
"nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
|
||||
ha->flt_region_nvram,
|
||||
ha->flt_region_fdt, ha->flt_region_flt,
|
||||
ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
|
||||
cnt++)
|
||||
chksum += le16_to_cpu(*wptr++);
|
||||
if (chksum) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
|
||||
"checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
|
||||
le16_to_cpu(fdt->version)));
|
||||
DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt)));
|
||||
ql_dbg(ql_dbg_init, vha, 0x004c,
|
||||
"Inconsistent FDT detected:"
|
||||
" checksum=0x%x id=%c version0x%x.\n", chksum,
|
||||
fdt->sig[0], le16_to_cpu(fdt->version));
|
||||
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
|
||||
(uint8_t *)fdt, sizeof(*fdt));
|
||||
goto no_flash_data;
|
||||
}
|
||||
|
||||
@ -890,11 +903,12 @@ no_flash_data:
|
||||
break;
|
||||
}
|
||||
done:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
|
||||
"pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
|
||||
ql_dbg(ql_dbg_init, vha, 0x004d,
|
||||
"FDT[%x]: (0x%x/0x%x) erase=0x%x "
|
||||
"pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
|
||||
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
|
||||
ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
|
||||
ha->fdt_block_size));
|
||||
ha->fdt_wrt_disable, ha->fdt_block_size);
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
|
||||
ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
|
||||
ha->nx_reset_timeout = le32_to_cpu(*wptr);
|
||||
}
|
||||
ql_dbg(ql_dbg_init, vha, 0x004e,
|
||||
"nx_dev_init_timeout=%d "
|
||||
"nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
|
||||
ha->nx_reset_timeout);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
|
||||
if (hdr.version == __constant_cpu_to_le16(0xffff))
|
||||
return;
|
||||
if (hdr.version != __constant_cpu_to_le16(1)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
|
||||
ql_dbg(ql_dbg_user, vha, 0x7090,
|
||||
"Unsupported NPIV-Config "
|
||||
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
|
||||
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
|
||||
le16_to_cpu(hdr.checksum)));
|
||||
le16_to_cpu(hdr.checksum));
|
||||
return;
|
||||
}
|
||||
|
||||
data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
|
||||
if (!data) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
|
||||
"allocate memory.\n"));
|
||||
ql_log(ql_log_warn, vha, 0x7091,
|
||||
"Unable to allocate memory for data.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
|
||||
for (wptr = data, chksum = 0; cnt; cnt--)
|
||||
chksum += le16_to_cpu(*wptr++);
|
||||
if (chksum) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
|
||||
ql_dbg(ql_dbg_user, vha, 0x7092,
|
||||
"Inconsistent NPIV-Config "
|
||||
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
|
||||
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
|
||||
chksum));
|
||||
le16_to_cpu(hdr.checksum));
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
|
||||
vid.port_name = wwn_to_u64(entry->port_name);
|
||||
vid.node_name = wwn_to_u64(entry->node_name);
|
||||
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
|
||||
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
|
||||
(unsigned long long)vid.port_name,
|
||||
(unsigned long long)vid.node_name,
|
||||
le16_to_cpu(entry->vf_id),
|
||||
entry->q_qos, entry->f_qos));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7093,
|
||||
"NPIV[%02x]: wwpn=%llx "
|
||||
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
|
||||
(unsigned long long)vid.port_name,
|
||||
(unsigned long long)vid.node_name,
|
||||
le16_to_cpu(entry->vf_id),
|
||||
entry->q_qos, entry->f_qos);
|
||||
|
||||
if (i < QLA_PRECONFIG_VPORTS) {
|
||||
vport = fc_vport_create(vha->host, 0, &vid);
|
||||
if (!vport)
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"NPIV-Config: Failed to create vport [%02x]: "
|
||||
"wwpn=%llx wwnn=%llx.\n", cnt,
|
||||
(unsigned long long)vid.port_name,
|
||||
(unsigned long long)vid.node_name);
|
||||
ql_log(ql_log_warn, vha, 0x7094,
|
||||
"NPIV-Config Failed to create vport [%02x]: "
|
||||
"wwpn=%llx wwnn=%llx.\n", cnt,
|
||||
(unsigned long long)vid.port_name,
|
||||
(unsigned long long)vid.node_name);
|
||||
}
|
||||
}
|
||||
done:
|
||||
@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
|
||||
&optrom_dma, GFP_KERNEL);
|
||||
if (!optrom) {
|
||||
qla_printk(KERN_DEBUG, ha,
|
||||
"Unable to allocate memory for optrom burst write "
|
||||
"(%x KB).\n", OPTROM_BURST_SIZE / 1024);
|
||||
ql_log(ql_log_warn, vha, 0x7095,
|
||||
"Unable to allocate "
|
||||
"memory for optrom burst write (%x KB).\n",
|
||||
OPTROM_BURST_SIZE / 1024);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
|
||||
ret = qla24xx_unprotect_flash(vha);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7096,
|
||||
"Unable to unprotect flash for update.\n");
|
||||
goto done;
|
||||
}
|
||||
@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
0xff0000) | ((fdata >> 16) & 0xff));
|
||||
ret = qla24xx_erase_sector(vha, fdata);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG9(qla_printk(KERN_WARNING, ha,
|
||||
"Unable to erase sector: address=%x.\n",
|
||||
faddr));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7007,
|
||||
"Unable to erase erase sector: address=%x.\n",
|
||||
faddr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
flash_data_addr(ha, faddr),
|
||||
OPTROM_BURST_DWORDS);
|
||||
if (ret != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7097,
|
||||
"Unable to burst-write optrom segment "
|
||||
"(%x/%x/%llx).\n", ret,
|
||||
flash_data_addr(ha, faddr),
|
||||
(unsigned long long)optrom_dma);
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7098,
|
||||
"Reverting to slow-write.\n");
|
||||
|
||||
dma_free_coherent(&ha->pdev->dev,
|
||||
@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
ret = qla24xx_write_flash_dword(ha,
|
||||
flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG9(printk("%s(%ld) Unable to program flash "
|
||||
"address=%x data=%x.\n", __func__,
|
||||
vha->host_no, faddr, *dwptr));
|
||||
ql_dbg(ql_dbg_user, vha, 0x7006,
|
||||
"Unable to program flash address=%x data=%x.\n",
|
||||
faddr, *dwptr);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
|
||||
ret = qla24xx_protect_flash(vha);
|
||||
if (ret != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7099,
|
||||
"Unable to protect flash after update.\n");
|
||||
done:
|
||||
if (optrom)
|
||||
@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
|
||||
ret = qla24xx_write_flash_dword(ha,
|
||||
nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
|
||||
if (ret != QLA_SUCCESS) {
|
||||
DEBUG9(qla_printk(KERN_WARNING, ha,
|
||||
ql_dbg(ql_dbg_user, vha, 0x709a,
|
||||
"Unable to program nvram address=%x data=%x.\n",
|
||||
naddr, *dwptr));
|
||||
naddr, *dwptr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
|
||||
ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
|
||||
|
||||
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x709b,
|
||||
"Unable to update fw options (beacon on).\n");
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
|
||||
|
||||
rval = qla2x00_set_fw_options(vha, ha->fw_options);
|
||||
if (rval != QLA_SUCCESS)
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x709c,
|
||||
"Unable to update fw options (beacon off).\n");
|
||||
return rval;
|
||||
}
|
||||
@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
|
||||
|
||||
if (qla2x00_get_fw_options(vha, ha->fw_options) !=
|
||||
QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x7009,
|
||||
"Unable to update fw options (beacon on).\n");
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
|
||||
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
|
||||
|
||||
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to update fw options (beacon off).\n");
|
||||
ql_log(ql_log_warn, vha, 0x704d,
|
||||
"Unable to update fw options (beacon on).\n");
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to get fw options (beacon off).\n");
|
||||
ql_log(ql_log_warn, vha, 0x704e,
|
||||
"Unable to update fw options (beacon on).\n");
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
|
||||
@ -2389,10 +2411,9 @@ try_fast:
|
||||
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
|
||||
&optrom_dma, GFP_KERNEL);
|
||||
if (!optrom) {
|
||||
qla_printk(KERN_DEBUG, ha,
|
||||
"Unable to allocate memory for optrom burst read "
|
||||
"(%x KB).\n", OPTROM_BURST_SIZE / 1024);
|
||||
|
||||
ql_log(ql_log_warn, vha, 0x00cc,
|
||||
"Unable to allocate memory for optrom burst read (%x KB).\n",
|
||||
OPTROM_BURST_SIZE / 1024);
|
||||
goto slow_read;
|
||||
}
|
||||
|
||||
@ -2407,12 +2428,11 @@ try_fast:
|
||||
rval = qla2x00_dump_ram(vha, optrom_dma,
|
||||
flash_data_addr(ha, faddr), burst);
|
||||
if (rval) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to burst-read optrom segment "
|
||||
"(%x/%x/%llx).\n", rval,
|
||||
flash_data_addr(ha, faddr),
|
||||
ql_log(ql_log_warn, vha, 0x00f5,
|
||||
"Unable to burst-read optrom segment (%x/%x/%llx).\n",
|
||||
rval, flash_data_addr(ha, faddr),
|
||||
(unsigned long long)optrom_dma);
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
ql_log(ql_log_warn, vha, 0x00f6,
|
||||
"Reverting to slow-read.\n");
|
||||
|
||||
dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
|
||||
@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
|
||||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
|
||||
/* No signature */
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
|
||||
"signature.\n"));
|
||||
ql_log(ql_log_fatal, vha, 0x0050,
|
||||
"No matching ROM signature.\n");
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
|
||||
/* Incorrect header. */
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
|
||||
"found pcir_adr=%x.\n", pcids));
|
||||
ql_log(ql_log_fatal, vha, 0x0051,
|
||||
"PCI data struct not found pcir_adr=%x.\n", pcids);
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x12);
|
||||
ha->bios_revision[1] =
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x13);
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
|
||||
ha->bios_revision[1], ha->bios_revision[0]));
|
||||
ql_dbg(ql_dbg_init, vha, 0x0052,
|
||||
"Read BIOS %d.%d.\n",
|
||||
ha->bios_revision[1], ha->bios_revision[0]);
|
||||
break;
|
||||
case ROM_CODE_TYPE_FCODE:
|
||||
/* Open Firmware standard for PCI (FCode). */
|
||||
@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x12);
|
||||
ha->efi_revision[1] =
|
||||
qla2x00_read_flash_byte(ha, pcids + 0x13);
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
|
||||
ha->efi_revision[1], ha->efi_revision[0]));
|
||||
ql_dbg(ql_dbg_init, vha, 0x0053,
|
||||
"Read EFI %d.%d.\n",
|
||||
ha->efi_revision[1], ha->efi_revision[0]);
|
||||
break;
|
||||
default:
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
|
||||
"type %x at pcids %x.\n", code_type, pcids));
|
||||
ql_log(ql_log_warn, vha, 0x0054,
|
||||
"Unrecognized code type %x at pcids %x.\n",
|
||||
code_type, pcids);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
|
||||
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
|
||||
8);
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
|
||||
"flash:\n"));
|
||||
DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
|
||||
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
|
||||
"Dumping fw "
|
||||
"ver from flash:.\n");
|
||||
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
|
||||
(uint8_t *)dbyte, 8);
|
||||
|
||||
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
|
||||
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
|
||||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
|
||||
dcode[3] == 0)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
|
||||
"revision at %x.\n", ha->flt_region_fw * 4));
|
||||
ql_log(ql_log_warn, vha, 0x0057,
|
||||
"Unrecognized fw revision at %x.\n",
|
||||
ha->flt_region_fw * 4);
|
||||
} else {
|
||||
/* values are in big endian */
|
||||
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
|
||||
ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
|
||||
ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
|
||||
ql_dbg(ql_dbg_init, vha, 0x0058,
|
||||
"FW Version: "
|
||||
"%d.%d.%d.\n", ha->fw_revision[0],
|
||||
ha->fw_revision[1], ha->fw_revision[2]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
bcode = mbuf + (pcihdr % 4);
|
||||
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
|
||||
/* No signature */
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
|
||||
"signature.\n"));
|
||||
ql_log(ql_log_fatal, vha, 0x0059,
|
||||
"No matching ROM signature.\n");
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
|
||||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
|
||||
/* Incorrect header. */
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
|
||||
"found pcir_adr=%x.\n", pcids));
|
||||
ql_log(ql_log_fatal, vha, 0x005a,
|
||||
"PCI data struct not found pcir_adr=%x.\n", pcids);
|
||||
ret = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
/* Intel x86, PC-AT compatible. */
|
||||
ha->bios_revision[0] = bcode[0x12];
|
||||
ha->bios_revision[1] = bcode[0x13];
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
|
||||
ha->bios_revision[1], ha->bios_revision[0]));
|
||||
ql_dbg(ql_dbg_init, vha, 0x005b,
|
||||
"Read BIOS %d.%d.\n",
|
||||
ha->bios_revision[1], ha->bios_revision[0]);
|
||||
break;
|
||||
case ROM_CODE_TYPE_FCODE:
|
||||
/* Open Firmware standard for PCI (FCode). */
|
||||
ha->fcode_revision[0] = bcode[0x12];
|
||||
ha->fcode_revision[1] = bcode[0x13];
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
|
||||
ha->fcode_revision[1], ha->fcode_revision[0]));
|
||||
ql_dbg(ql_dbg_init, vha, 0x005c,
|
||||
"Read FCODE %d.%d.\n",
|
||||
ha->fcode_revision[1], ha->fcode_revision[0]);
|
||||
break;
|
||||
case ROM_CODE_TYPE_EFI:
|
||||
/* Extensible Firmware Interface (EFI). */
|
||||
ha->efi_revision[0] = bcode[0x12];
|
||||
ha->efi_revision[1] = bcode[0x13];
|
||||
DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
|
||||
ha->efi_revision[1], ha->efi_revision[0]));
|
||||
ql_dbg(ql_dbg_init, vha, 0x005d,
|
||||
"Read EFI %d.%d.\n",
|
||||
ha->efi_revision[1], ha->efi_revision[0]);
|
||||
break;
|
||||
default:
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
|
||||
"type %x at pcids %x.\n", code_type, pcids));
|
||||
ql_log(ql_log_warn, vha, 0x005e,
|
||||
"Unrecognized code type %x at pcids %x.\n",
|
||||
code_type, pcids);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
|
||||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
|
||||
dcode[3] == 0)) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
|
||||
"revision at %x.\n", ha->flt_region_fw * 4));
|
||||
ql_log(ql_log_warn, vha, 0x005f,
|
||||
"Unrecognized fw revision at %x.\n",
|
||||
ha->flt_region_fw * 4);
|
||||
} else {
|
||||
ha->fw_revision[0] = dcode[0];
|
||||
ha->fw_revision[1] = dcode[1];
|
||||
ha->fw_revision[2] = dcode[2];
|
||||
ha->fw_revision[3] = dcode[3];
|
||||
ql_dbg(ql_dbg_init, vha, 0x0060,
|
||||
"Firmware revision %d.%d.%d.%d.\n",
|
||||
ha->fw_revision[0], ha->fw_revision[1],
|
||||
ha->fw_revision[2], ha->fw_revision[3]);
|
||||
}
|
||||
|
||||
/* Check for golden firmware and get version if available */
|
||||
@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
|
||||
|
||||
if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
|
||||
dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"%s(%ld): Unrecognized golden fw at 0x%x.\n",
|
||||
__func__, vha->host_no, ha->flt_region_gold_fw * 4));
|
||||
ql_log(ql_log_warn, vha, 0x0056,
|
||||
"Unrecognized golden fw at 0x%x.\n",
|
||||
ha->flt_region_gold_fw * 4);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
|
||||
if (!ha->fcp_prio_cfg) {
|
||||
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
|
||||
if (!ha->fcp_prio_cfg) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Unable to allocate memory for fcp priority data "
|
||||
"(%x).\n", FCP_PRIO_CFG_SIZE);
|
||||
ql_log(ql_log_warn, vha, 0x00d5,
|
||||
"Unable to allocate memory for fcp priorty data (%x).\n",
|
||||
FCP_PRIO_CFG_SIZE);
|
||||
return QLA_FUNCTION_FAILED;
|
||||
}
|
||||
}
|
||||
@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
|
||||
ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
|
||||
fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
|
||||
|
||||
if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
|
||||
if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
|
||||
goto fail;
|
||||
|
||||
/* read remaining FCP CMD config data from flash */
|
||||
@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
|
||||
fcp_prio_addr << 2, (len < max_len ? len : max_len));
|
||||
|
||||
/* revalidate the entire FCP priority config data, including entries */
|
||||
if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
|
||||
if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
|
||||
goto fail;
|
||||
|
||||
ha->flags.fcp_prio_enabled = 1;
|
||||
|
@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
|
||||
host->host_blocked = host->max_host_blocked;
|
||||
break;
|
||||
case SCSI_MLQUEUE_DEVICE_BUSY:
|
||||
case SCSI_MLQUEUE_EH_RETRY:
|
||||
device->device_blocked = device->max_device_blocked;
|
||||
break;
|
||||
case SCSI_MLQUEUE_TARGET_BUSY:
|
||||
|
@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
|
||||
show_spi_host_signalling,
|
||||
store_spi_host_signalling);
|
||||
|
||||
static ssize_t show_spi_host_width(struct device *cdev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct Scsi_Host *shost = transport_class_to_shost(cdev);
|
||||
|
||||
return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
|
||||
}
|
||||
static DEVICE_ATTR(host_width, S_IRUGO,
|
||||
show_spi_host_width, NULL);
|
||||
|
||||
static ssize_t show_spi_host_hba_id(struct device *cdev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct Scsi_Host *shost = transport_class_to_shost(cdev);
|
||||
|
||||
return sprintf(buf, "%d\n", shost->this_id);
|
||||
}
|
||||
static DEVICE_ATTR(hba_id, S_IRUGO,
|
||||
show_spi_host_hba_id, NULL);
|
||||
|
||||
#define DV_SET(x, y) \
|
||||
if(i->f->set_##x) \
|
||||
i->f->set_##x(sdev->sdev_target, y)
|
||||
@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
|
||||
|
||||
static struct attribute *host_attributes[] = {
|
||||
&dev_attr_signalling.attr,
|
||||
&dev_attr_host_width.attr,
|
||||
&dev_attr_hba_id.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -78,7 +78,6 @@ struct fc_frame {
|
||||
};
|
||||
|
||||
struct fcoe_rcv_info {
|
||||
struct packet_type *ptype;
|
||||
struct fc_lport *fr_dev; /* transport layer private pointer */
|
||||
struct fc_seq *fr_seq; /* for use with exchange manager */
|
||||
struct fc_fcp_pkt *fr_fsp; /* for the corresponding fcp I/O */
|
||||
|
Loading…
x
Reference in New Issue
Block a user