Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-08 14:13:53 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (70 commits)
  [SCSI] pmcraid: add support for set timestamp command and other fixes
  [SCSI] pmcraid: remove duplicate struct member
  [SCSI] qla4xxx: Fix cmd check in qla4xxx_cmd_wait
  [SCSI] megaraid_sas: Version and documentation update
  [SCSI] megaraid_sas: Add three times Online controller reset
  [SCSI] megaraid_sas: Add input parameter for max_sectors
  [SCSI] megaraid_sas: support devices update flag
  [SCSI] libosd: write/read_sg_kern API
  [SCSI] libosd: Support for scatter gather write/read commands
  [SCSI] libosd: Free resources in reverse order of allocation
  [SCSI] libosd: Fix bug in attr_page handling
  [SCSI] lpfc 8.3.18: Update lpfc driver version to 8.3.18
  [SCSI] lpfc 8.3.18: Add new WQE support
  [SCSI] lpfc 8.3.18: Fix critical errors
  [SCSI] lpfc 8.3.18: Adapter Shutdown and Unregistration cleanup
  [SCSI] lpfc 8.3.18: Add logic to detect last devloss timeout
  [SCSI] lpfc 8.3.18: Add support of received ELS commands
  [SCSI] lpfc 8.3.18: FC/FCoE Discovery fixes
  [SCSI] ipr: add definitions for a new adapter
  [SCSI] bfa: fix comments for c files
  ...
commit ce9d8d9f72
@ -1,3 +1,50 @@
1 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 -
		(emaild-id:megaraidlinux@lsi.com)
		Bo Yang

2 Current Version : 00.00.04.31-rc1
3 Older Version   : 00.00.04.17.1-rc1

1. Add Online Controller Reset (OCR) support to the driver.
   OCR is a new megaraid_sas feature that lets the firmware reset the
   chip without affecting OS behavior.

   To support OCR, the driver needs to:
   a) Reset the controller chips (Xscale and Gen2), which changes the
      function calls and adds the reset functions for these two chips.

   b) During the reset, store the pending commands that the FW has not
      returned on the driver's pending queue, and re-issue those pending
      commands to the FW after the OCR finishes.

   c) In the driver's timeout routine, report a reset to the OS. The
      driver's queue routine also blocks commands until the OCR finishes.

   d) In the driver's ISR routine, if the FW state shows a state change,
      the FW is in the failure state, and the FW supports online
      controller reset (OCR), start the controller reset.

   e) In the driver's IOCTL routine, application commands wait for the
      OCR to finish and are then issued to the FW.

   f) Before the driver kills the adapter, attempt one last OCR to see
      whether the FW can be brought back.

2. Add a device-update support flag to the driver to tell the LSI
   megaraid_sas application that the driver supports device updates, so
   the application no longer needs to perform a device update after
   adding/deleting a device from the system.

3. In the driver's timeout routine, reset the FW up to three times if it
   is in the failed state. The driver kills the adapter if the FW cannot
   be brought back after these three resets.

4. Add the max_sectors input parameter for 1MB I/O support on GEN2
   controllers. Customers can use the max_sectors module parameter to
   enable 1MB support on a GEN2 controller.

1 Release Date : Thur. Oct 29, 2009 09:12:45 PST 2009 -
		(emaild-id:megaraidlinux@lsi.com)
		Bo Yang

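The OCR handling in item 1 above (notably steps b and c) boils down to parking commands the firmware has not answered while the chip resets, and replaying them once the reset completes. The sketch below is a minimal, hypothetical illustration of that pattern only; it is not the megaraid_sas implementation, and all names (fake_instance, pending_list, ocr_in_progress, issue_to_fw) are invented.

/* Hypothetical "park and replay" pattern for an online controller reset.
 * Not the real megaraid_sas code; every identifier below is made up. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fake_cmd {
	struct list_head list;
	/* ... command payload ... */
};

struct fake_instance {
	spinlock_t lock;
	bool ocr_in_progress;		/* set when the reset starts */
	struct list_head pending_list;	/* cmds held back during OCR */
};

/* Queue path: while OCR is running, park new commands instead of
 * handing them to the firmware. */
static void fake_queue_cmd(struct fake_instance *inst, struct fake_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&inst->lock, flags);
	if (inst->ocr_in_progress) {
		list_add_tail(&cmd->list, &inst->pending_list);
		spin_unlock_irqrestore(&inst->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&inst->lock, flags);
	/* issue_to_fw(inst, cmd);  -- hypothetical firmware submit */
}

/* Reset-done path: clear the flag and re-issue everything that was parked. */
static void fake_ocr_done(struct fake_instance *inst)
{
	struct fake_cmd *cmd, *tmp;
	unsigned long flags;
	LIST_HEAD(replay);

	spin_lock_irqsave(&inst->lock, flags);
	inst->ocr_in_progress = false;
	list_splice_init(&inst->pending_list, &replay);
	spin_unlock_irqrestore(&inst->lock, flags);

	list_for_each_entry_safe(cmd, tmp, &replay, list) {
		list_del(&cmd->list);
		/* issue_to_fw(inst, cmd);  -- re-issue to the firmware */
	}
}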
@ -270,7 +270,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
|
||||
if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
|
||||
sense = (char *) &fcp_rsp[1];
|
||||
if (rsp_flags & FCP_RSP_LEN_VAL)
|
||||
sense += fcp_rsp->ext.fr_sns_len;
|
||||
sense += fcp_rsp->ext.fr_rsp_len;
|
||||
sense_len = min(fcp_rsp->ext.fr_sns_len,
|
||||
(u32) SCSI_SENSE_BUFFERSIZE);
|
||||
memcpy(scsi->sense_buffer, sense, sense_len);
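The one functional change in this zfcp hunk is the offset used to reach the sense data: it now skips fr_rsp_len bytes instead of fr_sns_len. When FCP_RSP_LEN_VAL is set, the response-info field sits between the fixed part of the FCP response and the sense data, so the response-info length is the correct amount to skip. A simplified layout sketch, using the field names from the hunk:

/*
 * FCP response IU when both FCP_RSP_LEN_VAL and FCP_SNS_LEN_VAL are set
 * (simplified; field names as in the hunk above):
 *
 *   fcp_rsp     -> [ struct fcp_resp_with_ext        ]
 *   &fcp_rsp[1] -> [ response info, fr_rsp_len bytes ]
 *                  [ sense data,    fr_sns_len bytes ]
 *
 *   sense = (char *)&fcp_rsp[1] + fr_rsp_len;
 */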
|
||||
|
@ -532,9 +532,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
|
||||
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
|
||||
adapter->hydra_version = 0;
|
||||
|
||||
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
|
||||
&adapter->status);
|
||||
|
||||
zfcp_fsf_link_down_info_eval(req,
|
||||
&qtcb->header.fsf_status_qual.link_down_info);
|
||||
break;
|
||||
|
@ -142,6 +142,8 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
get_device(&port->dev);
|
||||
|
||||
if (device_register(&unit->dev)) {
|
||||
put_device(&unit->dev);
|
||||
return -ENOMEM;
|
||||
@ -152,8 +154,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
get_device(&port->dev);
|
||||
|
||||
write_lock_irq(&port->unit_list_lock);
|
||||
list_add_tail(&unit->list, &port->unit_list);
|
||||
write_unlock_irq(&port->unit_list_lock);
|
||||
|
@ -29,13 +29,13 @@ struct bfa_s;
|
||||
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
|
||||
typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Interrupt message handlers
|
||||
*/
|
||||
void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
|
||||
void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Request and response queue related defines
|
||||
*/
|
||||
#define BFA_REQQ_NELEMS_MIN (4)
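The /** to /* changes repeated throughout these bfa headers follow the kernel convention that a comment opening with /** is a kernel-doc comment and is picked up by the documentation tooling; ordinary comments that are not written in kernel-doc form should open with a plain /*. A short illustration (the function below is hypothetical):

/**
 * example_add() - add two integers (kernel-doc form, parsed by scripts/kernel-doc)
 * @a: first operand
 * @b: second operand
 *
 * Return: the sum of @a and @b.
 */
static inline int example_add(int a, int b)
{
	return a + b;
}

/*
 * A plain note like this one is not kernel-doc, so it opens with a single
 * asterisk -- which is exactly the change being applied above.
 */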
|
||||
@ -58,9 +58,9 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
|
||||
#define bfa_reqq_produce(__bfa, __reqq) do { \
|
||||
(__bfa)->iocfc.req_cq_pi[__reqq]++; \
|
||||
(__bfa)->iocfc.req_cq_pi[__reqq] &= \
|
||||
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
|
||||
bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
|
||||
(__bfa)->iocfc.req_cq_pi[__reqq]); \
|
||||
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
|
||||
writel((__bfa)->iocfc.req_cq_pi[__reqq], \
|
||||
(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
|
||||
mmiowb(); \
|
||||
} while (0)
|
||||
|
||||
@ -76,7 +76,7 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
|
||||
(__index) &= ((__size) - 1); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Queue element to wait for room in request queue. FIFO order is
|
||||
* maintained when fulfilling requests.
|
||||
*/
|
||||
@ -86,7 +86,7 @@ struct bfa_reqq_wait_s {
|
||||
void *cbarg;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Circular queue usage assignments
|
||||
*/
|
||||
enum {
|
||||
@ -113,7 +113,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
|
||||
|
||||
#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
|
||||
|
||||
/**
|
||||
/*
|
||||
* static inline void
|
||||
* bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
|
||||
*/
|
||||
@ -130,7 +130,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
|
||||
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic BFA callback element.
|
||||
*/
|
||||
struct bfa_cb_qe_s {
|
||||
@ -163,7 +163,7 @@ struct bfa_cb_qe_s {
|
||||
} while (0)
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI devices supported by the current BFA
|
||||
*/
|
||||
struct bfa_pciid_s {
|
||||
@ -173,7 +173,7 @@ struct bfa_pciid_s {
|
||||
|
||||
extern char bfa_version[];
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA memory resources
|
||||
*/
|
||||
enum bfa_mem_type {
|
||||
@ -202,19 +202,19 @@ struct bfa_meminfo_s {
|
||||
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
|
||||
|
||||
struct bfa_iocfc_regs_s {
|
||||
bfa_os_addr_t intr_status;
|
||||
bfa_os_addr_t intr_mask;
|
||||
bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
|
||||
bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
|
||||
void __iomem *intr_status;
|
||||
void __iomem *intr_mask;
|
||||
void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
|
||||
void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
|
||||
void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS];
|
||||
void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
|
||||
void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
|
||||
void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
|
||||
void __iomem *rme_q_depth[BFI_IOC_MAX_CQS];
|
||||
void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
|
||||
};
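The struct change above (bfa_os_addr_t to void __iomem *) pairs with the bfa_reg_read()/bfa_reg_write() to readl()/writel() conversions elsewhere in this diff. Note the argument order: as the converted call sites show, the old wrapper took (address, value) while writel() takes (value, address), which is why every write is rewritten with its operands swapped. A minimal, hypothetical example (the helper and register offset are invented):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_INTR_MASK_OFF	0x20	/* made-up register offset */

/* "regs" would come from ioremap()/pci_iomap(); this helper is illustrative only. */
static u32 example_mask_all_irqs(void __iomem *regs)
{
	u32 old = readl(regs + EXAMPLE_INTR_MASK_OFF);	/* was bfa_reg_read(addr) */

	writel(~0U, regs + EXAMPLE_INTR_MASK_OFF);	/* was bfa_reg_write(addr, val) */
	return old;
}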
|
||||
|
||||
/**
|
||||
/*
|
||||
* MSIX vector handlers
|
||||
*/
|
||||
#define BFA_MSIX_MAX_VECTORS 22
|
||||
@ -224,7 +224,7 @@ struct bfa_msix_s {
|
||||
bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Chip specific interfaces
|
||||
*/
|
||||
struct bfa_hwif_s {
|
||||
@ -343,7 +343,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
|
||||
struct bfi_pbc_vport_s *pbc_vport);
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
* BFA public interfaces
|
||||
*----------------------------------------------------------------------
|
||||
|
@ -37,18 +37,18 @@ bfad_int_to_lun(u32 luno)
|
||||
} lun;
|
||||
|
||||
lun.bfa_lun = 0;
|
||||
lun.scsi_lun[0] = bfa_os_htons(luno);
|
||||
lun.scsi_lun[0] = cpu_to_be16(luno);
|
||||
|
||||
return lun.bfa_lun;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get LUN for the I/O request
|
||||
*/
|
||||
#define bfa_cb_ioim_get_lun(__dio) \
|
||||
bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get CDB for the I/O request
|
||||
*/
|
||||
static inline u8 *
|
||||
@ -59,7 +59,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
|
||||
return (u8 *) cmnd->cmnd;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get I/O direction (read/write) for the I/O request
|
||||
*/
|
||||
static inline enum fcp_iodir
|
||||
@ -77,7 +77,7 @@ bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
|
||||
return FCP_IODIR_NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get IO size in bytes for the I/O request
|
||||
*/
|
||||
static inline u32
|
||||
@ -88,7 +88,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
|
||||
return scsi_bufflen(cmnd);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get timeout for the I/O request
|
||||
*/
|
||||
static inline u8
|
||||
@ -104,7 +104,7 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get Command Reference Number for the I/O request. 0 if none.
|
||||
*/
|
||||
static inline u8
|
||||
@ -113,7 +113,7 @@ bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get SAM-3 priority for the I/O request. 0 is default.
|
||||
*/
|
||||
static inline u8
|
||||
@ -122,7 +122,7 @@ bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
|
||||
*/
|
||||
static inline u8
|
||||
@ -148,7 +148,7 @@ bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
|
||||
return task_attr;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
|
||||
*/
|
||||
static inline u8
|
||||
@ -159,7 +159,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
|
||||
return cmnd->cmd_len;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Assign queue to be used for the I/O request. This value depends on whether
|
||||
* the driver wants to use the queues via any specific algorithm. Currently,
|
||||
* this is not supported.
|
||||
|
@ -21,11 +21,11 @@
|
||||
|
||||
BFA_TRC_FILE(HAL, CORE);
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA IOC FC related definitions
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC local definitions
|
||||
*/
|
||||
#define BFA_IOCFC_TOV 5000 /* msecs */
|
||||
@ -54,7 +54,7 @@ enum {
|
||||
#define DEF_CFG_NUM_SBOOT_TGTS 16
|
||||
#define DEF_CFG_NUM_SBOOT_LUNS 16
|
||||
|
||||
/**
|
||||
/*
|
||||
* forward declaration for IOC FC functions
|
||||
*/
|
||||
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
|
||||
@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
|
||||
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
|
||||
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA Interrupt handling functions
|
||||
*/
|
||||
static void
|
||||
@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
|
||||
|
||||
waitq = bfa_reqq(bfa, qid);
|
||||
list_for_each_safe(qe, qen, waitq) {
|
||||
/**
|
||||
/*
|
||||
* Callback only as long as there is room in request queue
|
||||
*/
|
||||
if (bfa_reqq_full(bfa, qid))
|
||||
@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
|
||||
bfa_intx(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* hal_intr_api
|
||||
*/
|
||||
bfa_boolean_t
|
||||
@ -113,15 +113,15 @@ bfa_intx(struct bfa_s *bfa)
|
||||
u32 intr, qintr;
|
||||
int queue;
|
||||
|
||||
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
|
||||
intr = readl(bfa->iocfc.bfa_regs.intr_status);
|
||||
if (!intr)
|
||||
return BFA_FALSE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* RME completion queue interrupt
|
||||
*/
|
||||
qintr = intr & __HFN_INT_RME_MASK;
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
|
||||
writel(qintr, bfa->iocfc.bfa_regs.intr_status);
|
||||
|
||||
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
|
||||
if (intr & (__HFN_INT_RME_Q0 << queue))
|
||||
@ -131,11 +131,11 @@ bfa_intx(struct bfa_s *bfa)
|
||||
if (!intr)
|
||||
return BFA_TRUE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* CPE completion queue interrupt
|
||||
*/
|
||||
qintr = intr & __HFN_INT_CPE_MASK;
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
|
||||
writel(qintr, bfa->iocfc.bfa_regs.intr_status);
|
||||
|
||||
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
|
||||
if (intr & (__HFN_INT_CPE_Q0 << queue))
|
||||
@ -153,13 +153,13 @@ bfa_intx(struct bfa_s *bfa)
|
||||
void
|
||||
bfa_intx_enable(struct bfa_s *bfa)
|
||||
{
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
|
||||
writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_intx_disable(struct bfa_s *bfa)
|
||||
{
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
|
||||
writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
|
||||
}
|
||||
|
||||
void
|
||||
@ -188,8 +188,8 @@ bfa_isr_enable(struct bfa_s *bfa)
|
||||
__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
|
||||
__HFN_INT_MBOX_LPU1);
|
||||
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
|
||||
writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
|
||||
writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
|
||||
bfa->iocfc.intr_mask = ~intr_unmask;
|
||||
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
|
||||
}
|
||||
@ -198,7 +198,7 @@ void
|
||||
bfa_isr_disable(struct bfa_s *bfa)
|
||||
{
|
||||
bfa_isr_mode_set(bfa, BFA_FALSE);
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
|
||||
writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
|
||||
bfa_msix_uninstall(bfa);
|
||||
}
|
||||
|
||||
@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
|
||||
|
||||
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Resume any pending requests in the corresponding reqq.
|
||||
*/
|
||||
waitq = bfa_reqq(bfa, qid);
|
||||
@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* update CI
|
||||
*/
|
||||
bfa_rspq_ci(bfa, qid) = pi;
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
|
||||
writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
|
||||
mmiowb();
|
||||
|
||||
/**
|
||||
/*
|
||||
* Resume any pending requests in the corresponding reqq.
|
||||
*/
|
||||
waitq = bfa_reqq(bfa, qid);
|
||||
@ -279,7 +279,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
|
||||
{
|
||||
u32 intr, curr_value;
|
||||
|
||||
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
|
||||
intr = readl(bfa->iocfc.bfa_regs.intr_status);
|
||||
|
||||
if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
|
||||
bfa_msix_lpu(bfa);
|
||||
@ -289,30 +289,30 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
|
||||
|
||||
if (intr) {
|
||||
if (intr & __HFN_INT_LL_HALT) {
|
||||
/**
|
||||
/*
|
||||
* If LL_HALT bit is set then FW Init Halt LL Port
|
||||
* Register needs to be cleared as well so Interrupt
|
||||
* Status Register will be cleared.
|
||||
*/
|
||||
curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
|
||||
curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
|
||||
curr_value &= ~__FW_INIT_HALT_P;
|
||||
bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
|
||||
writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
|
||||
}
|
||||
|
||||
if (intr & __HFN_INT_ERR_PSS) {
|
||||
/**
|
||||
/*
|
||||
* ERR_PSS bit needs to be cleared as well in case
|
||||
* interrups are shared so driver's interrupt handler is
|
||||
* still called eventhough it is already masked out.
|
||||
*/
|
||||
curr_value = bfa_reg_read(
|
||||
curr_value = readl(
|
||||
bfa->ioc.ioc_regs.pss_err_status_reg);
|
||||
curr_value &= __PSS_ERR_STATUS_SET;
|
||||
bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
|
||||
curr_value);
|
||||
writel(curr_value,
|
||||
bfa->ioc.ioc_regs.pss_err_status_reg);
|
||||
}
|
||||
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
|
||||
writel(intr, bfa->iocfc.bfa_regs.intr_status);
|
||||
bfa_msix_errint(bfa, intr);
|
||||
}
|
||||
}
|
||||
@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
|
||||
bfa_isrs[mc] = isr_func;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA IOC FC related functions
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* hal_ioc_pvt BFA IOC private functions
|
||||
*/
|
||||
|
||||
@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
|
||||
BFA_CACHELINE_SZ);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
|
||||
*/
|
||||
static void
|
||||
@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
|
||||
|
||||
bfa_iocfc_reset_queues(bfa);
|
||||
|
||||
/**
|
||||
/*
|
||||
* initialize IOC configuration info
|
||||
*/
|
||||
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
|
||||
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
|
||||
|
||||
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
|
||||
/**
|
||||
/*
|
||||
* dma map REQ and RSP circular queues and shadow pointers
|
||||
*/
|
||||
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
|
||||
@ -400,17 +400,17 @@ bfa_iocfc_send_cfg(void *bfa_arg)
|
||||
bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
|
||||
iocfc->req_cq_shadow_ci[i].pa);
|
||||
cfg_info->req_cq_elems[i] =
|
||||
bfa_os_htons(cfg->drvcfg.num_reqq_elems);
|
||||
cpu_to_be16(cfg->drvcfg.num_reqq_elems);
|
||||
|
||||
bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
|
||||
iocfc->rsp_cq_ba[i].pa);
|
||||
bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
|
||||
iocfc->rsp_cq_shadow_pi[i].pa);
|
||||
cfg_info->rsp_cq_elems[i] =
|
||||
bfa_os_htons(cfg->drvcfg.num_rspq_elems);
|
||||
cpu_to_be16(cfg->drvcfg.num_rspq_elems);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enable interrupt coalescing if it is driver init path
|
||||
* and not ioc disable/enable path.
|
||||
*/
|
||||
@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
|
||||
|
||||
iocfc->cfgdone = BFA_FALSE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* dma map IOC configuration itself
|
||||
*/
|
||||
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
|
||||
@ -440,9 +440,9 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
iocfc->bfa = bfa;
|
||||
iocfc->action = BFA_IOCFC_ACT_NONE;
|
||||
|
||||
bfa_os_assign(iocfc->cfg, *cfg);
|
||||
iocfc->cfg = *cfg;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize chip specific handlers.
|
||||
*/
|
||||
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
|
||||
@ -503,13 +503,13 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
|
||||
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
|
||||
iocfc->req_cq_ba[i].kva = dm_kva;
|
||||
iocfc->req_cq_ba[i].pa = dm_pa;
|
||||
bfa_os_memset(dm_kva, 0, per_reqq_sz);
|
||||
memset(dm_kva, 0, per_reqq_sz);
|
||||
dm_kva += per_reqq_sz;
|
||||
dm_pa += per_reqq_sz;
|
||||
|
||||
iocfc->rsp_cq_ba[i].kva = dm_kva;
|
||||
iocfc->rsp_cq_ba[i].pa = dm_pa;
|
||||
bfa_os_memset(dm_kva, 0, per_rspq_sz);
|
||||
memset(dm_kva, 0, per_rspq_sz);
|
||||
dm_kva += per_rspq_sz;
|
||||
dm_pa += per_rspq_sz;
|
||||
}
|
||||
@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Start BFA submodules.
|
||||
*/
|
||||
static void
|
||||
@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
|
||||
hal_mods[i]->start(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Disable BFA submodules.
|
||||
*/
|
||||
static void
|
||||
@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
|
||||
complete(&bfad->disable_comp);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Update BFA configuration from firmware configuration.
|
||||
*/
|
||||
static void
|
||||
@ -634,15 +634,15 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
|
||||
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
|
||||
|
||||
fwcfg->num_cqs = fwcfg->num_cqs;
|
||||
fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
|
||||
fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
|
||||
fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
|
||||
fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
|
||||
fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
|
||||
fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
|
||||
fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
|
||||
fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
|
||||
fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
|
||||
fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
|
||||
|
||||
iocfc->cfgdone = BFA_TRUE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Configuration is complete - initialize/start submodules
|
||||
*/
|
||||
bfa_fcport_init(bfa);
|
||||
@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC enable request is complete
|
||||
*/
|
||||
static void
|
||||
@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
|
||||
bfa_iocfc_send_cfg(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC disable request is complete
|
||||
*/
|
||||
static void
|
||||
@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Notify sub-modules of hardware failure.
|
||||
*/
|
||||
static void
|
||||
@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
|
||||
bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Actions on chip-reset completion.
|
||||
*/
|
||||
static void
|
||||
@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
|
||||
bfa_isr_enable(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* hal_ioc_public
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Query IOC memory requirement information.
|
||||
*/
|
||||
void
|
||||
@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
|
||||
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Query IOC memory requirement information.
|
||||
*/
|
||||
void
|
||||
@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
ioc->trcmod = bfa->trcmod;
|
||||
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
|
||||
*/
|
||||
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
|
||||
@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Query IOC memory requirement information.
|
||||
*/
|
||||
void
|
||||
@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
|
||||
bfa_ioc_detach(&bfa->ioc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Query IOC memory requirement information.
|
||||
*/
|
||||
void
|
||||
@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
|
||||
bfa_ioc_enable(&bfa->ioc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC start called from bfa_start(). Called to start IOC operations
|
||||
* at driver instantiation for this instance.
|
||||
*/
|
||||
@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
|
||||
bfa_iocfc_start_submod(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC stop called from bfa_stop(). Called only when driver is unloaded
|
||||
* for this instance.
|
||||
*/
|
||||
@ -876,12 +876,12 @@ bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
|
||||
attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
|
||||
|
||||
attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
|
||||
bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
|
||||
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
|
||||
be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
|
||||
be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
|
||||
|
||||
attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
|
||||
bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
|
||||
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
|
||||
be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
|
||||
be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
|
||||
|
||||
attr->config = iocfc->cfg;
|
||||
}
|
||||
@ -893,8 +893,8 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
|
||||
struct bfi_iocfc_set_intr_req_s *m;
|
||||
|
||||
iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
|
||||
iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
|
||||
iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
|
||||
iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
|
||||
iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
|
||||
|
||||
if (!bfa_iocfc_is_operational(bfa))
|
||||
return BFA_STATUS_OK;
|
||||
@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
|
||||
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
|
||||
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
|
||||
}
|
||||
/**
|
||||
/*
|
||||
* Enable IOC after it is disabled.
|
||||
*/
|
||||
void
|
||||
@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
|
||||
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Return boot target port wwns -- read from boot information in flash.
|
||||
*/
|
||||
void
|
||||
@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
|
||||
return cfgrsp->pbc_cfg.nvports;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* hal_api
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to query the memory requirement of the BFA library.
|
||||
* This function needs to be called before bfa_attach() to get the
|
||||
* memory required of the BFA layer for a given driver configuration.
|
||||
@ -1038,7 +1038,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
|
||||
|
||||
bfa_assert((cfg != NULL) && (meminfo != NULL));
|
||||
|
||||
bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
|
||||
memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
|
||||
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
|
||||
BFA_MEM_TYPE_KVA;
|
||||
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
|
||||
@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
|
||||
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to attach the driver instance to the BFA
|
||||
* library. This function will not trigger any HW initialization
|
||||
* process (which will be done in bfa_init() call)
|
||||
@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
|
||||
bfa_assert((cfg != NULL) && (meminfo != NULL));
|
||||
|
||||
/**
|
||||
/*
|
||||
* initialize all memory pointers for iterative allocation
|
||||
*/
|
||||
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
|
||||
@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
||||
bfa_com_port_attach(bfa, meminfo);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to delete a BFA IOC. IOC should be stopped (by
|
||||
* calling bfa_stop()) before this function call.
|
||||
*
|
||||
@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
|
||||
bfa->plog = plog;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize IOC.
|
||||
*
|
||||
* This function will return immediately, when the IOC initialization is
|
||||
@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
|
||||
bfa_iocfc_init(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to initiate the IOC configuration setup. This function
|
||||
* will return immediately.
|
||||
*
|
||||
@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
|
||||
bfa_iocfc_start(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to quiesce the IOC. This function will return immediately,
|
||||
* when the IOC is actually stopped, the bfad->comp will be set.
|
||||
*
|
||||
@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
|
||||
bfa->fcs = BFA_TRUE;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Periodic timer heart beat from driver
|
||||
*/
|
||||
void
|
||||
@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
|
||||
bfa_timer_beat(&bfa->timer_mod);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Return the list of PCI vendor/device id lists supported by this
|
||||
* BFA instance.
|
||||
*/
|
||||
@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
|
||||
*pciids = __pciids;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
|
||||
* into BFA layer). The OS driver can then turn back and overwrite entries that
|
||||
* have been configured by the user.
|
||||
@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
|
||||
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Retrieve firmware trace information on IOC failure.
|
||||
*/
|
||||
bfa_status_t
|
||||
@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
|
||||
return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Clear the saved firmware trace information of an IOC.
|
||||
*/
|
||||
void
|
||||
@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
|
||||
bfa_ioc_debug_fwsave_clear(&bfa->ioc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Fetch firmware trace data.
|
||||
*
|
||||
* @param[in] bfa BFA instance
|
||||
@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
|
||||
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Dump firmware memory.
|
||||
*
|
||||
* @param[in] bfa BFA instance
|
||||
@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
|
||||
{
|
||||
return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
|
||||
}
|
||||
/**
|
||||
/*
|
||||
* Reset hw semaphore & usage cnt regs and initialize.
|
||||
*/
|
||||
void
|
||||
@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
|
||||
bfa_ioc_pll_init(&bfa->ioc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Fetch firmware statistics data.
|
||||
*
|
||||
* @param[in] bfa BFA instance
|
||||
|
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_cs.h BFA common services
|
||||
*/
|
||||
|
||||
@ -24,7 +24,7 @@
|
||||
|
||||
#include "bfa_os_inc.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA TRC
|
||||
*/
|
||||
|
||||
@ -73,7 +73,7 @@ enum {
|
||||
#define BFA_TRC_MOD_SH 10
|
||||
#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Define a new tracing file (module). Module should match one defined above.
|
||||
*/
|
||||
#define BFA_TRC_FILE(__mod, __submod) \
|
||||
@ -155,7 +155,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
|
||||
#define bfa_trc_fp(_trcp, _data)
|
||||
#endif
|
||||
|
||||
/**
|
||||
/*
|
||||
* @ BFA LOG interfaces
|
||||
*/
|
||||
#define bfa_assert(__cond) do { \
|
||||
@ -249,13 +249,13 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
|
||||
#define bfa_q_is_on_q(_q, _qe) \
|
||||
bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
|
||||
|
||||
/**
|
||||
/*
|
||||
* @ BFA state machine interfaces
|
||||
*/
|
||||
|
||||
typedef void (*bfa_sm_t)(void *sm, int event);
|
||||
|
||||
/**
|
||||
/*
|
||||
* oc - object class eg. bfa_ioc
|
||||
* st - state, eg. reset
|
||||
* otype - object type, eg. struct bfa_ioc_s
|
||||
@ -269,7 +269,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
|
||||
#define bfa_sm_get_state(_sm) ((_sm)->sm)
|
||||
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
|
||||
|
||||
/**
|
||||
/*
|
||||
* For converting from state machine function to state encoding.
|
||||
*/
|
||||
struct bfa_sm_table_s {
|
||||
@ -279,12 +279,12 @@ struct bfa_sm_table_s {
|
||||
};
|
||||
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
|
||||
|
||||
/**
|
||||
/*
|
||||
* State machine with entry actions.
|
||||
*/
|
||||
typedef void (*bfa_fsm_t)(void *fsm, int event);
|
||||
|
||||
/**
|
||||
/*
|
||||
* oc - object class eg. bfa_ioc
|
||||
* st - state, eg. reset
|
||||
* otype - object type, eg. struct bfa_ioc_s
|
||||
@ -314,7 +314,7 @@ bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
|
||||
return smt[i].state;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* @ Generic wait counter.
|
||||
*/
|
||||
|
||||
@ -340,7 +340,7 @@ bfa_wc_down(struct bfa_wc_s *wc)
|
||||
wc->wc_resume(wc->wc_cbarg);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize a waiting counter.
|
||||
*/
|
||||
static inline void
|
||||
@ -352,7 +352,7 @@ bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
|
||||
bfa_wc_up(wc);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Wait for counter to reach zero
|
||||
*/
|
||||
static inline void
|
||||
|
@ -24,7 +24,7 @@
|
||||
#define BFA_MFG_SERIALNUM_SIZE 11
|
||||
#define STRSZ(_n) (((_n) + 4) & ~3)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Manufacturing card type
|
||||
*/
|
||||
enum {
|
||||
@ -45,7 +45,7 @@ enum {
|
||||
|
||||
#pragma pack(1)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Check if Mezz card
|
||||
*/
|
||||
#define bfa_mfg_is_mezz(type) (( \
|
||||
@ -55,7 +55,7 @@ enum {
|
||||
(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
|
||||
(type) == BFA_MFG_TYPE_LIGHTNING))
|
||||
|
||||
/**
|
||||
/*
|
||||
* Check if the card has old wwn/mac handling
|
||||
*/
|
||||
#define bfa_mfg_is_old_wwn_mac_model(type) (( \
|
||||
@ -78,12 +78,12 @@ do { \
|
||||
(m)[2] = t & 0xFF; \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
/*
|
||||
* VPD data length
|
||||
*/
|
||||
#define BFA_MFG_VPD_LEN 512
|
||||
|
||||
/**
|
||||
/*
|
||||
* VPD vendor tag
|
||||
*/
|
||||
enum {
|
||||
@ -97,7 +97,7 @@ enum {
|
||||
BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* All numerical fields are in big-endian format.
|
||||
*/
|
||||
struct bfa_mfg_vpd_s {
|
||||
@ -112,7 +112,7 @@ struct bfa_mfg_vpd_s {
|
||||
|
||||
#pragma pack()
|
||||
|
||||
/**
|
||||
/*
|
||||
* Status return values
|
||||
*/
|
||||
enum bfa_status {
|
||||
@ -167,11 +167,11 @@ enum bfa_boolean {
|
||||
#define BFA_STRING_32 32
|
||||
#define BFA_VERSION_LEN 64
|
||||
|
||||
/**
|
||||
/*
|
||||
* ---------------------- adapter definitions ------------
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA adapter level attributes.
|
||||
*/
|
||||
enum {
|
||||
@ -215,7 +215,7 @@ struct bfa_adapter_attr_s {
|
||||
u8 trunk_capable;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* ---------------------- IOC definitions ------------
|
||||
*/
|
||||
|
||||
@ -224,7 +224,7 @@ enum {
|
||||
BFA_IOC_CHIP_REV_LEN = 8,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Driver and firmware versions.
|
||||
*/
|
||||
struct bfa_ioc_driver_attr_s {
|
||||
@ -236,7 +236,7 @@ struct bfa_ioc_driver_attr_s {
|
||||
char ob_ver[BFA_VERSION_LEN]; /* openboot version */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC PCI device attributes
|
||||
*/
|
||||
struct bfa_ioc_pci_attr_s {
|
||||
@ -249,7 +249,7 @@ struct bfa_ioc_pci_attr_s {
|
||||
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC states
|
||||
*/
|
||||
enum bfa_ioc_state {
|
||||
@ -267,7 +267,7 @@ enum bfa_ioc_state {
|
||||
BFA_IOC_ENABLING = 12, /* IOC is being enabled */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware stats
|
||||
*/
|
||||
struct bfa_fw_ioc_stats_s {
|
||||
@ -279,7 +279,7 @@ struct bfa_fw_ioc_stats_s {
|
||||
u32 unknown_reqs;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC driver stats
|
||||
*/
|
||||
struct bfa_ioc_drv_stats_s {
|
||||
@ -296,7 +296,7 @@ struct bfa_ioc_drv_stats_s {
|
||||
u32 enable_replies;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC statistics
|
||||
*/
|
||||
struct bfa_ioc_stats_s {
|
||||
@ -310,7 +310,7 @@ enum bfa_ioc_type_e {
|
||||
BFA_IOC_TYPE_LL = 3,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC attributes returned in queries
|
||||
*/
|
||||
struct bfa_ioc_attr_s {
|
||||
@ -323,11 +323,11 @@ struct bfa_ioc_attr_s {
|
||||
u8 rsvd[7]; /* 64bit align */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* ---------------------- mfg definitions ------------
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Checksum size
|
||||
*/
|
||||
#define BFA_MFG_CHKSUM_SIZE 16
|
||||
@ -340,7 +340,7 @@ struct bfa_ioc_attr_s {
|
||||
|
||||
#pragma pack(1)
|
||||
|
||||
/**
|
||||
/*
|
||||
* All numerical fields are in big-endian format.
|
||||
*/
|
||||
struct bfa_mfg_block_s {
|
||||
@ -373,11 +373,11 @@ struct bfa_mfg_block_s {
|
||||
|
||||
#pragma pack()
|
||||
|
||||
/**
|
||||
/*
|
||||
* ---------------------- pci definitions ------------
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI device and vendor ID information
|
||||
*/
|
||||
enum {
|
||||
@ -392,14 +392,14 @@ enum {
|
||||
((devid) == BFA_PCI_DEVICE_ID_CT || \
|
||||
(devid) == BFA_PCI_DEVICE_ID_CT_FC)
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI sub-system device and vendor ID information
|
||||
*/
|
||||
enum {
|
||||
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Maximum number of device address ranges mapped through different BAR(s)
|
||||
*/
|
||||
#define BFA_PCI_ACCESS_RANGES 1
|
||||
@ -430,7 +430,7 @@ enum {
|
||||
#define BOOT_CFG_REV1 1
|
||||
#define BOOT_CFG_VLAN 1
|
||||
|
||||
/**
|
||||
/*
|
||||
* Boot options setting. Boot options setting determines from where
|
||||
* to get the boot lun information
|
||||
*/
|
||||
@ -442,7 +442,7 @@ enum bfa_boot_bootopt {
|
||||
};
|
||||
|
||||
#pragma pack(1)
|
||||
/**
|
||||
/*
|
||||
* Boot lun information.
|
||||
*/
|
||||
struct bfa_boot_bootlun_s {
|
||||
@ -451,7 +451,7 @@ struct bfa_boot_bootlun_s {
|
||||
};
|
||||
#pragma pack()
|
||||
|
||||
/**
|
||||
/*
|
||||
* BOOT boot configuration
|
||||
*/
|
||||
struct bfa_boot_pbc_s {
|
||||
|
@ -21,7 +21,7 @@
|
||||
#include "bfa_fc.h"
|
||||
#include "bfa_defs_svc.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* VF states
|
||||
*/
|
||||
enum bfa_vf_state {
|
||||
@ -35,7 +35,7 @@ enum bfa_vf_state {
|
||||
BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* VF statistics
|
||||
*/
|
||||
struct bfa_vf_stats_s {
|
||||
@ -55,7 +55,7 @@ struct bfa_vf_stats_s {
|
||||
u32 resvd; /* padding for 64 bit alignment */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* VF attributes returned in queries
|
||||
*/
|
||||
struct bfa_vf_attr_s {
|
||||
@ -67,7 +67,7 @@ struct bfa_vf_attr_s {
|
||||
#define BFA_FCS_MAX_LPORTS 256
|
||||
#define BFA_FCS_FABRIC_IPADDR_SZ 16
|
||||
|
||||
/**
|
||||
/*
|
||||
* symbolic names for base port/virtual port
|
||||
*/
|
||||
#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
|
||||
@ -75,7 +75,7 @@ struct bfa_lport_symname_s {
|
||||
char symname[BFA_SYMNAME_MAXLEN];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Roles of FCS port:
|
||||
* - FCP IM and FCP TM roles cannot be enabled together for a FCS port
|
||||
* - Create multiple ports if both IM and TM functions required.
|
||||
@ -86,19 +86,19 @@ enum bfa_lport_role {
|
||||
BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS port configuration.
|
||||
*/
|
||||
struct bfa_lport_cfg_s {
|
||||
wwn_t pwwn; /* port wwn */
|
||||
wwn_t nwwn; /* node wwn */
|
||||
struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
|
||||
bfa_boolean_t preboot_vp; /* vport created from PBC */
|
||||
bfa_boolean_t preboot_vp; /* vport created from PBC */
|
||||
enum bfa_lport_role roles; /* FCS port roles */
|
||||
u8 tag[16]; /* opaque tag from application */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS port states
|
||||
*/
|
||||
enum bfa_lport_state {
|
||||
@ -108,7 +108,7 @@ enum bfa_lport_state {
|
||||
BFA_LPORT_OFFLINE = 3, /* No login to fabric */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS port type.
|
||||
*/
|
||||
enum bfa_lport_type {
|
||||
@ -116,7 +116,7 @@ enum bfa_lport_type {
|
||||
BFA_LPORT_TYPE_VIRTUAL,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS port offline reason.
|
||||
*/
|
||||
enum bfa_lport_offline_reason {
|
||||
@ -128,7 +128,7 @@ enum bfa_lport_offline_reason {
|
||||
BFA_LPORT_OFFLINE_FAB_LOGOUT,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS lport info.
|
||||
*/
|
||||
struct bfa_lport_info_s {
|
||||
@ -150,7 +150,7 @@ struct bfa_lport_info_s {
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS port statistics
|
||||
*/
|
||||
struct bfa_lport_stats_s {
|
||||
@ -222,7 +222,7 @@ struct bfa_lport_stats_s {
|
||||
* (max retry of plogi) */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA port attribute returned in queries
|
||||
*/
|
||||
struct bfa_lport_attr_s {
|
||||
@ -239,7 +239,7 @@ struct bfa_lport_attr_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* VPORT states
|
||||
*/
|
||||
enum bfa_vport_state {
|
||||
@ -258,7 +258,7 @@ enum bfa_vport_state {
|
||||
BFA_FCS_VPORT_MAX_STATE,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* vport statistics
|
||||
*/
|
||||
struct bfa_vport_stats_s {
|
||||
@ -296,7 +296,7 @@ struct bfa_vport_stats_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA vport attribute returned in queries
|
||||
*/
|
||||
struct bfa_vport_attr_s {
|
||||
@ -305,7 +305,7 @@ struct bfa_vport_attr_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS remote port states
|
||||
*/
|
||||
enum bfa_rport_state {
|
||||
@ -321,7 +321,7 @@ enum bfa_rport_state {
|
||||
BFA_RPORT_NSDISC = 9, /* re-discover rport */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport Scsi Function : Initiator/Target.
|
||||
*/
|
||||
enum bfa_rport_function {
|
||||
@ -329,7 +329,7 @@ enum bfa_rport_function {
|
||||
BFA_RPORT_TARGET = 0x02, /* SCSI Target */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* port/node symbolic names for rport
|
||||
*/
|
||||
#define BFA_RPORT_SYMNAME_MAXLEN 255
|
||||
@ -337,7 +337,7 @@ struct bfa_rport_symname_s {
|
||||
char symname[BFA_RPORT_SYMNAME_MAXLEN];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS remote port statistics
|
||||
*/
|
||||
struct bfa_rport_stats_s {
|
||||
@ -374,7 +374,7 @@ struct bfa_rport_stats_s {
|
||||
struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS remote port attributes returned in queries
|
||||
*/
|
||||
struct bfa_rport_attr_s {
|
||||
@ -411,7 +411,7 @@ struct bfa_rport_remote_link_stats_s {
|
||||
#define BFA_MAX_IO_INDEX 7
|
||||
#define BFA_NO_IO_INDEX 9
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS itnim states
|
||||
*/
|
||||
enum bfa_itnim_state {
|
||||
@ -425,7 +425,7 @@ enum bfa_itnim_state {
|
||||
BFA_ITNIM_INITIATIOR = 7, /* initiator */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS remote port statistics
|
||||
*/
|
||||
struct bfa_itnim_stats_s {
|
||||
@ -443,7 +443,7 @@ struct bfa_itnim_stats_s {
|
||||
u32 rsvd; /* padding for 64 bit alignment */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS itnim attributes returned in queries
|
||||
*/
|
||||
struct bfa_itnim_attr_s {
|
||||
|
@ -27,7 +27,7 @@
|
||||
#define BFA_IOCFCOE_INTR_DELAY 25
|
||||
#define BFA_IOCFCOE_INTR_LATENCY 5
|
||||
|
||||
/**
|
||||
/*
|
||||
* Interrupt coalescing configuration.
|
||||
*/
|
||||
#pragma pack(1)
|
||||
@ -38,7 +38,7 @@ struct bfa_iocfc_intr_attr_s {
|
||||
u16 delay; /* delay in microseconds */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware configuration
|
||||
*/
|
||||
struct bfa_iocfc_fwcfg_s {
|
||||
@ -71,7 +71,7 @@ struct bfa_iocfc_drvcfg_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC configuration
|
||||
*/
|
||||
struct bfa_iocfc_cfg_s {
|
||||
@ -79,7 +79,7 @@ struct bfa_iocfc_cfg_s {
|
||||
struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware IO stats
|
||||
*/
|
||||
struct bfa_fw_io_stats_s {
|
||||
@ -152,7 +152,7 @@ struct bfa_fw_io_stats_s {
|
||||
*/
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC port firmware stats
|
||||
*/
|
||||
|
||||
@ -262,7 +262,7 @@ struct bfa_fw_fcoe_stats_s {
|
||||
u32 mac_invalids; /* Invalid mac assigned */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware FCoE port stats
|
||||
*/
|
||||
struct bfa_fw_fcoe_port_stats_s {
|
||||
@ -270,7 +270,7 @@ struct bfa_fw_fcoe_port_stats_s {
|
||||
struct bfa_fw_fip_stats_s fip_stats;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware FC uport stats
|
||||
*/
|
||||
struct bfa_fw_fc_uport_stats_s {
|
||||
@ -278,7 +278,7 @@ struct bfa_fw_fc_uport_stats_s {
|
||||
struct bfa_fw_port_lksm_stats_s lksm_stats;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware FC port stats
|
||||
*/
|
||||
union bfa_fw_fc_port_stats_s {
|
||||
@ -286,7 +286,7 @@ union bfa_fw_fc_port_stats_s {
|
||||
struct bfa_fw_fcoe_port_stats_s fcoe_stats;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware port stats
|
||||
*/
|
||||
struct bfa_fw_port_stats_s {
|
||||
@ -295,7 +295,7 @@ struct bfa_fw_port_stats_s {
|
||||
union bfa_fw_fc_port_stats_s fc_port;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcxchg module statistics
|
||||
*/
|
||||
struct bfa_fw_fcxchg_stats_s {
|
||||
@ -308,7 +308,7 @@ struct bfa_fw_lpsm_stats_s {
|
||||
u32 cls_tx;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Trunk statistics
|
||||
*/
|
||||
struct bfa_fw_trunk_stats_s {
|
||||
@ -334,7 +334,7 @@ struct bfa_fw_advsm_stats_s {
|
||||
u32 elp_dropped; /* ELP dropped */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOCFC firmware stats
|
||||
*/
|
||||
struct bfa_fw_iocfc_stats_s {
|
||||
@ -345,7 +345,7 @@ struct bfa_fw_iocfc_stats_s {
|
||||
u32 set_intr_reqs; /* set interrupt reqs */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC attributes returned in queries
|
||||
*/
|
||||
struct bfa_iocfc_attr_s {
|
||||
@ -353,7 +353,7 @@ struct bfa_iocfc_attr_s {
|
||||
struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Eth_sndrcv mod stats
|
||||
*/
|
||||
struct bfa_fw_eth_sndrcv_stats_s {
|
||||
@ -361,7 +361,7 @@ struct bfa_fw_eth_sndrcv_stats_s {
|
||||
u32 rsvd; /* 64bit align */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* CT MAC mod stats
|
||||
*/
|
||||
struct bfa_fw_mac_mod_stats_s {
|
||||
@ -379,7 +379,7 @@ struct bfa_fw_mac_mod_stats_s {
|
||||
u32 rsvd; /* 64bit align */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* CT MOD stats
|
||||
*/
|
||||
struct bfa_fw_ct_mod_stats_s {
|
||||
@ -391,7 +391,7 @@ struct bfa_fw_ct_mod_stats_s {
|
||||
u32 rsvd; /* 64bit align */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC firmware stats
|
||||
*/
|
||||
struct bfa_fw_stats_s {
|
||||
@ -412,7 +412,7 @@ struct bfa_fw_stats_s {
|
||||
#define BFA_IOCFC_PATHTOV_MAX 60
|
||||
#define BFA_IOCFC_QDEPTH_MAX 2000
|
||||
|
||||
/**
|
||||
/*
|
||||
* QoS states
|
||||
*/
|
||||
enum bfa_qos_state {
|
||||
@ -420,7 +420,7 @@ enum bfa_qos_state {
|
||||
BFA_QOS_OFFLINE = 2, /* QoS is offline */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* QoS Priority levels.
|
||||
*/
|
||||
enum bfa_qos_priority {
|
||||
@ -430,7 +430,7 @@ enum bfa_qos_priority {
|
||||
BFA_QOS_LOW = 3, /* QoS Priority Level Low */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* QoS bandwidth allocation for each priority level
|
||||
*/
|
||||
enum bfa_qos_bw_alloc {
|
||||
@ -439,7 +439,7 @@ enum bfa_qos_bw_alloc {
|
||||
BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
|
||||
};
|
||||
#pragma pack(1)
|
||||
/**
|
||||
/*
|
||||
* QoS attribute returned in QoS Query
|
||||
*/
|
||||
struct bfa_qos_attr_s {
|
||||
@ -448,7 +448,7 @@ struct bfa_qos_attr_s {
|
||||
u32 total_bb_cr; /* Total BB Credits */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* These fields should be displayed only from the CLI.
|
||||
* There will be a separate BFAL API (get_qos_vc_attr ?)
|
||||
* to retrieve this.
|
||||
@ -471,7 +471,7 @@ struct bfa_qos_vc_attr_s {
|
||||
* total_vc_count */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* QoS statistics
|
||||
*/
|
||||
struct bfa_qos_stats_s {
|
||||
@ -489,7 +489,7 @@ struct bfa_qos_stats_s {
|
||||
u32 rsvd; /* padding for 64 bit alignment */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCoE statistics
|
||||
*/
|
||||
struct bfa_fcoe_stats_s {
|
||||
@ -540,7 +540,7 @@ struct bfa_fcoe_stats_s {
|
||||
u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* QoS or FCoE stats (fcport stats excluding physical FC port stats)
|
||||
*/
|
||||
union bfa_fcport_stats_u {
|
||||
@ -639,7 +639,7 @@ enum bfa_port_states {
|
||||
BFA_PORT_ST_MAX_STATE,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port operational type (in sync with SNIA port type).
|
||||
*/
|
||||
enum bfa_port_type {
|
||||
@ -651,7 +651,7 @@ enum bfa_port_type {
|
||||
BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port topology setting. A port's topology and fabric login status
|
||||
* determine its operational type.
|
||||
*/
|
||||
@ -662,7 +662,7 @@ enum bfa_port_topology {
|
||||
BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Physical port loopback types.
|
||||
*/
|
||||
enum bfa_port_opmode {
|
||||
@ -679,7 +679,7 @@ enum bfa_port_opmode {
|
||||
(_mode == BFA_PORT_OPMODE_LB_SLW) || \
|
||||
(_mode == BFA_PORT_OPMODE_LB_EXT))
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port link state
|
||||
*/
|
||||
enum bfa_port_linkstate {
|
||||
@ -687,7 +687,7 @@ enum bfa_port_linkstate {
|
||||
BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port link state reason code
|
||||
*/
|
||||
enum bfa_port_linkstate_rsn {
|
||||
@ -733,7 +733,7 @@ enum bfa_port_linkstate_rsn {
|
||||
CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
|
||||
};
|
||||
#pragma pack(1)
|
||||
/**
|
||||
/*
|
||||
* Physical port configuration
|
||||
*/
|
||||
struct bfa_port_cfg_s {
|
||||
@ -753,7 +753,7 @@ struct bfa_port_cfg_s {
|
||||
};
|
||||
#pragma pack()
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port attribute values.
|
||||
*/
|
||||
struct bfa_port_attr_s {
|
||||
@ -800,7 +800,7 @@ struct bfa_port_attr_s {
|
||||
u8 rsvd1[6];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port FCP mappings.
|
||||
*/
|
||||
struct bfa_port_fcpmap_s {
|
||||
@ -815,7 +815,7 @@ struct bfa_port_fcpmap_s {
|
||||
char luid[256];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port RNID info.
|
||||
*/
|
||||
struct bfa_port_rnid_s {
|
||||
@ -848,7 +848,7 @@ struct bfa_fcport_fcf_s {
|
||||
mac_t mac; /* FCF mac */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Trunk states for BCU/BFAL
|
||||
*/
|
||||
enum bfa_trunk_state {
|
||||
@ -857,7 +857,7 @@ enum bfa_trunk_state {
|
||||
BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* VC attributes for trunked link
|
||||
*/
|
||||
struct bfa_trunk_vc_attr_s {
|
||||
@ -867,7 +867,7 @@ struct bfa_trunk_vc_attr_s {
|
||||
u16 vc_credits[8];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Link state information
|
||||
*/
|
||||
struct bfa_port_link_s {
|
||||
@ -959,7 +959,7 @@ struct bfa_rport_hal_stats_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
#pragma pack(1)
|
||||
/**
|
||||
/*
|
||||
* Rport's QoS attributes
|
||||
*/
|
||||
struct bfa_rport_qos_attr_s {
|
||||
@ -987,7 +987,7 @@ struct bfa_itnim_ioprofile_s {
|
||||
struct bfa_itnim_latency_s io_latency;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC physical port statistics.
|
||||
*/
|
||||
struct bfa_port_fc_stats_s {
|
||||
@ -1022,7 +1022,7 @@ struct bfa_port_fc_stats_s {
|
||||
u64 err_enc; /* Encoding err frame_8b10b */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Eth Physical Port statistics.
|
||||
*/
|
||||
struct bfa_port_eth_stats_s {
|
||||
@ -1070,7 +1070,7 @@ struct bfa_port_eth_stats_s {
|
||||
u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Port statistics.
|
||||
*/
|
||||
union bfa_port_stats_u {
|
||||
|
@ -17,7 +17,7 @@
|
||||
|
||||
#include "bfa_modules.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA module list terminated by NULL
|
||||
*/
|
||||
struct bfa_module_s *hal_mods[] = {
|
||||
@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Message handlers for various modules.
|
||||
*/
|
||||
bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
|
||||
@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Message handlers for mailbox command classes
|
||||
*/
|
||||
bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
|
||||
|
@@ -1029,7 +1029,7 @@ struct link_e2e_beacon_req_s {
struct link_e2e_beacon_param_s beacon_parm;
};

/**
/*
* If RPSC request is sent to the Domain Controller, the request is for
* all the ports within that domain (TODO - I don't think FOS implements
* this...).
@@ -1049,7 +1049,7 @@ struct fc_rpsc_acc_s {
struct fc_rpsc_speed_info_s speed_info[1];
};

/**
/*
* If RPSC2 request is sent to the Domain Controller,
*/
#define FC_BRCD_TOKEN 0x42524344
@@ -1094,7 +1094,7 @@ struct fc_rpsc2_acc_s {
struct fc_rpsc2_port_info_s port_info[1]; /* port information */
};

/**
/*
* bit fields so that multiple classes can be specified
*/
enum fc_cos {
@@ -1131,7 +1131,7 @@ struct fc_alpabm_s {
#define FC_VF_ID_MAX 0xEFF
#define FC_VF_ID_CTL 0xFEF /* control VF_ID */

/**
/*
* Virtual Fabric Tagging header format
* @caution This is defined only in BIG ENDIAN format.
*/
@@ -1463,7 +1463,7 @@ struct fcgs_gidpn_resp_s {
u32 dap:24; /* port identifier */
};

/**
/*
* RFT_ID
*/
struct fcgs_rftid_req_s {
@@ -1472,7 +1472,7 @@ struct fcgs_rftid_req_s {
u32 fc4_type[8]; /* fc4 types */
};

/**
/*
* RFF_ID : Register FC4 features.
*/

@@ -1487,7 +1487,7 @@ struct fcgs_rffid_req_s {
u32 fc4_type:8; /* corresponding FC4 Type */
};

/**
/*
* GID_FT Request
*/
struct fcgs_gidft_req_s {
@@ -1497,7 +1497,7 @@ struct fcgs_gidft_req_s {
u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
}; /* GID_FT Request */

/**
/*
* GID_FT Response
*/
struct fcgs_gidft_resp_s {
@@ -1506,7 +1506,7 @@ struct fcgs_gidft_resp_s {
u32 pid:24; /* port identifier */
}; /* GID_FT Response */

/**
/*
* RSPN_ID
*/
struct fcgs_rspnid_req_s {
@@ -1516,7 +1516,7 @@ struct fcgs_rspnid_req_s {
u8 spn[256]; /* symbolic port name */
};

/**
/*
* RPN_ID
*/
struct fcgs_rpnid_req_s {
@@ -1525,7 +1525,7 @@ struct fcgs_rpnid_req_s {
wwn_t port_name;
};

/**
/*
* RNN_ID
*/
struct fcgs_rnnid_req_s {
@@ -1534,7 +1534,7 @@ struct fcgs_rnnid_req_s {
wwn_t node_name;
};

/**
/*
* RCS_ID
*/
struct fcgs_rcsid_req_s {
@@ -1543,7 +1543,7 @@ struct fcgs_rcsid_req_s {
u32 cos;
};

/**
/*
* RPT_ID
*/
struct fcgs_rptid_req_s {
@@ -1553,7 +1553,7 @@ struct fcgs_rptid_req_s {
u32 rsvd1:24;
};

/**
/*
* GA_NXT Request
*/
struct fcgs_ganxt_req_s {
@@ -1561,7 +1561,7 @@ struct fcgs_ganxt_req_s {
u32 port_id:24;
};

/**
/*
* GA_NXT Response
*/
struct fcgs_ganxt_rsp_s {
@@ -94,13 +94,13 @@ fcbuild_init(void)
*/
plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
plogi_tmpl.csp.ciro = 0x1;
plogi_tmpl.csp.cisc = 0x0;
plogi_tmpl.csp.altbbcred = 0x0;
plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);
plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);

plogi_tmpl.class3.class_valid = 1;
plogi_tmpl.class3.sequential = 1;
@@ -112,7 +112,7 @@ fcbuild_init(void)
*/
prli_tmpl.command = FC_ELS_PRLI;
prli_tmpl.pglen = 0x10;
prli_tmpl.pagebytes = bfa_os_htons(0x0014);
prli_tmpl.pagebytes = cpu_to_be16(0x0014);
prli_tmpl.parampage.type = FC_TYPE_FCP;
prli_tmpl.parampage.imagepair = 1;
prli_tmpl.parampage.servparams.rxrdisab = 1;
@@ -137,7 +137,7 @@ fcbuild_init(void)
static void
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
{
bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
memset(fchs, 0, sizeof(struct fchs_s));

fchs->routing = FC_RTG_FC4_DEV_DATA;
fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
@@ -148,9 +148,9 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
fchs->rx_id = FC_RXID_ANY;
fchs->d_id = (d_id);
fchs->s_id = (s_id);
fchs->ox_id = bfa_os_htons(ox_id);
fchs->ox_id = cpu_to_be16(ox_id);

/**
/*
* @todo no need to set ox_id for request
* no need to set rx_id for response
*/
@@ -159,16 +159,16 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
void
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
fchs->d_id = (d_id);
fchs->s_id = (s_id);
fchs->ox_id = bfa_os_htons(ox_id);
fchs->ox_id = cpu_to_be16(ox_id);
}

static void
fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
fchs->s_id = s_id;
fchs->ox_id = ox_id;
@@ -198,7 +198,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
fchs->s_id = s_id;
fchs->ox_id = ox_id;
@@ -211,7 +211,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
{
struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);

bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));

plogi->els_cmd.els_code = els_code;
if (els_code == FC_ELS_PLOGI)
@@ -219,10 +219,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
else
fc_els_rsp_build(fchs, d_id, s_id, ox_id);

plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);
plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);

bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));

return sizeof(struct fc_logi_s);
}
@@ -235,12 +235,12 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
u32 *vvl_info;

bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));

flogi->els_cmd.els_code = FC_ELS_FLOGI;
fc_els_req_build(fchs, d_id, s_id, ox_id);

flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;

@@ -253,14 +253,14 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
/* set AUTH capability */
flogi->csp.security = set_auth;

flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
flogi->csp.bbcred = cpu_to_be16(local_bb_credits);

/* Set brcd token in VVL */
vvl_info = (u32 *)&flogi->vvl[0];

/* set the flag to indicate the presence of VVL */
flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */
vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD);
vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD);

return sizeof(struct fc_logi_s);
}
@@ -272,15 +272,15 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
{
u32 d_id = 0;

bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);

flogi->els_cmd.els_code = FC_ELS_ACC;
flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;

flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
flogi->csp.bbcred = cpu_to_be16(local_bb_credits);

return sizeof(struct fc_logi_s);
}
@@ -291,12 +291,12 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);

bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));

flogi->els_cmd.els_code = FC_ELS_FDISC;
fc_els_req_build(fchs, d_id, s_id, ox_id);

flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
flogi->port_name = port_name;
flogi->node_name = node_name;

@@ -346,7 +346,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
if (!plogi->class3.class_valid)
return FC_PARSE_FAILURE;

if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
return FC_PARSE_FAILURE;

return FC_PARSE_OK;
@@ -363,8 +363,8 @@ fc_plogi_parse(struct fchs_s *fchs)
if (plogi->class3.class_valid != 1)
return FC_PARSE_FAILURE;

if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
|| (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
|| (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
|| (plogi->class3.rxsz == 0))
return FC_PARSE_FAILURE;

@@ -378,7 +378,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);

fc_els_req_build(fchs, d_id, s_id, ox_id);
bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));

prli->command = FC_ELS_PRLI;
prli->parampage.servparams.initiator = 1;
@@ -397,7 +397,7 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);

fc_els_rsp_build(fchs, d_id, s_id, ox_id);
bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));

prli->command = FC_ELS_ACC;

@@ -448,7 +448,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
memset(logo, '\0', sizeof(struct fc_logo_s));
logo->els_cmd.els_code = FC_ELS_LOGO;
logo->nport_id = (s_id);
logo->orig_port_name = port_name;
@@ -461,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name, u8 els_code)
{
bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
memset(adisc, '\0', sizeof(struct fc_adisc_s));

adisc->els_cmd.els_code = els_code;

@@ -537,7 +537,7 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
if (pdisc->class3.class_valid != 1)
return FC_PARSE_FAILURE;

if ((bfa_os_ntohs(pdisc->class3.rxsz) <
if ((be16_to_cpu(pdisc->class3.rxsz) <
(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
|| (pdisc->class3.rxsz == 0))
return FC_PARSE_FAILURE;
@@ -554,11 +554,11 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
u16
fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
fchs->cat_info = FC_CAT_ABTS;
fchs->d_id = (d_id);
fchs->s_id = (s_id);
fchs->ox_id = bfa_os_htons(ox_id);
fchs->ox_id = cpu_to_be16(ox_id);

return sizeof(struct fchs_s);
}
@@ -582,9 +582,9 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
/*
* build rrq payload
*/
bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
rrq->s_id = (s_id);
rrq->ox_id = bfa_os_htons(rrq_oxid);
rrq->ox_id = cpu_to_be16(rrq_oxid);
rrq->rx_id = FC_RXID_ANY;

return sizeof(struct fc_rrq_s);
@@ -598,7 +598,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,

fc_els_rsp_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
memset(acc, 0, sizeof(struct fc_els_cmd_s));
acc->els_code = FC_ELS_ACC;

return sizeof(struct fc_els_cmd_s);
@@ -610,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
u8 reason_code_expl)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));

ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
ls_rjt->reason_code = reason_code;
@@ -626,7 +626,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
{
fc_bls_rsp_build(fchs, d_id, s_id, ox_id);

bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));

fchs->rx_id = rx_id;

@@ -641,7 +641,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
u32 s_id, u16 ox_id)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
els_cmd->els_code = FC_ELS_ACC;

return sizeof(struct fc_els_cmd_s);
@@ -656,10 +656,10 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)

if (els_code == FC_ELS_PRLO) {
prlo = (struct fc_prlo_s *) (fc_frame + 1);
num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16;
num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
} else {
tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
}
return num_pages;
}
@@ -672,11 +672,11 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,

fc_els_rsp_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
memset(tprlo_acc, 0, (num_pages * 16) + 4);
tprlo_acc->command = FC_ELS_ACC;

tprlo_acc->page_len = 0x10;
tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);

for (page = 0; page < num_pages; page++) {
tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
@@ -685,7 +685,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
}
return bfa_os_ntohs(tprlo_acc->payload_len);
return be16_to_cpu(tprlo_acc->payload_len);
}

u16
@@ -696,10 +696,10 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,

fc_els_rsp_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
memset(prlo_acc, 0, (num_pages * 16) + 4);
prlo_acc->command = FC_ELS_ACC;
prlo_acc->page_len = 0x10;
prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);

for (page = 0; page < num_pages; page++) {
prlo_acc->prlo_acc_params[page].opa_valid = 0;
@@ -709,7 +709,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
}

return bfa_os_ntohs(prlo_acc->payload_len);
return be16_to_cpu(prlo_acc->payload_len);
}

u16
@@ -718,7 +718,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));

rnid->els_cmd.els_code = FC_ELS_RNID;
rnid->node_id_data_format = data_format;
@@ -732,7 +732,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
struct fc_rnid_common_id_data_s *common_id_data,
struct fc_rnid_general_topology_data_s *gen_topo_data)
{
bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));

fc_els_rsp_build(fchs, d_id, s_id, ox_id);

@@ -745,7 +745,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
rnid_acc->specific_id_data_length =
sizeof(struct fc_rnid_general_topology_data_s);
bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
rnid_acc->gen_topology_data = *gen_topo_data;
return sizeof(struct fc_rnid_acc_s);
} else {
return sizeof(struct fc_rnid_acc_s) -
@@ -760,7 +760,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
{
fc_els_req_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));

rpsc->els_cmd.els_code = FC_ELS_RPSC;
return sizeof(struct fc_rpsc_cmd_s);
@@ -775,11 +775,11 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,

fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);

bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));

rpsc2->els_cmd.els_code = FC_ELS_RPSC;
rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
rpsc2->num_pids = bfa_os_htons(npids);
rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
rpsc2->num_pids = cpu_to_be16(npids);
for (i = 0; i < npids; i++)
rpsc2->pid_list[i].pid = pid_list[i];
@@ -791,18 +791,18 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
u32 d_id, u32 s_id, u16 ox_id,
struct fc_rpsc_speed_info_s *oper_speed)
{
bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));

fc_els_rsp_build(fchs, d_id, s_id, ox_id);

rpsc_acc->command = FC_ELS_ACC;
rpsc_acc->num_entries = bfa_os_htons(1);
rpsc_acc->num_entries = cpu_to_be16(1);

rpsc_acc->speed_info[0].port_speed_cap =
bfa_os_htons(oper_speed->port_speed_cap);
cpu_to_be16(oper_speed->port_speed_cap);

rpsc_acc->speed_info[0].port_op_speed =
bfa_os_htons(oper_speed->port_op_speed);
cpu_to_be16(oper_speed->port_op_speed);

return sizeof(struct fc_rpsc_acc_s);
}
@@ -830,12 +830,12 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
{
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);

bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));

pdisc->els_cmd.els_code = FC_ELS_PDISC;
fc_els_req_build(fchs, d_id, s_id, ox_id);

pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
pdisc->port_name = port_name;
pdisc->node_name = node_name;

@@ -859,7 +859,7 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
if (!pdisc->class3.class_valid)
return FC_PARSE_NWWN_NOT_EQUAL;

if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
return FC_PARSE_RXSZ_INVAL;

return FC_PARSE_OK;
@@ -873,10 +873,10 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int page;

fc_els_req_build(fchs, d_id, s_id, ox_id);
bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
memset(prlo, 0, (num_pages * 16) + 4);
prlo->command = FC_ELS_PRLO;
prlo->page_len = 0x10;
prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);

for (page = 0; page < num_pages; page++) {
prlo->prlo_params[page].type = FC_TYPE_FCP;
@@ -886,7 +886,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
prlo->prlo_params[page].resp_process_assc = 0;
}

return bfa_os_ntohs(prlo->payload_len);
return be16_to_cpu(prlo->payload_len);
}

u16
@@ -901,7 +901,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
if (prlo->command != FC_ELS_ACC)
return FC_PARSE_FAILURE;

num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;

for (page = 0; page < num_pages; page++) {
if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
@@ -931,10 +931,10 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int page;

fc_els_req_build(fchs, d_id, s_id, ox_id);
bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
memset(tprlo, 0, (num_pages * 16) + 4);
tprlo->command = FC_ELS_TPRLO;
tprlo->page_len = 0x10;
tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);

for (page = 0; page < num_pages; page++) {
tprlo->tprlo_params[page].type = FC_TYPE_FCP;
@@ -950,7 +950,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
}

return bfa_os_ntohs(tprlo->payload_len);
return be16_to_cpu(tprlo->payload_len);
}

u16
@@ -965,7 +965,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
if (tprlo->command != FC_ELS_ACC)
return FC_PARSE_ACC_INVAL;

num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;

for (page = 0; page < num_pages; page++) {
if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
@@ -1011,32 +1011,32 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
static void
fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
{
bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}

static void
fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
{
bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}

static void
fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
u8 sub_type)
{
bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
memset(cthdr, 0, sizeof(struct ct_hdr_s));
cthdr->rev_id = CT_GS3_REVISION;
cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
cthdr->gs_sub_type = sub_type;
cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
}

u16
@@ -1050,7 +1050,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);

bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
gidpn->port_name = port_name;
return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1066,7 +1066,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);

bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
gpnid->dap = port_id;
return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
}
@@ -1082,7 +1082,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);

bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
gnnid->dap = port_id;
return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
}
@@ -1090,7 +1090,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u16
fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
{
if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
return FC_PARSE_BUSY;
else
@@ -1108,7 +1108,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,

fc_els_req_build(fchs, d_id, s_id, ox_id);

bfa_os_memset(scr, 0, sizeof(struct fc_scr_s));
memset(scr, 0, sizeof(struct fc_scr_s));
scr->command = FC_ELS_SCR;
scr->reg_func = FC_SCR_REG_FUNC_FULL;
if (set_br_reg)
@@ -1129,7 +1129,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
rscn->pagelen = sizeof(rscn->event[0]);

payldlen = sizeof(u32) + rscn->pagelen;
rscn->payldlen = bfa_os_htons(payldlen);
rscn->payldlen = cpu_to_be16(payldlen);

rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
rscn->event[0].portid = s_id;
@@ -1149,14 +1149,14 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);

bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));

rftid->dap = s_id;

/* By default, FCP FC4 Type is registered */
index = FC_TYPE_FCP >> 5;
type_value = 1 << (FC_TYPE_FCP % 32);
rftid->fc4_type[index] = bfa_os_htonl(type_value);
rftid->fc4_type[index] = cpu_to_be32(type_value);

return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1172,10 +1172,10 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);

bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));

rftid->dap = s_id;
bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
(bitmap_size < 32 ? bitmap_size : 32));

return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
@@ -1192,7 +1192,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);

bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));

rffid->dap = s_id;
rffid->fc4ftr_bits = fc4_ftrs;
@@ -1214,7 +1214,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);

bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));

rspnid->dap = s_id;
rspnid->spn_len = (u8) strlen((char *)name);
@@ -1235,7 +1235,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)

fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);

bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
gidft->fc4_type = fc4_type;
gidft->domain_id = 0;
gidft->area_id = 0;
@@ -1254,7 +1254,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);

bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
rpnid->port_id = port_id;
rpnid->port_name = port_name;

@@ -1272,7 +1272,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);

bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
rnnid->port_id = port_id;
rnnid->node_name = node_name;

@@ -1291,7 +1291,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);

bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
rcsid->port_id = port_id;
rcsid->cos = cos;

@@ -1309,7 +1309,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);

bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
rptid->port_id = port_id;
rptid->port_type = port_type;

@@ -1326,7 +1326,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);

bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
ganxt->port_id = port_id;

return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
@@ -1365,7 +1365,7 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)

index = fc4_type >> 5;
type_value = 1 << (fc4_type % 32);
ptr[index] = bfa_os_htonl(type_value);
ptr[index] = cpu_to_be32(type_value);

}

@@ -1383,7 +1383,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
CT_GSSUBTYPE_CFGSERVER);

bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
memset(gmal, 0, sizeof(fcgs_gmal_req_t));
gmal->wwn = wwn;

return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
@@ -1403,7 +1403,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
CT_GSSUBTYPE_CFGSERVER);

bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
memset(gfn, 0, sizeof(fcgs_gfn_req_t));
gfn->wwn = wwn;

return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
File diff suppressed because it is too large
@@ -104,7 +104,7 @@ struct bfa_fcpim_mod_s {
bfa_fcpim_profile_t profile_start;
};

/**
/*
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
@@ -137,7 +137,7 @@ struct bfa_ioim_sp_s {
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};

/**
/*
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
@@ -160,7 +160,7 @@ struct bfa_tskim_s {
};


/**
/*
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
@@ -303,7 +303,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
struct bfa_itnim_ioprofile_s *ioprofile);
#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)

/**
/*
* BFA completion callback for bfa_itnim_online().
*
* @param[in] itnim FCS or driver itnim instance
@@ -312,7 +312,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
*/
void bfa_cb_itnim_online(void *itnim);

/**
/*
* BFA completion callback for bfa_itnim_offline().
*
* @param[in] itnim FCS or driver itnim instance
@@ -323,7 +323,7 @@ void bfa_cb_itnim_offline(void *itnim);
void bfa_cb_itnim_tov_begin(void *itnim);
void bfa_cb_itnim_tov(void *itnim);

/**
/*
* BFA notification to FCS/driver for second level error recovery.
*
* Atleast one I/O request has timedout and target is unresponsive to
@@ -351,7 +351,7 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
bfa_boolean_t iotov);


/**
/*
* I/O completion notification.
*
* @param[in] dio driver IO structure
@@ -368,7 +368,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
u8 scsi_status, int sns_len,
u8 *sns_info, s32 residue);

/**
/*
* I/O good completion notification.
*
* @param[in] dio driver IO structure
@@ -377,7 +377,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
*/
void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);

/**
/*
* I/O abort completion notification
*
* @param[in] dio driver IO that was aborted
@@ -15,7 +15,7 @@
* General Public License for more details.
*/

/**
/*
* bfa_fcs.c BFA FCS main
*/

@@ -25,7 +25,7 @@

BFA_TRC_FILE(FCS, FCS);

/**
/*
* FCS sub-modules
*/
struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
bfa_fcs_fabric_modexit },
};

/**
/*
* fcs_api BFA FCS API
*/

@@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)



/**
/*
* fcs_api BFA FCS API
*/

/**
/*
* fcs attach -- called once to initialize data structures at driver attach time
*/
void
@@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
}
}

/**
/*
* fcs initialization, called once after bfa initialization is complete
*/
void
@@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
}
}

/**
/*
* Start FCS operations.
*/
void
@@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
bfa_fcs_fabric_modstart(fcs);
}

/**
/*
* brief
* FCS driver details initialization.
*
@@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
bfa_fcs_fabric_psymb_init(&fcs->fabric);
}

/**
/*
* brief
* FCS FDMI Driver Parameter Initialization
*
@@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
fcs->fdmi_enabled = fdmi_enable;

}
/**
/*
* brief
* FCS instance cleanup and exit.
*
@@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
bfa_wc_down(&fcs->wc);
}

/**
/*
* Fabric module implementation.
*/

@@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rspfchs);
/**
/*
* fcs_fabric_sm fabric state machine functions
*/

/**
/*
* Fabric state machine events
*/
enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
/**
/*
* Beginning state before fabric creation.
*/
static void
@@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Beginning state before fabric creation.
*/
static void
@@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Link is down, awaiting LINK UP event from port. This is also the
* first state at fabric creation.
*/
@@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* FLOGI is in progress, awaiting FLOGI reply.
*/
static void
@@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Authentication is in progress, awaiting authentication results.
*/
static void
@@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Authentication failed
*/
static void
@@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Port is in loopback mode.
*/
static void
@@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* There is no attached fabric - private loop or NPort-to-NPort topology.
*/
static void
@@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Fabric is online - normal operating state.
*/
static void
@@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* Exchanging virtual fabric parameters.
*/
static void
@@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
}
}

/**
/*
* EVFP exchange complete and VFT tagging is enabled.
*/
static void
@@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
bfa_trc(fabric->fcs, event);
}

/**
/*
* Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
*/
static void
@@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
fabric->event_arg.swp_vfid);
}

/**
/*
* Fabric is being deleted, awaiting vport delete completions.
*/
static void
@@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,



/**
/*
* fcs_fabric_private fabric private functions
*/

@@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
}

/**
/*
* Port Symbolic Name Creation for base port.
*/
void
@@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}

/**
/*
* bfa lps login completion callback
*/
void
@@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
bfa_trc(fabric->fcs, fabric->is_npiv);
bfa_trc(fabric->fcs, fabric->is_auth);
}
/**
/*
* Allocate and send FLOGI.
*/
static void
@@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
bfa_fcs_fabric_set_opertype(fabric);
fabric->stats.fabric_onlines++;

/**
/*
* notify online event to base and then virtual ports
*/
bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
bfa_trc(fabric->fcs, fabric->fabric_name);
fabric->stats.fabric_offlines++;

/**
/*
* notify offline event first to vports and then base port.
*/
list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}

/**
/*
* Delete all vports and wait for vport delete completions.
*/
static void
@@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}

/**
/*
* fcs_fabric_public fabric public functions
*/

/**
/*
* Attach time initialization.
*/
void
@@ -978,9 +978,9 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
struct bfa_fcs_fabric_s *fabric;

fabric = &fcs->fabric;
bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));

/**
/*
* Initialize base fabric.
*/
fabric->fcs = fcs;
@@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
fabric->lps = bfa_lps_alloc(fcs->bfa);
bfa_assert(fabric->lps);

/**
/*
* Initialize fabric delete completion handler. Fabric deletion is
* complete when the last vport delete is complete.
*/
@@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
bfa_trc(fcs, 0);
}

/**
/*
* Module cleanup
*/
void
@@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)

bfa_trc(fcs, 0);

/**
/*
* Cleanup base fabric.
*/
fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}

/**
/*
* Fabric module start -- kick starts FCS actions
*/
void
@@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
}

/**
/*
* Suspend fabric activity as part of driver suspend.
*/
void
@@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
return fabric->oper_type;
}

/**
/*
* Link up notification from BFA physical port module.
*/
void
@@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
}

/**
/*
* Link down notification from BFA physical port module.
*/
void
@@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}

/**
/*
* A child vport is being created in the fabric.
*
* Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@ void
bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport)
{
/**
/*
* - add vport to fabric's vport_q
*/
bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_up(&fabric->wc);
}

/**
/*
* A child vport is being deleted from fabric.
*
* Vport is being deleted.
@@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
bfa_wc_down(&fabric->wc);
}

/**
/*
* Base port is deleted.
*/
void
@@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
}


/**
/*
* Check if fabric is online.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
}

/**
/*
* brief
*
*/
@@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
return BFA_STATUS_OK;
}

/**
/*
* Lookup for a vport withing a fabric given its pwwn
*/
struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
return NULL;
}

/**
/*
* In a given fabric, return the number of lports.
*
* param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)

return oui;
}
/**
/*
* Unsolicited frame receive handling.
*/
void
@@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_trc(fabric->fcs, len);
bfa_trc(fabric->fcs, pid);

/**
/*
* Look for our own FLOGI frames being looped back. This means an
* external loopback cable is in place. Our own FLOGI frames are
* sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}

/**
/*
* FLOGI/EVFP exchanges should be consumed by base fabric.
*/
if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}

if (fabric->bport.pid == pid) {
/**
/*
* All authentication frames should be routed to auth
*/
bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}

/**
/*
* look for a matching local port ID
*/
list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
}

/**
/*
* Unsolicited frames to be processed by fabric.
*/
static void
@@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
}
}

/**
/*
* Process incoming FLOGI
*/
static void
@@ -1329,7 +1329,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
return;
}

fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
bport->port_topo.pn2n.reply_oxid = fchs->ox_id;

@@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
struct fchs_s fchs;

fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
/**
/*
* Do not expect this failure -- expect remote node to retry
*/
if (!fcxp)
@@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
FC_MAX_PDUSZ, 0);
}

/**
/*
* Flogi Acc completion callback.
*/
static void
@@ -1417,130 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
}
}

/**
* fcs_vf_api virtual fabrics API
*/

/**
* Enable VF mode.
*
* @param[in] fcs fcs module instance
* @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
* to use standard default vf_id of 1.
*
* @retval BFA_STATUS_OK vf mode is enabled
* @retval BFA_STATUS_BUSY Port is active. Port must be disabled
* before VF mode can be enabled.
*/
bfa_status_t
bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
{
return BFA_STATUS_OK;
}

/**
* Disable VF mode.
*
* @param[in] fcs fcs module instance
*
* @retval BFA_STATUS_OK vf mode is disabled
* @retval BFA_STATUS_BUSY VFs are present and being used. All
* VFs must be deleted before disabling
* VF mode.
*/
bfa_status_t
bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
{
return BFA_STATUS_OK;
}

/**
* Create a new VF instance.
*
* A new VF is created using the given VF configuration. A VF is identified
* by VF id. No duplicate VF creation is allowed with the same VF id. Once
* a VF is created, VF is automatically started after link initialization
* and EVFP exchange is completed.
*
* param[in] vf - FCS vf data structure. Memory is
* allocated by caller (driver)
* param[in] fcs - FCS module
* param[in] vf_cfg - VF configuration
* param[in] vf_drv - Opaque handle back to the driver's
* virtual vf structure
*
* retval BFA_STATUS_OK VF creation is successful
* retval BFA_STATUS_FAILED VF creation failed
* retval BFA_STATUS_EEXIST A VF exists with the given vf_id
*/
bfa_status_t
bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
{
bfa_trc(fcs, vf_id);
return BFA_STATUS_OK;
}

/**
* Use this function to delete a BFA VF object. VF object should
* be stopped before this function call.
*
* param[in] vf - pointer to bfa_vf_t.
*
* retval BFA_STATUS_OK On vf deletion success
* retval BFA_STATUS_BUSY VF is not in a stopped state
* retval BFA_STATUS_INPROGRESS VF deletion in in progress
*/
bfa_status_t
bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
{
bfa_trc(vf->fcs, vf->vf_id);
return BFA_STATUS_OK;
}


/**
* Returns attributes of the given VF.
*
* param[in] vf pointer to bfa_vf_t.
* param[out] vf_attr vf attributes returned
*
* return None
*/
void
bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
{
bfa_trc(vf->fcs, vf->vf_id);
}

/**
* Return statistics associated with the given vf.
*
* param[in] vf pointer to bfa_vf_t.
* param[out] vf_stats vf statistics returned
*
* @return None
*/
void
bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
{
bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
}

/**
* clear statistics associated with the given vf.
*
* param[in] vf pointer to bfa_vf_t.
*
* @return None
*/
void
bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
{
bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
}

/**
/*
* Returns FCS vf structure for a given vf_id.
*
* param[in] vf_id - VF_ID
@@ -1558,81 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
return NULL;
}

/**
* Return the list of VFs configured.
*
* param[in] fcs fcs module instance
* param[out] vf_ids returned list of vf_ids
* param[in,out] nvfs in:size of vf_ids array,
* out:total elements present,
* actual elements returned is limited by the size
*
* return Driver VF structure
*/
void
bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
bfa_trc(fcs, *nvfs);
}

/**
* Return the list of all VFs visible from fabric.
*
* param[in] fcs fcs module instance
* param[out] vf_ids returned list of vf_ids
* param[in,out] nvfs in:size of vf_ids array,
* out:total elements present,
* actual elements returned is limited by the size
*
* return Driver VF structure
*/
void
bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
{
bfa_trc(fcs, *nvfs);
}

/**
* Return the list of local logical ports present in the given VF.
*
* param[in] vf vf for which logical ports are returned
* param[out] lpwwn returned logical port wwn list
* param[in,out] nlports in:size of lpwwn list;
* out:total elements present,
* actual elements returned is limited by the size
*/
void
bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
{
struct list_head *qe;
struct bfa_fcs_vport_s *vport;
int i;
struct bfa_fcs_s *fcs;

if (vf == NULL || lpwwn == NULL || *nlports == 0)
return;

fcs = vf->fcs;

bfa_trc(fcs, vf->vf_id);
bfa_trc(fcs, (u32) *nlports);

i = 0;
lpwwn[i++] = vf->bport.port_cfg.pwwn;

list_for_each(qe, &vf->vport_q) {
if (i >= *nlports)
break;

vport = (struct bfa_fcs_vport_s *) qe;
lpwwn[i++] = vport->lport.port_cfg.pwwn;
}

bfa_trc(fcs, i);
*nlports = i;
}

/**
/*
* BFA FCS PPORT ( physical port)
*/
static void
@@ -1662,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
}

/**
/*
* BFA FCS UF ( Unsolicited Frames)
*/

/**
/*
* BFA callback for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
@@ -1683,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
struct fc_vft_s *vft;
struct bfa_fcs_fabric_s *fabric;

/**
/*
* check for VFT header
*/
if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1695,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
else
fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);

/**
/*
* drop frame if vfid is unknown
*/
if (!fabric) {
@@ -1705,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
return;
}

/**
/*
* skip vft header
*/
fchs = (struct fchs_s *) (vft + 1);
@@ -196,7 +196,7 @@ struct bfa_fcs_fabric_s {
#define bfa_fcs_fabric_is_switched(__f) \
((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)

/**
/*
* The design calls for a single implementation of base fabric and vf.
*/
#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
@@ -216,7 +216,7 @@ struct bfa_fcs_fabric_s;

#define bfa_fcs_lport_t struct bfa_fcs_lport_s

/**
/*
* Symbolic Name related defines
* Total bytes 255.
* Physical Port's symbolic name 128 bytes.
@@ -239,7 +239,7 @@ struct bfa_fcs_fabric_s;
#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16

/**
/*
* Get FC port ID for a logical port.
*/
#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
@@ -262,7 +262,7 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
((_lport)->fabric->fabric_ip_addr)

/**
/*
* bfa fcs port public functions
*/

@@ -342,7 +342,7 @@ struct bfa_fcs_vport_s {
#define bfa_fcs_vport_get_port(vport) \
((struct bfa_fcs_lport_s *)(&vport->port))

/**
/*
* bfa fcs vport public functions
*/
bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
@@ -393,7 +393,7 @@ struct bfa_fcs_rpf_s {
enum bfa_port_speed rpsc_speed;
/* Current Speed from RPSC. O if RPSC fails */
enum bfa_port_speed assigned_speed;
/**
/*
* Speed assigned by the user. will be used if RPSC is
* not supported by the rport.
*/
@@ -434,7 +434,7 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
return rport->bfa_rport;
}

/**
/*
* bfa fcs rport API functions
*/
bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
@@ -573,7 +573,7 @@ bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
return itnim->bfa_itnim;
}

/**
/*
* bfa fcs FCP Initiator mode API functions
*/
void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
@@ -677,22 +677,9 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
void bfa_fcs_start(struct bfa_fcs_s *fcs);

/**
/*
* bfa fcs vf public functions
*/
bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
struct bfad_vf_s *vf_drv);
bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
struct bfa_vf_stats_s *vf_stats);
void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);

@@ -729,11 +716,11 @@ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);

/**
/*
* BFA FCS callback interfaces
*/

/**
/*
* fcb Main fcs callbacks
*/

@@ -742,7 +729,7 @@ struct bfad_vf_s;
struct bfad_vport_s;
struct bfad_rport_s;

/**
/*
|
||||
* lport callbacks
|
||||
*/
|
||||
struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
|
||||
@ -754,19 +741,19 @@ void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
|
||||
struct bfad_vf_s *vf_drv,
|
||||
struct bfad_vport_s *vp_drv);
|
||||
|
||||
/**
|
||||
/*
|
||||
* vport callbacks
|
||||
*/
|
||||
void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
|
||||
|
||||
/**
|
||||
/*
|
||||
* rport callbacks
|
||||
*/
|
||||
bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
|
||||
struct bfa_fcs_rport_s **rport,
|
||||
struct bfad_rport_s **rport_drv);
|
||||
|
||||
/**
|
||||
/*
|
||||
* itnim callbacks
|
||||
*/
|
||||
void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
|
||||
|
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcpim.c - FCP initiator mode i-t nexus state machine
|
||||
*/
|
||||
|
||||
@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
|
||||
bfa_status_t req_status, u32 rsp_len,
|
||||
u32 resid_len, struct fchs_s *rsp_fchs);
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_itnim_sm FCS itnim state machine events
|
||||
*/
|
||||
|
||||
@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
|
||||
{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_itnim_sm FCS itnim state machine
|
||||
*/
|
||||
|
||||
@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* itnim_public FCS ITNIM public interfaces
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by rport when a new rport is created.
|
||||
*
|
||||
* @param[in] rport - remote port.
|
||||
@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
|
||||
return itnim;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by rport to delete the instance of FCPIM.
|
||||
*
|
||||
* @param[in] rport - remote port.
|
||||
@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
|
||||
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Notification from rport that PLOGI is complete to initiate FC-4 session.
|
||||
*/
|
||||
void
|
||||
@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by rport to handle a remote device offline.
|
||||
*/
|
||||
void
|
||||
@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
|
||||
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by rport when remote port is known to be an initiator from
|
||||
* PRLI received.
|
||||
*/
|
||||
@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
|
||||
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by rport to check if the itnim is online.
|
||||
*/
|
||||
bfa_status_t
|
||||
@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA completion callback for bfa_itnim_online().
|
||||
*/
|
||||
void
|
||||
@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
|
||||
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA completion callback for bfa_itnim_offline().
|
||||
*/
|
||||
void
|
||||
@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
|
||||
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Mark the beginning of PATH TOV handling. IO completion callbacks
|
||||
* are still pending.
|
||||
*/
|
||||
@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
|
||||
bfa_trc(itnim->fcs, itnim->rport->pwwn);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
|
||||
*/
|
||||
void
|
||||
@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
|
||||
itnim_drv->state = ITNIM_STATE_TIMEOUT;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA notification to FCS/driver for second level error recovery.
|
||||
*
|
||||
* At least one I/O request has timed out and the target is unresponsive to
|
||||
@ -736,7 +736,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
|
||||
if (itnim == NULL)
|
||||
return BFA_STATUS_NO_FCPIM_NEXUS;
|
||||
|
||||
bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
|
||||
memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
|
||||
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
@ -753,7 +753,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
|
||||
if (itnim == NULL)
|
||||
return BFA_STATUS_NO_FCPIM_NEXUS;
|
||||
|
||||
bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
|
||||
memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* rport.c Remote port implementation.
|
||||
*/
|
||||
|
||||
@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
|
||||
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
|
||||
struct fchs_s *rx_fchs, u16 len);
|
||||
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
|
||||
/**
|
||||
/*
|
||||
* fcs_rport_sm FCS rport state machine events
|
||||
*/
|
||||
|
||||
@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
|
||||
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Beginning state.
|
||||
*/
|
||||
static void
|
||||
@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is being sent.
|
||||
*/
|
||||
static void
|
||||
@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is being sent.
|
||||
*/
|
||||
static void
|
||||
@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
|
||||
|
||||
case RPSM_EVENT_PLOGI_RCVD:
|
||||
case RPSM_EVENT_SCN:
|
||||
/**
|
||||
/*
|
||||
* Ignore, SCN is possibly online notification.
|
||||
*/
|
||||
break;
|
||||
@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_HCB_OFFLINE:
|
||||
/**
|
||||
/*
|
||||
* Ignore BFA callback, on a PLOGI receive we call bfa offline.
|
||||
*/
|
||||
break;
|
||||
@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is sent.
|
||||
*/
|
||||
static void
|
||||
@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is sent.
|
||||
*/
|
||||
static void
|
||||
@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
|
||||
* are offline.
|
||||
*/
|
||||
@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_SCN:
|
||||
/**
|
||||
/*
|
||||
* @todo
|
||||
* Ignore SCN - PLOGI just completed, FC-4 login should detect
|
||||
* device failures.
|
||||
@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is ONLINE. FC-4s active.
|
||||
*/
|
||||
static void
|
||||
@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* An SCN event is received in ONLINE state. NS query is being sent
|
||||
* prior to ADISC authentication with rport. FC-4s are paused.
|
||||
*/
|
||||
@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_SCN:
|
||||
/**
|
||||
/*
|
||||
* ignore SCN, wait for response to query itself
|
||||
*/
|
||||
break;
|
||||
@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* An SCN event is received in ONLINE state. NS query is sent to rport.
|
||||
* FC-4s are paused.
|
||||
*/
|
||||
@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* An SCN event is received in ONLINE state. ADISC is being sent for
|
||||
* authenticating with rport. FC-4s are paused.
|
||||
*/
|
||||
@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* An SCN event is received in ONLINE state. ADISC is sent to rport.
|
||||
* FC-4s are paused.
|
||||
*/
|
||||
@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_PLOGI_RCVD:
|
||||
/**
|
||||
/*
|
||||
* Too complex to cleanup FC-4 & rport and then acc to PLOGI.
|
||||
* At least go offline when a PLOGI is received.
|
||||
*/
|
||||
@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_SCN:
|
||||
/**
|
||||
/*
|
||||
* already processing RSCN
|
||||
*/
|
||||
break;
|
||||
@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport has sent LOGO. Awaiting FC-4 offline completion callback.
|
||||
*/
|
||||
static void
|
||||
@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* LOGO needs to be sent to rport. Awaiting FC-4 offline completion
|
||||
* callback.
|
||||
*/
|
||||
@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is going offline. Awaiting FC-4 offline completion callback.
|
||||
*/
|
||||
static void
|
||||
@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
|
||||
case RPSM_EVENT_LOGO_RCVD:
|
||||
case RPSM_EVENT_PRLO_RCVD:
|
||||
case RPSM_EVENT_ADDRESS_CHANGE:
|
||||
/**
|
||||
/*
|
||||
* rport is already going offline.
|
||||
* SCN - ignore and wait till transitioning to offline state
|
||||
*/
|
||||
@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
|
||||
* callback.
|
||||
*/
|
||||
@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
|
||||
case RPSM_EVENT_SCN:
|
||||
case RPSM_EVENT_LOGO_RCVD:
|
||||
case RPSM_EVENT_PRLO_RCVD:
|
||||
/**
|
||||
/*
|
||||
* Ignore, already offline.
|
||||
*/
|
||||
break;
|
||||
@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is offline. FC-4s are offline. Awaiting BFA rport offline
|
||||
* callback to send LOGO accept.
|
||||
*/
|
||||
@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
|
||||
|
||||
case RPSM_EVENT_LOGO_RCVD:
|
||||
case RPSM_EVENT_PRLO_RCVD:
|
||||
/**
|
||||
/*
|
||||
* Ignore - already processing a LOGO.
|
||||
*/
|
||||
break;
|
||||
@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is being deleted. FC-4s are offline.
|
||||
* Awaiting BFA rport offline
|
||||
* callback to send LOGO.
|
||||
@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is being deleted. FC-4s are offline. LOGO is being sent.
|
||||
*/
|
||||
static void
|
||||
@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport is offline. FC-4s are offline. BFA rport is offline.
|
||||
* Timer active to delete stale rport.
|
||||
*/
|
||||
@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport address has changed. Nameserver discovery request is being sent.
|
||||
*/
|
||||
static void
|
||||
@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Nameserver discovery failed. Waiting for timeout to retry.
|
||||
*/
|
||||
static void
|
||||
@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Rport address has changed. Nameserver discovery request is sent.
|
||||
*/
|
||||
static void
|
||||
@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
|
||||
bfa_fcs_rport_send_prlo_acc(rport);
|
||||
break;
|
||||
case RPSM_EVENT_SCN:
|
||||
/**
|
||||
/*
|
||||
* ignore, wait for NS query response
|
||||
*/
|
||||
break;
|
||||
|
||||
case RPSM_EVENT_LOGO_RCVD:
|
||||
/**
|
||||
/*
|
||||
* Not logged-in yet. Accept LOGO.
|
||||
*/
|
||||
bfa_fcs_rport_send_logo_acc(rport);
|
||||
@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_rport_private FCS RPORT private functions
|
||||
*/
|
||||
|
||||
@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
|
||||
plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Check for failure first.
|
||||
*/
|
||||
if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
|
||||
@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PLOGI is complete. Make sure this device is not one of the known
|
||||
* device with a new FC port address.
|
||||
*/
|
||||
@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Normal login path -- no evil twins.
|
||||
*/
|
||||
rport->stats.plogi_accs++;
|
||||
@ -1621,7 +1621,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
|
||||
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
|
||||
cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
|
||||
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
|
||||
|
||||
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
|
||||
/* Check if the pid is the same as before. */
|
||||
@ -1691,7 +1691,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
|
||||
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
|
||||
cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
|
||||
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
|
||||
|
||||
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
|
||||
@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to send a logout to the rport.
|
||||
*/
|
||||
static void
|
||||
@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Send ACC for a LOGO received.
|
||||
*/
|
||||
static void
|
||||
@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
|
||||
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* This routine will be called by bfa_timer on timer timeouts.
|
||||
*
|
||||
@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
|
||||
struct bfa_fcs_rport_s *rport;
|
||||
struct bfad_rport_s *rport_drv;
|
||||
|
||||
/**
|
||||
/*
|
||||
* allocate rport
|
||||
*/
|
||||
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
|
||||
@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
|
||||
rport->pid = rpid;
|
||||
rport->pwwn = pwwn;
|
||||
|
||||
/**
|
||||
/*
|
||||
* allocate BFA rport
|
||||
*/
|
||||
rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
|
||||
@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* allocate FC-4s
|
||||
*/
|
||||
bfa_assert(bfa_fcs_lport_is_initiator(port));
|
||||
@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
|
||||
{
|
||||
struct bfa_fcs_lport_s *port = rport->port;
|
||||
|
||||
/**
|
||||
/*
|
||||
* - delete FC-4s
|
||||
* - delete BFA rport
|
||||
* - remove from queue of rports
|
||||
@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Update rport parameters from PLOGI or PLOGI accept.
|
||||
*/
|
||||
static void
|
||||
@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
|
||||
{
|
||||
bfa_fcs_lport_t *port = rport->port;
|
||||
|
||||
/**
|
||||
/*
|
||||
* - port name
|
||||
* - node name
|
||||
*/
|
||||
rport->pwwn = plogi->port_name;
|
||||
rport->nwwn = plogi->node_name;
|
||||
|
||||
/**
|
||||
/*
|
||||
* - class of service
|
||||
*/
|
||||
rport->fc_cos = 0;
|
||||
@ -2118,16 +2118,16 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
|
||||
if (plogi->class2.class_valid)
|
||||
rport->fc_cos |= FC_CLASS_2;
|
||||
|
||||
/**
|
||||
/*
|
||||
* - CISC
|
||||
* - MAX receive frame size
|
||||
*/
|
||||
rport->cisc = plogi->csp.cisc;
|
||||
rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz);
|
||||
rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
|
||||
|
||||
bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
|
||||
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
|
||||
bfa_trc(port->fcs, port->fabric->bb_credit);
|
||||
/**
|
||||
/*
|
||||
* Direct Attach P2P mode :
|
||||
* This is to handle a bug (233476) in IBM targets in Direct Attach
|
||||
* Mode. Basically, in FLOGI Accept the target would have
|
||||
@ -2136,19 +2136,19 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
|
||||
* in PLOGI.
|
||||
*/
|
||||
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
|
||||
(bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
|
||||
(be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
|
||||
|
||||
bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
|
||||
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
|
||||
bfa_trc(port->fcs, port->fabric->bb_credit);
|
||||
|
||||
port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
|
||||
port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
|
||||
bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
|
||||
port->fabric->bb_credit);
|
||||
}
|
||||
|
||||
}
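The paired statements in this hunk (a bfa_os_ntohs() line next to a be16_to_cpu() line) are the before and after of the same conversion: the driver's private byte-order wrapper is replaced by the kernel's standard helper. A minimal stand-alone sketch of the new form, with a hypothetical structure standing in for the FC PLOGI payload:

#include <linux/types.h>
#include <asm/byteorder.h>	/* be16_to_cpu() */

/* Hypothetical wire header: 16-bit fields arrive in big-endian byte order. */
struct demo_wire_hdr {
	__be16	rxsz;
};

static u16 demo_rx_size(const struct demo_wire_hdr *hdr)
{
	/* same conversion the hunk applies to plogi->class3.rxsz */
	return be16_to_cpu(hdr->rxsz);
}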
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to handle LOGO received from an existing remote port.
|
||||
*/
|
||||
static void
|
||||
@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_rport_public FCS rport public interfaces
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport to create a remote port instance for a discovered
|
||||
* remote device.
|
||||
*
|
||||
@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
|
||||
return rport;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to create a rport for which only the wwn is known.
|
||||
*
|
||||
* @param[in] port - base port
|
||||
@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
|
||||
return rport;
|
||||
}
|
||||
/**
|
||||
/*
|
||||
* Called by bport in private loop topology to indicate that a
|
||||
* rport has been discovered and plogi has been completed.
|
||||
*
|
||||
@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport to handle PLOGI received from a new remote port.
|
||||
* If an existing rport does a plogi, it will be handled separately.
|
||||
*/
|
||||
@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport to handle PLOGI received from an existing
|
||||
* remote port.
|
||||
*/
|
||||
@ -2280,7 +2280,7 @@ void
|
||||
bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
|
||||
struct fc_logi_s *plogi)
|
||||
{
|
||||
/**
|
||||
/*
|
||||
* @todo Handle P2P and initiator-initiator.
|
||||
*/
|
||||
|
||||
@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
|
||||
rport->reply_oxid = rx_fchs->ox_id;
|
||||
bfa_trc(rport->fcs, rport->reply_oxid);
|
||||
|
||||
/**
|
||||
/*
|
||||
* In Switched fabric topology,
|
||||
* PLOGI to each other. If our pwwn is smaller, ignore it,
|
||||
* if it is not a well known address.
|
||||
@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport to delete a remote port instance.
|
||||
*
|
||||
* Rport delete is called under the following conditions:
|
||||
@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport when a target goes offline.
|
||||
*
|
||||
*/
|
||||
@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by bport in n2n when a target (attached port) becomes online.
|
||||
*
|
||||
*/
|
||||
@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
|
||||
{
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
|
||||
}
|
||||
/**
|
||||
/*
|
||||
* Called by bport/vport to notify SCN for the remote port
|
||||
*/
|
||||
void
|
||||
@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by fcpim to notify that the ITN cleanup is done.
|
||||
*/
|
||||
void
|
||||
@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called by fcptm to notify that the ITN cleanup is done.
|
||||
*/
|
||||
void
|
||||
@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* This routine BFA callback for bfa_rport_online() call.
|
||||
*
|
||||
@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* This routine BFA callback for bfa_rport_offline() call.
|
||||
*
|
||||
@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* This routine is a static BFA callback when there is a QoS flow_id
|
||||
* change notification
|
||||
@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* This routine is a static BFA callback when there is a QoS priority
|
||||
* change notification
|
||||
@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
|
||||
bfa_trc(rport->fcs, rport->pwwn);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to process any unsolicited frames from this remote port
|
||||
*/
|
||||
void
|
||||
@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called to process any unsolicited frames from this remote port
|
||||
*/
|
||||
void
|
||||
@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
|
||||
FC_MAX_PDUSZ, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Return state of rport.
|
||||
*/
|
||||
int
|
||||
@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
|
||||
return bfa_sm_to_state(rport_sm_table, rport->sm);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* brief
|
||||
* Called by the Driver to set rport delete/ageout timeout
|
||||
*
|
||||
@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Remote port implementation.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_rport_api FCS rport API.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Direct API to add a target by port wwn. This interface is used, for
|
||||
* example, by bios when target pwwn is known from boot lun configuration.
|
||||
*/
|
||||
@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Direct API to remove a target and its associated resources. This
|
||||
* interface is used, for example, by driver to remove target
|
||||
* ports from the target list for a VM.
|
||||
@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Remote device status for display/debug.
|
||||
*/
|
||||
void
|
||||
@ -2674,7 +2674,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
|
||||
bfa_fcs_lport_t *port = rport->port;
|
||||
bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
|
||||
|
||||
bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
|
||||
memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
|
||||
|
||||
rport_attr->pid = rport->pid;
|
||||
rport_attr->pwwn = rport->pwwn;
|
||||
@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Per remote device statistics.
|
||||
*/
|
||||
void
|
||||
@ -2717,7 +2717,7 @@ bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
|
||||
void
|
||||
bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
|
||||
{
|
||||
bfa_os_memset((char *)&rport->stats, 0,
|
||||
memset((char *)&rport->stats, 0,
|
||||
sizeof(struct bfa_rport_stats_s));
|
||||
}
|
||||
|
||||
@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Remote port features (RPF) implementation.
|
||||
*/
|
||||
|
||||
@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
|
||||
|
||||
static void bfa_fcs_rpf_timeout(void *arg);
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcs_rport_ftrs_sm FCS rport state machine events
|
||||
*/
|
||||
|
||||
@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
|
||||
bfa_sm_fault(rport->fcs, event);
|
||||
}
|
||||
}
|
||||
/**
|
||||
/*
|
||||
* Called when Rport is created.
|
||||
*/
|
||||
void
|
||||
@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called when Rport becomes online
|
||||
*/
|
||||
void
|
||||
@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
|
||||
bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called when Rport becomes offline
|
||||
*/
|
||||
void
|
||||
@ -3090,16 +3090,16 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
|
||||
if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
|
||||
rport->stats.rpsc_accs++;
|
||||
num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
|
||||
num_ents = be16_to_cpu(rpsc2_acc->num_pids);
|
||||
bfa_trc(rport->fcs, num_ents);
|
||||
if (num_ents > 0) {
|
||||
bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
|
||||
bfa_trc(rport->fcs,
|
||||
bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
|
||||
be16_to_cpu(rpsc2_acc->port_info[0].pid));
|
||||
bfa_trc(rport->fcs,
|
||||
bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
|
||||
be16_to_cpu(rpsc2_acc->port_info[0].speed));
|
||||
bfa_trc(rport->fcs,
|
||||
bfa_os_ntohs(rpsc2_acc->port_info[0].index));
|
||||
be16_to_cpu(rpsc2_acc->port_info[0].index));
|
||||
bfa_trc(rport->fcs,
|
||||
rpsc2_acc->port_info[0].type);
|
||||
|
||||
@ -3109,7 +3109,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
|
||||
}
|
||||
|
||||
rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
|
||||
bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
|
||||
be16_to_cpu(rpsc2_acc->port_info[0].speed));
|
||||
|
||||
bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ void
|
||||
bfa_hwcb_reginit(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
|
||||
bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
|
||||
|
||||
if (fn == 0) {
|
||||
@ -60,8 +60,8 @@ bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
|
||||
static void
|
||||
bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
|
||||
{
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
|
||||
__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
|
||||
writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
|
||||
bfa->iocfc.bfa_regs.intr_status);
|
||||
}
|
||||
|
||||
void
|
||||
@ -72,8 +72,8 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
|
||||
static void
|
||||
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
|
||||
{
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
|
||||
__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
|
||||
writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
|
||||
bfa->iocfc.bfa_regs.intr_status);
|
||||
}
|
||||
|
||||
void
|
||||
@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
|
||||
*num_vecs = __HFN_NUMINTS;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* No special setup required for crossbow -- vector assignments are implicit.
|
||||
*/
|
||||
void
|
||||
@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
|
||||
bfa->msix.handler[i] = bfa_msix_lpu_err;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Crossbow -- dummy, interrupts are masked
|
||||
*/
|
||||
void
|
||||
@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* No special enable/disable -- vector assignments are implicit.
|
||||
*/
|
||||
void
|
||||
|
@ -31,15 +31,15 @@ static void
|
||||
bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
|
||||
{
|
||||
int fn = bfa_ioc_pcifn(&bfa->ioc);
|
||||
bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
|
||||
if (msix)
|
||||
bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
|
||||
writel(vec, kva + __ct_msix_err_vec_reg[fn]);
|
||||
else
|
||||
bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
|
||||
writel(0, kva + __ct_msix_err_vec_reg[fn]);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Dummy interrupt handler for handling spurious interrupt during chip-reinit.
|
||||
*/
|
||||
static void
|
||||
@ -51,7 +51,7 @@ void
|
||||
bfa_hwct_reginit(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
|
||||
bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
|
||||
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
|
||||
|
||||
if (fn == 0) {
|
||||
@ -88,8 +88,8 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
|
||||
{
|
||||
u32 r32;
|
||||
|
||||
r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
|
||||
r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
|
||||
writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
|
||||
}
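Note the argument order in the converted calls above: the old bfa_reg_write() wrapper took (register, value), while the kernel's writel() takes (value, register), so each call site swaps its operands as well as its name. A short self-contained sketch of the new idiom (demo names only, not driver code):

#include <linux/io.h>		/* readl(), writel() */
#include <linux/types.h>

/* Read-then-write-back acknowledge, mirroring bfa_hwct_reqq_ack() above. */
static void demo_q_ack(void __iomem *q_ctrl)
{
	u32 r32;

	r32 = readl(q_ctrl);	/* was: bfa_reg_read(q_ctrl)       */
	writel(r32, q_ctrl);	/* was: bfa_reg_write(q_ctrl, r32) */
}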
|
||||
|
||||
void
|
||||
@ -97,8 +97,8 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
|
||||
{
|
||||
u32 r32;
|
||||
|
||||
r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
|
||||
bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
|
||||
r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
|
||||
writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
|
||||
}
|
||||
|
||||
void
|
||||
@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
|
||||
*num_vecs = BFA_MSIX_CT_MAX;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Setup MSI-X vector for catapult
|
||||
*/
|
||||
void
|
||||
@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
|
||||
bfa->msix.handler[i] = bfa_hwct_msix_dummy;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Enable MSI-X vectors
|
||||
*/
|
||||
void
|
||||
|
File diff suppressed because it is too large
@ -22,29 +22,29 @@
|
||||
#include "bfa_cs.h"
|
||||
#include "bfi.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA timer declarations
|
||||
*/
|
||||
typedef void (*bfa_timer_cbfn_t)(void *);
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA timer data structure
|
||||
*/
|
||||
struct bfa_timer_s {
|
||||
struct list_head qe;
|
||||
bfa_timer_cbfn_t timercb;
|
||||
void *arg;
|
||||
int timeout; /**< in millisecs. */
|
||||
int timeout; /* in millisecs */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Timer module structure
|
||||
*/
|
||||
struct bfa_timer_mod_s {
|
||||
struct list_head timer_q;
|
||||
};
|
||||
|
||||
#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
|
||||
#define BFA_TIMER_FREQ 200 /* specified in millisecs */
|
||||
|
||||
void bfa_timer_beat(struct bfa_timer_mod_s *mod);
|
||||
void bfa_timer_init(struct bfa_timer_mod_s *mod);
|
||||
@ -53,7 +53,7 @@ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
|
||||
unsigned int timeout);
|
||||
void bfa_timer_stop(struct bfa_timer_s *timer);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic Scatter Gather Element used by driver
|
||||
*/
|
||||
struct bfa_sge_s {
|
||||
@ -62,9 +62,9 @@ struct bfa_sge_s {
|
||||
};
|
||||
|
||||
#define bfa_sge_word_swap(__sge) do { \
|
||||
((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \
|
||||
((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \
|
||||
((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \
|
||||
((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \
|
||||
((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \
|
||||
((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \
|
||||
} while (0)
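Here bfa_os_swap32() gives way to the kernel's swab32(); the macro byte-reverses the three 32-bit words of a scatter-gather element in place. A one-word sketch of what each line does (demo code, not part of the driver):

#include <linux/swab.h>		/* swab32() */
#include <linux/types.h>

static inline void demo_swap_word(u32 *w)
{
	*w = swab32(*w);	/* 0x11223344 becomes 0x44332211 */
}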
|
||||
|
||||
#define bfa_swap_words(_x) ( \
|
||||
@ -80,17 +80,17 @@ struct bfa_sge_s {
|
||||
#define bfa_sgaddr_le(_x) (_x)
|
||||
#endif
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI device information required by IOC
|
||||
*/
|
||||
struct bfa_pcidev_s {
|
||||
int pci_slot;
|
||||
u8 pci_func;
|
||||
u16 device_id;
|
||||
bfa_os_addr_t pci_bar_kva;
|
||||
u16 device_id;
|
||||
void __iomem *pci_bar_kva;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Structure used to remember the DMA-able memory block's KVA and Physical
|
||||
* Address
|
||||
*/
|
||||
@ -102,7 +102,7 @@ struct bfa_dma_s {
|
||||
#define BFA_DMA_ALIGN_SZ 256
|
||||
#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
|
||||
|
||||
/**
|
||||
/*
|
||||
* smem size for Crossbow and Catapult
|
||||
*/
|
||||
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
|
||||
@ -125,40 +125,38 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
|
||||
static inline void
|
||||
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
|
||||
{
|
||||
dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
|
||||
dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
|
||||
dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
|
||||
dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
|
||||
}
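__bfa_dma_be_addr_set() above now calls cpu_to_be32() directly. The following stand-alone sketch shows the same idea; the assumption that bfa_os_u32() yields the upper 32 bits of the 64-bit address is mine, and demo_addr32 is a hypothetical stand-in for bfi_addr_u.

#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_be32() */

struct demo_addr32 {
	__be32	addr_lo;
	__be32	addr_hi;
};

static inline void demo_dma_be_addr_set(struct demo_addr32 *a, u64 pa)
{
	a->addr_lo = cpu_to_be32((u32) pa);		/* low 32 bits (assumed)  */
	a->addr_hi = cpu_to_be32((u32) (pa >> 32));	/* high 32 bits (assumed) */
}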
|
||||
|
||||
struct bfa_ioc_regs_s {
|
||||
bfa_os_addr_t hfn_mbox_cmd;
|
||||
bfa_os_addr_t hfn_mbox;
|
||||
bfa_os_addr_t lpu_mbox_cmd;
|
||||
bfa_os_addr_t lpu_mbox;
|
||||
bfa_os_addr_t pss_ctl_reg;
|
||||
bfa_os_addr_t pss_err_status_reg;
|
||||
bfa_os_addr_t app_pll_fast_ctl_reg;
|
||||
bfa_os_addr_t app_pll_slow_ctl_reg;
|
||||
bfa_os_addr_t ioc_sem_reg;
|
||||
bfa_os_addr_t ioc_usage_sem_reg;
|
||||
bfa_os_addr_t ioc_init_sem_reg;
|
||||
bfa_os_addr_t ioc_usage_reg;
|
||||
bfa_os_addr_t host_page_num_fn;
|
||||
bfa_os_addr_t heartbeat;
|
||||
bfa_os_addr_t ioc_fwstate;
|
||||
bfa_os_addr_t ll_halt;
|
||||
bfa_os_addr_t err_set;
|
||||
bfa_os_addr_t shirq_isr_next;
|
||||
bfa_os_addr_t shirq_msk_next;
|
||||
bfa_os_addr_t smem_page_start;
|
||||
void __iomem *hfn_mbox_cmd;
|
||||
void __iomem *hfn_mbox;
|
||||
void __iomem *lpu_mbox_cmd;
|
||||
void __iomem *lpu_mbox;
|
||||
void __iomem *pss_ctl_reg;
|
||||
void __iomem *pss_err_status_reg;
|
||||
void __iomem *app_pll_fast_ctl_reg;
|
||||
void __iomem *app_pll_slow_ctl_reg;
|
||||
void __iomem *ioc_sem_reg;
|
||||
void __iomem *ioc_usage_sem_reg;
|
||||
void __iomem *ioc_init_sem_reg;
|
||||
void __iomem *ioc_usage_reg;
|
||||
void __iomem *host_page_num_fn;
|
||||
void __iomem *heartbeat;
|
||||
void __iomem *ioc_fwstate;
|
||||
void __iomem *ll_halt;
|
||||
void __iomem *err_set;
|
||||
void __iomem *shirq_isr_next;
|
||||
void __iomem *shirq_msk_next;
|
||||
void __iomem *smem_page_start;
|
||||
u32 smem_pg0;
|
||||
};
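Retyping every register field from bfa_os_addr_t to void __iomem * is more than cosmetic: the __iomem annotation tells sparse (and readers) that these are MMIO cookies that must only be touched through accessors such as readl()/writel(), never dereferenced directly. A minimal illustration with a demo structure (not driver code):

#include <linux/io.h>
#include <linux/types.h>

struct demo_regs {
	void __iomem	*heartbeat;	/* MMIO cookie, not a normal pointer */
};

static u32 demo_read_heartbeat(struct demo_regs *regs)
{
	return readl(regs->heartbeat);	/* accessor, never *regs->heartbeat */
}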
|
||||
|
||||
#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr)
|
||||
#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val)
|
||||
#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off)
|
||||
#define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off))))
|
||||
#define bfa_mem_write(_raddr, _off, _val) \
|
||||
bfa_os_mem_write(_raddr, _off, _val)
|
||||
/**
|
||||
writel(swab32((_val)), ((_raddr) + (_off)))
|
||||
/*
|
||||
* IOC Mailbox structures
|
||||
*/
|
||||
struct bfa_mbox_cmd_s {
|
||||
@ -166,7 +164,7 @@ struct bfa_mbox_cmd_s {
|
||||
u32 msg[BFI_IOC_MSGSZ];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC mailbox module
|
||||
*/
|
||||
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
|
||||
@ -179,7 +177,7 @@ struct bfa_ioc_mbox_mod_s {
|
||||
} mbhdlr[BFI_MC_MAX];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC callback function interfaces
|
||||
*/
|
||||
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
|
||||
@ -193,7 +191,7 @@ struct bfa_ioc_cbfn_s {
|
||||
bfa_ioc_reset_cbfn_t reset_cbfn;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Heartbeat failure notification queue element.
|
||||
*/
|
||||
struct bfa_ioc_hbfail_notify_s {
|
||||
@ -202,7 +200,7 @@ struct bfa_ioc_hbfail_notify_s {
|
||||
void *cbarg;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize a heartbeat failure notification structure
|
||||
*/
|
||||
#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
|
||||
@ -249,7 +247,7 @@ struct bfa_ioc_s {
|
||||
};
|
||||
|
||||
struct bfa_ioc_hwif_s {
|
||||
bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
|
||||
bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
|
||||
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
|
||||
@ -267,7 +265,7 @@ struct bfa_ioc_hwif_s {
|
||||
#define bfa_ioc_fetch_stats(__ioc, __stats) \
|
||||
(((__stats)->drv_stats) = (__ioc)->stats)
|
||||
#define bfa_ioc_clr_stats(__ioc) \
|
||||
bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
|
||||
memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
|
||||
#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
|
||||
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
|
||||
#define bfa_ioc_speed_sup(__ioc) \
|
||||
@ -287,7 +285,7 @@ struct bfa_ioc_hwif_s {
|
||||
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
|
||||
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC mailbox interface
|
||||
*/
|
||||
void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
|
||||
@ -299,7 +297,7 @@ void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
|
||||
void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
|
||||
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC interfaces
|
||||
*/
|
||||
|
||||
@ -308,9 +306,9 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
|
||||
(__ioc)->fcmode))
|
||||
|
||||
bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
|
||||
bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
|
||||
bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
|
||||
bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
|
||||
bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
|
||||
bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
|
||||
bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
|
||||
|
||||
#define bfa_ioc_isr_mode_set(__ioc, __msix) \
|
||||
((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
|
||||
@ -370,8 +368,8 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
|
||||
bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
|
||||
void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
|
||||
struct bfa_ioc_hbfail_notify_s *notify);
|
||||
bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
|
||||
void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
|
||||
bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
|
||||
void bfa_ioc_sem_release(void __iomem *sem_reg);
|
||||
void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
|
||||
void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
|
||||
struct bfi_ioc_image_hdr_s *fwhdr);
|
||||
@ -441,7 +439,7 @@ bfa_cb_image_get_size(int type)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* CNA TRCMOD declaration
|
||||
*/
|
||||
/*
|
||||
|
@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
|
||||
|
||||
struct bfa_ioc_hwif_s hwif_cb;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called from bfa_ioc_attach() to map asic specific calls.
|
||||
*/
|
||||
void
|
||||
@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_hwif = &hwif_cb;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Return true if firmware of current driver matches the running firmware.
|
||||
*/
|
||||
static bfa_boolean_t
|
||||
@ -66,17 +66,17 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Notify other functions on HB failure.
|
||||
*/
|
||||
static void
|
||||
bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
|
||||
bfa_reg_read(ioc->ioc_regs.err_set);
|
||||
writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
|
||||
readl(ioc->ioc_regs.err_set);
|
||||
}
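The readl() that follows the writel() in the heartbeat-failure notifier above is deliberate: reading the register back forces the (possibly posted) PCI write out to the adapter before execution continues, which is why the catapult variant later in this diff carries the comment "Wait for halt to take effect". A generic sketch of the idiom (demo code only):

#include <linux/io.h>
#include <linux/types.h>

static void demo_set_and_flush(void __iomem *reg, u32 val)
{
	writel(val, reg);
	(void) readl(reg);	/* read back to flush the posted write */
}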
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host to LPU mailbox message addresses
|
||||
*/
|
||||
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
|
||||
@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
|
||||
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host <-> LPU mailbox command/status registers
|
||||
*/
|
||||
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
|
||||
@ -96,7 +96,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
|
||||
static void
|
||||
bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
bfa_os_addr_t rb;
|
||||
void __iomem *rb;
|
||||
int pcifn = bfa_ioc_pcifn(ioc);
|
||||
|
||||
rb = bfa_ioc_bar0(ioc);
|
||||
@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host <-> LPU mailbox command/status registers
|
||||
*/
|
||||
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
|
||||
@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
|
||||
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
|
||||
|
||||
/**
|
||||
/*
|
||||
* sram memory access
|
||||
*/
|
||||
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
|
||||
@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize IOC to port mapping.
|
||||
*/
|
||||
|
||||
static void
|
||||
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
/**
|
||||
/*
|
||||
* For crossbow, port id is same as pci function.
|
||||
*/
|
||||
ioc->port_id = bfa_ioc_pcifn(ioc);
|
||||
@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
|
||||
bfa_trc(ioc, ioc->port_id);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Set interrupt mode for a function: INTX or MSIX
|
||||
*/
|
||||
static void
|
||||
@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Cleanup hw semaphore and usecnt registers
|
||||
*/
|
||||
static void
|
||||
@ -180,14 +180,14 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
|
||||
* before we clear it. If it is not locked, writing 1
|
||||
* will lock it instead of clearing it.
|
||||
*/
|
||||
bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
|
||||
readl(ioc->ioc_regs.ioc_sem_reg);
|
||||
bfa_ioc_hw_sem_release(ioc);
|
||||
}
|
||||
|
||||
|
||||
|
||||
bfa_status_t
|
||||
bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
|
||||
bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
|
||||
{
|
||||
u32 pll_sclk, pll_fclk;
|
||||
|
||||
@ -199,38 +199,32 @@ bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
|
||||
__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
|
||||
__APP_PLL_400_JITLMT0_1(3U) |
|
||||
__APP_PLL_400_CNTLMT0_1(3U);
|
||||
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
|
||||
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
|
||||
__APP_PLL_212_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
|
||||
__APP_PLL_212_BYPASS |
|
||||
__APP_PLL_212_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
|
||||
__APP_PLL_400_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
|
||||
__APP_PLL_400_BYPASS |
|
||||
__APP_PLL_400_LOGIC_SOFT_RESET);
|
||||
bfa_os_udelay(2);
|
||||
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
|
||||
__APP_PLL_212_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
|
||||
__APP_PLL_400_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
|
||||
pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
|
||||
pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
|
||||
bfa_os_udelay(2000);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
|
||||
bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
|
||||
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
|
||||
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
|
||||
writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
|
||||
writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_212_CTL_REG);
|
||||
writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
|
||||
writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_400_CTL_REG);
|
||||
udelay(2);
|
||||
writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
|
||||
writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
|
||||
writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_212_CTL_REG);
|
||||
writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_400_CTL_REG);
|
||||
udelay(2000);
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
|
||||
writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
|
||||
writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
|
||||
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
|
||||
|
||||
struct bfa_ioc_hwif_s hwif_ct;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Called from bfa_ioc_attach() to map asic specific calls.
|
||||
*/
|
||||
void
|
||||
@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_hwif = &hwif_ct;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Return true if firmware of current driver matches the running firmware.
|
||||
*/
|
||||
static bfa_boolean_t
|
||||
@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
|
||||
u32 usecnt;
|
||||
struct bfi_ioc_image_hdr_s fwhdr;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Firmware match check is relevant only for CNA.
|
||||
*/
|
||||
if (!ioc->cna)
|
||||
return BFA_TRUE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* If bios boot (flash based) -- do not increment usage count
|
||||
*/
|
||||
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
|
||||
@ -76,27 +76,27 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
|
||||
return BFA_TRUE;
|
||||
|
||||
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
|
||||
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
|
||||
|
||||
/**
|
||||
/*
|
||||
* If usage count is 0, always return TRUE.
|
||||
*/
|
||||
if (usecnt == 0) {
|
||||
bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
|
||||
writel(1, ioc->ioc_regs.ioc_usage_reg);
|
||||
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
bfa_trc(ioc, usecnt);
|
||||
return BFA_TRUE;
|
||||
}
|
||||
|
||||
ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
|
||||
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
|
||||
bfa_trc(ioc, ioc_fwstate);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Use count cannot be non-zero and chip in uninitialized state.
|
||||
*/
|
||||
bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Check if another driver with a different firmware is active
|
||||
*/
|
||||
bfa_ioc_fwver_get(ioc, &fwhdr);
|
||||
@ -106,11 +106,11 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
|
||||
return BFA_FALSE;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Same firmware version. Increment the reference count.
|
||||
*/
|
||||
usecnt++;
|
||||
bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
|
||||
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
|
||||
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
bfa_trc(ioc, usecnt);
|
||||
return BFA_TRUE;
|
||||
@ -121,50 +121,50 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
u32 usecnt;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Firmware lock is relevant only for CNA.
|
||||
*/
|
||||
if (!ioc->cna)
|
||||
return;
|
||||
|
||||
/**
|
||||
/*
|
||||
* If bios boot (flash based) -- do not decrement usage count
|
||||
*/
|
||||
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
|
||||
BFA_IOC_FWIMG_MINSZ)
|
||||
return;
|
||||
|
||||
/**
|
||||
/*
|
||||
* decrement usage count
|
||||
*/
|
||||
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
|
||||
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
|
||||
bfa_assert(usecnt > 0);
|
||||
|
||||
usecnt--;
|
||||
bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
|
||||
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
|
||||
bfa_trc(ioc, usecnt);
|
||||
|
||||
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Notify other functions on HB failure.
|
||||
*/
|
||||
static void
|
||||
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
if (ioc->cna) {
|
||||
bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
|
||||
writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
|
||||
/* Wait for halt to take effect */
|
||||
bfa_reg_read(ioc->ioc_regs.ll_halt);
|
||||
readl(ioc->ioc_regs.ll_halt);
|
||||
} else {
|
||||
bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
|
||||
bfa_reg_read(ioc->ioc_regs.err_set);
|
||||
writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
|
||||
readl(ioc->ioc_regs.err_set);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host to LPU mailbox message addresses
|
||||
*/
|
||||
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
|
||||
@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
|
||||
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host <-> LPU mailbox command/status registers - port 0
|
||||
*/
|
||||
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
|
||||
@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
|
||||
{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Host <-> LPU mailbox command/status registers - port 1
|
||||
*/
|
||||
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
|
||||
@ -197,7 +197,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
|
||||
static void
|
||||
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
bfa_os_addr_t rb;
|
||||
void __iomem *rb;
|
||||
int pcifn = bfa_ioc_pcifn(ioc);
|
||||
|
||||
rb = bfa_ioc_bar0(ioc);
|
||||
@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
|
||||
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
|
||||
|
||||
/**
|
||||
/*
|
||||
* sram memory access
|
||||
*/
|
||||
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
|
||||
@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
|
||||
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize IOC to port mapping.
|
||||
*/
|
||||
|
||||
@ -256,13 +256,13 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
|
||||
static void
|
||||
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
|
||||
void __iomem *rb = ioc->pcidev.pci_bar_kva;
|
||||
u32 r32;
|
||||
|
||||
/**
|
||||
/*
|
||||
* For catapult, base port id on personality register and IOC type
|
||||
*/
|
||||
r32 = bfa_reg_read(rb + FNC_PERS_REG);
|
||||
r32 = readl(rb + FNC_PERS_REG);
|
||||
r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
|
||||
ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
|
||||
|
||||
@ -270,22 +270,22 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
|
||||
bfa_trc(ioc, ioc->port_id);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Set interrupt mode for a function: INTX or MSIX
|
||||
*/
|
||||
static void
|
||||
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
|
||||
{
|
||||
bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
|
||||
void __iomem *rb = ioc->pcidev.pci_bar_kva;
|
||||
u32 r32, mode;
|
||||
|
||||
r32 = bfa_reg_read(rb + FNC_PERS_REG);
|
||||
r32 = readl(rb + FNC_PERS_REG);
|
||||
bfa_trc(ioc, r32);
|
||||
|
||||
mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
|
||||
__F0_INTX_STATUS;
|
||||
|
||||
/**
|
||||
/*
|
||||
* If already in desired mode, do not change anything
|
||||
*/
|
||||
if (!msix && mode)
|
||||
@ -300,10 +300,10 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
|
||||
r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
|
||||
bfa_trc(ioc, r32);
|
||||
|
||||
bfa_reg_write(rb + FNC_PERS_REG, r32);
|
||||
writel(r32, rb + FNC_PERS_REG);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Cleanup hw semaphore and usecnt registers
|
||||
*/
|
||||
static void
|
||||
@ -312,7 +312,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
|
||||
|
||||
if (ioc->cna) {
|
||||
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
|
||||
writel(0, ioc->ioc_regs.ioc_usage_reg);
|
||||
bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
}
|
||||
|
||||
@ -321,7 +321,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
|
||||
* before we clear it. If it is not locked, writing 1
|
||||
* will lock it instead of clearing it.
|
||||
*/
|
||||
bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
|
||||
readl(ioc->ioc_regs.ioc_sem_reg);
|
||||
bfa_ioc_hw_sem_release(ioc);
|
||||
}
|
||||
|
||||
@ -331,17 +331,17 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
|
||||
* Check the firmware state to know if pll_init has been completed already
|
||||
*/
|
||||
bfa_boolean_t
|
||||
bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
|
||||
bfa_ioc_ct_pll_init_complete(void __iomem *rb)
|
||||
{
|
||||
if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
|
||||
(bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
|
||||
if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
|
||||
(readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
|
||||
return BFA_TRUE;
|
||||
|
||||
return BFA_FALSE;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
|
||||
bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
|
||||
{
|
||||
u32 pll_sclk, pll_fclk, r32;
|
||||
|
||||
@ -354,56 +354,51 @@ bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
|
||||
__APP_PLL_425_JITLMT0_1(3U) |
|
||||
__APP_PLL_425_CNTLMT0_1(1U);
|
||||
if (fcmode) {
|
||||
bfa_reg_write((rb + OP_MODE), 0);
|
||||
bfa_reg_write((rb + ETH_MAC_SER_REG),
|
||||
__APP_EMS_CMLCKSEL |
|
||||
__APP_EMS_REFCKBUFEN2 |
|
||||
__APP_EMS_CHANNEL_SEL);
|
||||
writel(0, (rb + OP_MODE));
|
||||
writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
|
||||
__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
|
||||
} else {
|
||||
bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
|
||||
bfa_reg_write((rb + ETH_MAC_SER_REG),
|
||||
__APP_EMS_REFCKBUFEN1);
|
||||
writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
|
||||
writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
|
||||
}
|
||||
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
|
||||
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
|
||||
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
|
||||
__APP_PLL_312_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
|
||||
__APP_PLL_425_LOGIC_SOFT_RESET);
|
||||
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
|
||||
__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
|
||||
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
|
||||
__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
|
||||
bfa_reg_read(rb + HOSTFN0_INT_MSK);
|
||||
bfa_os_udelay(2000);
|
||||
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
|
||||
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
|
||||
__APP_PLL_312_ENABLE);
|
||||
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
|
||||
__APP_PLL_425_ENABLE);
|
||||
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
|
||||
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
|
||||
writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_312_CTL_REG);
|
||||
writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
|
||||
rb + APP_PLL_425_CTL_REG);
|
||||
writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
|
||||
rb + APP_PLL_312_CTL_REG);
|
||||
writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
|
||||
rb + APP_PLL_425_CTL_REG);
|
||||
readl(rb + HOSTFN0_INT_MSK);
|
||||
udelay(2000);
|
||||
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
|
||||
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
|
||||
writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
|
||||
writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
|
||||
if (!fcmode) {
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
|
||||
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
|
||||
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
|
||||
}
|
||||
r32 = bfa_reg_read((rb + PSS_CTL_REG));
|
||||
r32 = readl((rb + PSS_CTL_REG));
|
||||
r32 &= ~__PSS_LMEM_RESET;
|
||||
bfa_reg_write((rb + PSS_CTL_REG), r32);
|
||||
bfa_os_udelay(1000);
|
||||
writel(r32, (rb + PSS_CTL_REG));
|
||||
udelay(1000);
|
||||
if (!fcmode) {
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
|
||||
writel(0, (rb + PMM_1T_RESET_REG_P0));
|
||||
writel(0, (rb + PMM_1T_RESET_REG_P1));
|
||||
}
|
||||
|
||||
bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
|
||||
bfa_os_udelay(1000);
|
||||
r32 = bfa_reg_read((rb + MBIST_STAT_REG));
|
||||
bfa_reg_write((rb + MBIST_CTL_REG), 0);
|
||||
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
|
||||
udelay(1000);
|
||||
r32 = readl((rb + MBIST_STAT_REG));
|
||||
writel(0, (rb + MBIST_CTL_REG));
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_modules.h BFA modules
|
||||
*/
|
||||
|
||||
@ -52,7 +52,7 @@ enum {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Macro to define a new BFA module
|
||||
*/
|
||||
#define BFA_MODULE(__mod) \
|
||||
@ -80,7 +80,7 @@ enum {
|
||||
|
||||
#define BFA_CACHELINE_SZ (256)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Structure used to interact between different BFA sub modules
|
||||
*
|
||||
* Each sub module needs to implement only the entry points relevant to it (and
|
||||
|
@ -15,10 +15,6 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Contains declarations all OS Specific files needed for BFA layer
|
||||
*/
|
||||
|
||||
#ifndef __BFA_OS_INC_H__
|
||||
#define __BFA_OS_INC_H__
|
||||
|
||||
@ -44,11 +40,6 @@
|
||||
#define __BIGENDIAN
|
||||
#endif
|
||||
|
||||
static inline u64 bfa_os_get_clock(void)
|
||||
{
|
||||
return jiffies;
|
||||
}
|
||||
|
||||
static inline u64 bfa_os_get_log_time(void)
|
||||
{
|
||||
u64 system_time = 0;
|
||||
@ -63,13 +54,6 @@ static inline u64 bfa_os_get_log_time(void)
|
||||
#define bfa_io_lat_clock_res_div HZ
|
||||
#define bfa_io_lat_clock_res_mul 1000
|
||||
|
||||
#define BFA_ASSERT(p) do { \
|
||||
if (!(p)) { \
|
||||
printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
|
||||
#p, __FILE__, __LINE__); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define BFA_LOG(level, bfad, mask, fmt, arg...) \
|
||||
do { \
|
||||
if (((mask) == 4) || (level[1] <= '4')) \
|
||||
@ -81,22 +65,6 @@ do { \
|
||||
((_x) & 0x00ff00) | \
|
||||
(((_x) & 0xff0000) >> 16))
|
||||
|
||||
#define bfa_swap_8b(_x) \
|
||||
((((_x) & 0xff00000000000000ull) >> 56) \
|
||||
| (((_x) & 0x00ff000000000000ull) >> 40) \
|
||||
| (((_x) & 0x0000ff0000000000ull) >> 24) \
|
||||
| (((_x) & 0x000000ff00000000ull) >> 8) \
|
||||
| (((_x) & 0x00000000ff000000ull) << 8) \
|
||||
| (((_x) & 0x0000000000ff0000ull) << 24) \
|
||||
| (((_x) & 0x000000000000ff00ull) << 40) \
|
||||
| (((_x) & 0x00000000000000ffull) << 56))
|
||||
|
||||
#define bfa_os_swap32(_x) \
|
||||
((((_x) & 0xff) << 24) | \
|
||||
(((_x) & 0x0000ff00) << 8) | \
|
||||
(((_x) & 0x00ff0000) >> 8) | \
|
||||
(((_x) & 0xff000000) >> 24))
|
||||
|
||||
#define bfa_os_swap_sgaddr(_x) ((u64)( \
|
||||
(((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
|
||||
(((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
|
||||
@ -108,59 +76,27 @@ do { \
|
||||
(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
|
||||
|
||||
#ifndef __BIGENDIAN
|
||||
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
|
||||
(((_x) & 0x00ff) << 8)))
|
||||
#define bfa_os_htonl(_x) bfa_os_swap32(_x)
|
||||
#define bfa_os_htonll(_x) bfa_swap_8b(_x)
|
||||
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
|
||||
#define bfa_os_wtole(_x) (_x)
|
||||
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
|
||||
#define bfa_os_sgaddr(_x) (_x)
|
||||
|
||||
#else
|
||||
|
||||
#define bfa_os_htons(_x) (_x)
|
||||
#define bfa_os_htonl(_x) (_x)
|
||||
#define bfa_os_hton3b(_x) (_x)
|
||||
#define bfa_os_htonll(_x) (_x)
|
||||
#define bfa_os_wtole(_x) bfa_os_swap32(_x)
|
||||
#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
|
||||
|
||||
#endif
|
||||
|
||||
#define bfa_os_ntohs(_x) bfa_os_htons(_x)
|
||||
#define bfa_os_ntohl(_x) bfa_os_htonl(_x)
|
||||
#define bfa_os_ntohll(_x) bfa_os_htonll(_x)
|
||||
#define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x)
|
||||
|
||||
#define bfa_os_u32(__pa64) ((__pa64) >> 32)
|
||||
|
||||
#define bfa_os_memset memset
|
||||
#define bfa_os_memcpy memcpy
|
||||
#define bfa_os_udelay udelay
|
||||
#define bfa_os_vsprintf vsprintf
|
||||
#define bfa_os_snprintf snprintf
|
||||
|
||||
#define bfa_os_assign(__t, __s) __t = __s
|
||||
#define bfa_os_addr_t void __iomem *
|
||||
|
||||
#define bfa_os_reg_read(_raddr) readl(_raddr)
|
||||
#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
|
||||
#define bfa_os_mem_read(_raddr, _off) \
|
||||
bfa_os_swap32(readl(((_raddr) + (_off))))
|
||||
#define bfa_os_mem_write(_raddr, _off, _val) \
|
||||
writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
|
||||
|
||||
#define BFA_TRC_TS(_trcm) \
|
||||
({ \
|
||||
struct timeval tv; \
|
||||
\
|
||||
do_gettimeofday(&tv); \
|
||||
(tv.tv_sec*1000000+tv.tv_usec); \
|
||||
})
|
||||
#define BFA_TRC_TS(_trcm) \
|
||||
({ \
|
||||
struct timeval tv; \
|
||||
\
|
||||
do_gettimeofday(&tv); \
|
||||
(tv.tv_sec*1000000+tv.tv_usec); \
|
||||
})
|
||||
|
||||
#define boolean_t int
|
||||
|
||||
/**
|
||||
/*
|
||||
* For current time stamp, OS API will fill-in
|
||||
*/
|
||||
struct bfa_timeval_s {
|
||||
|
@ -37,16 +37,16 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
|
||||
t0 = dip[i];
|
||||
t1 = dip[i + 1];
|
||||
#ifdef __BIGENDIAN
|
||||
dip[i] = bfa_os_ntohl(t0);
|
||||
dip[i + 1] = bfa_os_ntohl(t1);
|
||||
dip[i] = be32_to_cpu(t0);
|
||||
dip[i + 1] = be32_to_cpu(t1);
|
||||
#else
|
||||
dip[i] = bfa_os_ntohl(t1);
|
||||
dip[i + 1] = bfa_os_ntohl(t0);
|
||||
dip[i] = be32_to_cpu(t1);
|
||||
dip[i + 1] = be32_to_cpu(t0);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_enable_isr()
|
||||
*
|
||||
*
|
||||
@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
port->endis_cbfn(port->endis_cbarg, status);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_disable_isr()
|
||||
*
|
||||
*
|
||||
@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
port->endis_cbfn(port->endis_cbarg, status);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_get_stats_isr()
|
||||
*
|
||||
*
|
||||
@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_clear_stats_isr()
|
||||
*
|
||||
*
|
||||
@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
port->stats_status = status;
|
||||
port->stats_busy = BFA_FALSE;
|
||||
|
||||
/**
|
||||
/*
|
||||
* re-initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_isr()
|
||||
*
|
||||
*
|
||||
@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_meminfo()
|
||||
*
|
||||
*
|
||||
@ -203,7 +203,7 @@ bfa_port_meminfo(void)
|
||||
return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_mem_claim()
|
||||
*
|
||||
*
|
||||
@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
|
||||
port->stats_dma.pa = dma_pa;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_enable()
|
||||
*
|
||||
* Send the Port enable request to the f/w
|
||||
@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_disable()
|
||||
*
|
||||
* Send the Port disable request to the f/w
|
||||
@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_get_stats()
|
||||
*
|
||||
* Send the request to the f/w to fetch Port statistics.
|
||||
@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_clear_stats()
|
||||
*
|
||||
*
|
||||
@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_hbfail()
|
||||
*
|
||||
*
|
||||
@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_attach()
|
||||
*
|
||||
*
|
||||
@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
|
||||
bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
|
||||
bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
|
||||
|
||||
/**
|
||||
/*
|
||||
* initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
|
||||
bfa_trc(port, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_port_detach()
|
||||
*
|
||||
*
|
||||
|
File diff suppressed because it is too large
@ -22,12 +22,12 @@
|
||||
#include "bfi_ms.h"
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scatter-gather DMA related defines
|
||||
*/
|
||||
#define BFA_SGPG_MIN (16)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Alignment macro for SG page allocation
|
||||
*/
|
||||
#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
|
||||
@ -48,7 +48,7 @@ struct bfa_sgpg_s {
|
||||
union bfi_addr_u sgpg_pa; /* pa of SG page */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
|
||||
* SG pages required.
|
||||
*/
|
||||
@ -75,7 +75,7 @@ void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
|
||||
void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCXP related defines
|
||||
*/
|
||||
#define BFA_FCXP_MIN (1)
|
||||
@ -115,12 +115,12 @@ typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
|
||||
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Information needed for a FCXP request
|
||||
*/
|
||||
struct bfa_fcxp_req_info_s {
|
||||
struct bfa_rport_s *bfa_rport;
|
||||
/** Pointer to the bfa rport that was
|
||||
/* Pointer to the bfa rport that was
|
||||
* returned from bfa_rport_create().
|
||||
* This could be left NULL for WKA or
|
||||
* for FCXP interactions before the
|
||||
@ -137,11 +137,10 @@ struct bfa_fcxp_req_info_s {
|
||||
|
||||
struct bfa_fcxp_rsp_info_s {
|
||||
struct fchs_s rsp_fchs;
|
||||
/** !< Response frame's FC header will
|
||||
/* Response frame's FC header will
|
||||
* be sent back in this field */
|
||||
u8 rsp_timeout;
|
||||
/** !< timeout in seconds, 0-no response
|
||||
*/
|
||||
/* timeout in seconds, 0-no response */
|
||||
u8 rsvd2[3];
|
||||
u32 rsp_maxlen; /* max response length expected */
|
||||
};
|
||||
@ -218,7 +217,7 @@ struct bfa_fcxp_wqe_s {
|
||||
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* RPORT related defines
|
||||
*/
|
||||
#define BFA_RPORT_MIN 4
|
||||
@ -232,7 +231,7 @@ struct bfa_rport_mod_s {
|
||||
|
||||
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Convert rport tag to RPORT
|
||||
*/
|
||||
#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
|
||||
@ -244,7 +243,7 @@ struct bfa_rport_mod_s {
|
||||
*/
|
||||
void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA rport information.
|
||||
*/
|
||||
struct bfa_rport_info_s {
|
||||
@ -259,7 +258,7 @@ struct bfa_rport_info_s {
|
||||
enum bfa_port_speed speed; /* Rport's current speed */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA rport data structure
|
||||
*/
|
||||
struct bfa_rport_s {
|
||||
@ -282,7 +281,7 @@ struct bfa_rport_s {
|
||||
#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* UF - unsolicited receive related defines
|
||||
*/
|
||||
|
||||
@ -305,7 +304,7 @@ struct bfa_uf_s {
|
||||
struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Callback prototype for unsolicited frame receive handler.
|
||||
*
|
||||
* @param[in] cbarg callback arg for receive handler
|
||||
@ -338,7 +337,7 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
|
||||
#define BFA_UF_BUFSZ (2 * 1024 + 256)
|
||||
|
||||
/**
|
||||
/*
|
||||
* @todo private
|
||||
*/
|
||||
struct bfa_uf_buf_s {
|
||||
@ -346,7 +345,7 @@ struct bfa_uf_buf_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* LPS - bfa lport login/logout service interface
|
||||
*/
|
||||
struct bfa_lps_s {
|
||||
@ -397,14 +396,14 @@ struct bfa_lps_mod_s {
|
||||
void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCPORT related defines
|
||||
*/
|
||||
|
||||
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
|
||||
typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Link notification data structure
|
||||
*/
|
||||
struct bfa_fcport_ln_s {
|
||||
@ -418,7 +417,7 @@ struct bfa_fcport_trunk_s {
|
||||
struct bfa_trunk_attr_s attr;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FC port data structure
|
||||
*/
|
||||
struct bfa_fcport_s {
|
||||
@ -613,7 +612,7 @@ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
|
||||
void *cbarg);
|
||||
void bfa_uf_free(struct bfa_uf_s *uf);
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa lport service api
|
||||
*/
|
||||
|
||||
|
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfad.c Linux driver PCI interface module.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
|
||||
static void
|
||||
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
|
||||
|
||||
/**
|
||||
/*
|
||||
* Beginning state for the driver instance, awaiting the pci_probe event
|
||||
*/
|
||||
static void
|
||||
@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Driver Instance is created, awaiting event INIT to initialize the bfad
|
||||
*/
|
||||
static void
|
||||
@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA callbacks
|
||||
*/
|
||||
void
|
||||
@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
|
||||
complete(&fcomp->comp);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_init callback
|
||||
*/
|
||||
void
|
||||
@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
|
||||
complete(&bfad->comp);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA_FCS callbacks
|
||||
*/
|
||||
struct bfad_port_s *
|
||||
@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS RPORT alloc callback, after successful PLOGI by FCS
|
||||
*/
|
||||
bfa_status_t
|
||||
@ -478,7 +478,7 @@ bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCS PBC VPORT Create
|
||||
*/
|
||||
void
|
||||
@ -663,7 +663,7 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Create a vport under a vf.
|
||||
*/
|
||||
bfa_status_t
|
||||
@ -716,30 +716,6 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a vf and its base vport implicitely.
|
||||
*/
|
||||
bfa_status_t
|
||||
bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
|
||||
struct bfa_lport_cfg_s *port_cfg)
|
||||
{
|
||||
struct bfad_vf_s *vf;
|
||||
int rc = BFA_STATUS_OK;
|
||||
|
||||
vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
|
||||
if (!vf) {
|
||||
rc = BFA_STATUS_FAILED;
|
||||
goto ext;
|
||||
}
|
||||
|
||||
rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
|
||||
vf);
|
||||
if (rc != BFA_STATUS_OK)
|
||||
kfree(vf);
|
||||
ext:
|
||||
return rc;
|
||||
}
|
||||
|
||||
void
|
||||
bfad_bfa_tmo(unsigned long data)
|
||||
{
|
||||
@ -885,20 +861,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
}
|
||||
|
||||
void
|
||||
bfad_fcs_port_cfg(struct bfad_s *bfad)
|
||||
{
|
||||
struct bfa_lport_cfg_s port_cfg;
|
||||
struct bfa_port_attr_s attr;
|
||||
char symname[BFA_SYMNAME_MAXLEN];
|
||||
|
||||
sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
|
||||
memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
|
||||
bfa_fcport_get_attr(&bfad->bfa, &attr);
|
||||
port_cfg.nwwn = attr.nwwn;
|
||||
port_cfg.pwwn = attr.pwwn;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
bfad_drv_init(struct bfad_s *bfad)
|
||||
{
|
||||
@ -1089,9 +1051,6 @@ bfad_start_ops(struct bfad_s *bfad) {
|
||||
bfa_fcs_init(&bfad->bfa_fcs);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
/* PPORT FCS config */
|
||||
bfad_fcs_port_cfg(bfad);
|
||||
|
||||
retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
|
||||
if (retval != BFA_STATUS_OK) {
|
||||
if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
|
||||
@ -1181,7 +1140,7 @@ bfad_worker(void *ptr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA driver interrupt functions
|
||||
*/
|
||||
irqreturn_t
|
||||
@ -1240,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Initialize the MSIX entry table.
|
||||
*/
|
||||
static void
|
||||
@ -1293,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Setup MSIX based interrupt.
|
||||
*/
|
||||
int
|
||||
@ -1374,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI probe entry.
|
||||
*/
|
||||
int
|
||||
@ -1460,7 +1419,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* PCI remove entry.
|
||||
*/
|
||||
void
|
||||
@ -1541,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
|
||||
.remove = __devexit_p(bfad_pci_remove),
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Driver module init.
|
||||
*/
|
||||
static int __init
|
||||
@ -1581,7 +1540,7 @@ bfad_init(void)
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Driver module exit.
|
||||
*/
|
||||
static void __exit
|
||||
|
@ -15,14 +15,14 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_attr.c Linux driver configuration interface module.
|
||||
*/
|
||||
|
||||
#include "bfad_drv.h"
|
||||
#include "bfad_im.h"
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI target port ID.
|
||||
*/
|
||||
void
|
||||
@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI target nwwn.
|
||||
*/
|
||||
void
|
||||
@ -70,11 +70,11 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
|
||||
if (itnim)
|
||||
node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
|
||||
|
||||
fc_starget_node_name(starget) = bfa_os_htonll(node_name);
|
||||
fc_starget_node_name(starget) = cpu_to_be64(node_name);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI target pwwn.
|
||||
*/
|
||||
void
|
||||
@ -96,11 +96,11 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
|
||||
if (itnim)
|
||||
port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
|
||||
|
||||
fc_starget_port_name(starget) = bfa_os_htonll(port_name);
|
||||
fc_starget_port_name(starget) = cpu_to_be64(port_name);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host port ID.
|
||||
*/
|
||||
void
|
||||
@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
|
||||
bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host port type.
|
||||
*/
|
||||
static void
|
||||
@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host port state.
|
||||
*/
|
||||
static void
|
||||
@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host active fc4s.
|
||||
*/
|
||||
static void
|
||||
@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
|
||||
fc_host_active_fc4s(shost)[7] = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host link speed.
|
||||
*/
|
||||
static void
|
||||
@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get SCSI host port type.
|
||||
*/
|
||||
static void
|
||||
@ -249,11 +249,11 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
|
||||
|
||||
fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
|
||||
|
||||
fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
|
||||
fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get BFAD statistics.
|
||||
*/
|
||||
static struct fc_host_statistics *
|
||||
@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
|
||||
return hstats;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, reset BFAD statistics.
|
||||
*/
|
||||
static void
|
||||
@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, get rport loss timeout.
|
||||
*/
|
||||
static void
|
||||
@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* FC transport template entry, set rport loss timeout.
|
||||
*/
|
||||
static void
|
||||
@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
|
||||
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host_attrs SCSI host attributes
|
||||
*/
|
||||
static ssize_t
|
||||
@ -733,7 +733,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
|
||||
u64 nwwn;
|
||||
|
||||
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
|
||||
return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
|
||||
return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
|
@ -318,7 +318,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
|
||||
regbuf = (u32 *)bfad->regdata;
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
for (i = 0; i < len; i++) {
|
||||
*regbuf = bfa_reg_read(reg_addr);
|
||||
*regbuf = readl(reg_addr);
|
||||
regbuf++;
|
||||
reg_addr += sizeof(u32);
|
||||
}
|
||||
@ -361,7 +361,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
|
||||
|
||||
reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
bfa_reg_write(reg_addr, val);
|
||||
writel(val, reg_addr);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
return nbytes;
|
||||
|
@ -15,11 +15,11 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* Contains base driver definitions.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfa_drv.h Linux driver data structures.
|
||||
*/
|
||||
|
||||
@ -309,7 +309,6 @@ void bfad_bfa_tmo(unsigned long data);
|
||||
void bfad_init_timer(struct bfad_s *bfad);
|
||||
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
|
||||
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
|
||||
void bfad_fcs_port_cfg(struct bfad_s *bfad);
|
||||
void bfad_drv_uninit(struct bfad_s *bfad);
|
||||
int bfad_worker(void *ptr);
|
||||
void bfad_debugfs_init(struct bfad_port_s *port);
|
||||
|
@ -15,7 +15,7 @@
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* bfad_im.c Linux driver IM module.
|
||||
*/
|
||||
|
||||
@ -164,10 +164,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
|
||||
wake_up(wq);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host_template SCSI host template
|
||||
*/
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry, returns BFAD PCI info.
|
||||
*/
|
||||
static const char *
|
||||
@ -196,7 +196,7 @@ bfad_im_info(struct Scsi_Host *shost)
|
||||
return bfa_buf;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry, aborts the specified SCSI command.
|
||||
*
|
||||
* Returns: SUCCESS or FAILED.
|
||||
@ -280,7 +280,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry, resets a LUN and abort its all commands.
|
||||
*
|
||||
* Returns: SUCCESS or FAILED.
|
||||
@ -319,7 +319,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Set host_scribble to NULL to avoid aborting a task command
|
||||
* if happens.
|
||||
*/
|
||||
@ -346,7 +346,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry, resets the bus and abort all commands.
|
||||
*/
|
||||
static int
|
||||
@ -396,7 +396,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry slave_destroy.
|
||||
*/
|
||||
static void
|
||||
@ -406,11 +406,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FCS itnim callbacks
|
||||
*/
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FCS itnim alloc callback, after successful PRLI
|
||||
* Context: Interrupt
|
||||
*/
|
||||
@ -433,7 +433,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
|
||||
bfad->bfad_flags |= BFAD_RPORT_ONLINE;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FCS itnim free callback.
|
||||
* Context: Interrupt. bfad_lock is held
|
||||
*/
|
||||
@ -471,7 +471,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
|
||||
queue_work(im->drv_workq, &itnim_drv->itnim_work);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FCS itnim online callback.
|
||||
* Context: Interrupt. bfad_lock is held
|
||||
*/
|
||||
@ -492,7 +492,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
|
||||
queue_work(im->drv_workq, &itnim_drv->itnim_work);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFA FCS itnim offline callback.
|
||||
* Context: Interrupt. bfad_lock is held
|
||||
*/
|
||||
@ -519,7 +519,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
|
||||
queue_work(im->drv_workq, &itnim_drv->itnim_work);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Allocate a Scsi_Host for a port.
|
||||
*/
|
||||
int
|
||||
@ -751,7 +751,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry.
|
||||
*
|
||||
* Description:
|
||||
@ -896,7 +896,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry slave_alloc
|
||||
*/
|
||||
static int
|
||||
@@ -915,12 +915,16 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
static u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s ioc_attr;
struct bfa_ioc_attr_s *ioc_attr;
u32 supported_speed = 0;

bfa_get_attr(bfa, &ioc_attr);
if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
if (ioc_attr.adapter_attr.is_mezz) {
ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
if (!ioc_attr)
return 0;

bfa_get_attr(bfa, ioc_attr);
if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
if (ioc_attr->adapter_attr.is_mezz) {
supported_speed |= FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
@@ -929,12 +933,13 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT;
}
} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
FC_PORTSPEED_1GBIT;
} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
supported_speed |= FC_PORTSPEED_10GBIT;
}
kfree(ioc_attr);
return supported_speed;
}
|
||||
@ -944,14 +949,13 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
|
||||
struct Scsi_Host *host = im_port->shost;
|
||||
struct bfad_s *bfad = im_port->bfad;
|
||||
struct bfad_port_s *port = im_port->port;
|
||||
struct bfa_port_attr_s pattr;
|
||||
struct bfa_lport_attr_s port_attr;
|
||||
char symname[BFA_SYMNAME_MAXLEN];
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
|
||||
|
||||
fc_host_node_name(host) =
|
||||
bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
|
||||
cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
|
||||
fc_host_port_name(host) =
|
||||
bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
|
||||
cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
|
||||
fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
|
||||
|
||||
fc_host_supported_classes(host) = FC_COS_CLASS3;
|
||||
@ -964,15 +968,12 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
|
||||
/* For fibre channel services type 0x20 */
|
||||
fc_host_supported_fc4s(host)[7] = 1;
|
||||
|
||||
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
|
||||
strncpy(symname, port_attr.port_cfg.sym_name.symname,
|
||||
strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
|
||||
BFA_SYMNAME_MAXLEN);
|
||||
sprintf(fc_host_symbolic_name(host), "%s", symname);
|
||||
|
||||
fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
|
||||
|
||||
bfa_fcport_get_attr(&bfad->bfa, &pattr);
|
||||
fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
|
||||
fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -983,9 +984,9 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
|
||||
struct bfad_itnim_data_s *itnim_data;
|
||||
|
||||
rport_ids.node_name =
|
||||
bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
|
||||
cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
|
||||
rport_ids.port_name =
|
||||
bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
|
||||
cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
|
||||
rport_ids.port_id =
|
||||
bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
|
||||
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
@ -1015,7 +1016,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Work queue handler using FC transport service
|
||||
* Context: kernel
|
||||
*/
|
||||
@ -1115,7 +1116,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scsi_Host template entry, queue a SCSI command to the BFAD.
|
||||
*/
|
||||
static int
|
||||
|
@ -23,7 +23,7 @@
|
||||
|
||||
#pragma pack(1)
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI FW image type
|
||||
*/
|
||||
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
|
||||
@ -35,7 +35,7 @@ enum {
|
||||
BFI_IMAGE_MAX,
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Msg header common to all msgs
|
||||
*/
|
||||
struct bfi_mhdr_s {
|
||||
@ -68,7 +68,7 @@ struct bfi_mhdr_s {
|
||||
#define BFI_I2H_OPCODE_BASE 128
|
||||
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
|
||||
|
||||
/**
|
||||
/*
|
||||
****************************************************************************
|
||||
*
|
||||
* Scatter Gather Element and Page definition
|
||||
@ -79,7 +79,7 @@ struct bfi_mhdr_s {
|
||||
#define BFI_SGE_INLINE 1
|
||||
#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
|
||||
|
||||
/**
|
||||
/*
|
||||
* SG Flags
|
||||
*/
|
||||
enum {
|
||||
@ -90,7 +90,7 @@ enum {
|
||||
BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* DMA addresses
|
||||
*/
|
||||
union bfi_addr_u {
|
||||
@ -100,7 +100,7 @@ union bfi_addr_u {
|
||||
} a32;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scatter Gather Element
|
||||
*/
|
||||
struct bfi_sge_s {
|
||||
@ -116,7 +116,7 @@ struct bfi_sge_s {
|
||||
union bfi_addr_u sga;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Scatter Gather Page
|
||||
*/
|
||||
#define BFI_SGPG_DATA_SGES 7
|
||||
@ -139,7 +139,7 @@ struct bfi_msg_s {
|
||||
u32 pl[BFI_LMSG_PL_WSZ];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Mailbox message structure
|
||||
*/
|
||||
#define BFI_MBMSG_SZ 7
|
||||
@ -148,7 +148,7 @@ struct bfi_mbmsg_s {
|
||||
u32 pl[BFI_MBMSG_SZ];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Message Classes
|
||||
*/
|
||||
enum bfi_mclass {
|
||||
@ -186,7 +186,7 @@ enum bfi_mclass {
|
||||
#define BFI_BOOT_LOADER_BIOS 1
|
||||
#define BFI_BOOT_LOADER_UEFI 2
|
||||
|
||||
/**
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
* IOC
|
||||
*----------------------------------------------------------------------
|
||||
@ -208,7 +208,7 @@ enum bfi_ioc_i2h_msgs {
|
||||
BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOC_H2I_GETATTR_REQ message
|
||||
*/
|
||||
struct bfi_ioc_getattr_req_s {
|
||||
@ -242,7 +242,7 @@ struct bfi_ioc_attr_s {
|
||||
u32 card_type; /* card type */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOC_I2H_GETATTR_REPLY message
|
||||
*/
|
||||
struct bfi_ioc_getattr_reply_s {
|
||||
@ -251,19 +251,19 @@ struct bfi_ioc_getattr_reply_s {
|
||||
u8 rsvd[3];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Firmware memory page offsets
|
||||
*/
|
||||
#define BFI_IOC_SMEM_PG0_CB (0x40)
|
||||
#define BFI_IOC_SMEM_PG0_CT (0x180)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Firmware statistic offset
|
||||
*/
|
||||
#define BFI_IOC_FWSTATS_OFF (0x6B40)
|
||||
#define BFI_IOC_FWSTATS_SZ (4096)
|
||||
|
||||
/**
|
||||
/*
|
||||
* Firmware trace offset
|
||||
*/
|
||||
#define BFI_IOC_TRC_OFF (0x4b00)
|
||||
@ -280,7 +280,7 @@ struct bfi_ioc_image_hdr_s {
|
||||
u32 md5sum[BFI_IOC_MD5SUM_SZ];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOC_I2H_READY_EVENT message
|
||||
*/
|
||||
struct bfi_ioc_rdy_event_s {
|
||||
@ -294,7 +294,7 @@ struct bfi_ioc_hbeat_s {
|
||||
u32 hb_count; /* current heart beat count */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IOC hardware/firmware state
|
||||
*/
|
||||
enum bfi_ioc_state {
|
||||
@ -340,7 +340,7 @@ enum {
|
||||
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
|
||||
BFI_ADAPTER_UNSUPP))
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
|
||||
*/
|
||||
struct bfi_ioc_ctrl_req_s {
|
||||
@ -352,7 +352,7 @@ struct bfi_ioc_ctrl_req_s {
|
||||
#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
|
||||
#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
|
||||
*/
|
||||
struct bfi_ioc_ctrl_reply_s {
|
||||
@ -364,7 +364,7 @@ struct bfi_ioc_ctrl_reply_s {
|
||||
#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
|
||||
|
||||
#define BFI_IOC_MSGSZ 8
|
||||
/**
|
||||
/*
|
||||
* H2I Messages
|
||||
*/
|
||||
union bfi_ioc_h2i_msg_u {
|
||||
@ -375,7 +375,7 @@ union bfi_ioc_h2i_msg_u {
|
||||
u32 mboxmsg[BFI_IOC_MSGSZ];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* I2H Messages
|
||||
*/
|
||||
union bfi_ioc_i2h_msg_u {
|
||||
@ -385,7 +385,7 @@ union bfi_ioc_i2h_msg_u {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
* PBC
|
||||
*----------------------------------------------------------------------
|
||||
@ -394,7 +394,7 @@ union bfi_ioc_i2h_msg_u {
|
||||
#define BFI_PBC_MAX_BLUNS 8
|
||||
#define BFI_PBC_MAX_VPORTS 16
|
||||
|
||||
/**
|
||||
/*
|
||||
* PBC boot lun configuration
|
||||
*/
|
||||
struct bfi_pbc_blun_s {
|
||||
@ -402,7 +402,7 @@ struct bfi_pbc_blun_s {
|
||||
lun_t tgt_lun;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* PBC virtual port configuration
|
||||
*/
|
||||
struct bfi_pbc_vport_s {
|
||||
@ -410,7 +410,7 @@ struct bfi_pbc_vport_s {
|
||||
wwn_t vp_nwwn;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI pre-boot configuration information
|
||||
*/
|
||||
struct bfi_pbc_s {
|
||||
@ -427,7 +427,7 @@ struct bfi_pbc_s {
|
||||
struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
* MSGQ
|
||||
*----------------------------------------------------------------------
|
||||
@ -531,7 +531,7 @@ enum bfi_port_i2h {
|
||||
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic REQ type
|
||||
*/
|
||||
struct bfi_port_generic_req_s {
|
||||
@ -540,7 +540,7 @@ struct bfi_port_generic_req_s {
|
||||
u32 rsvd;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic RSP type
|
||||
*/
|
||||
struct bfi_port_generic_rsp_s {
|
||||
@ -550,7 +550,7 @@ struct bfi_port_generic_rsp_s {
|
||||
u32 msgtag; /* msgtag for reply */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_PORT_H2I_GET_STATS_REQ
|
||||
*/
|
||||
struct bfi_port_get_stats_req_s {
|
||||
|
@ -41,7 +41,7 @@ struct bfi_iocfc_cfg_s {
|
||||
u16 rsvd_1;
|
||||
u32 endian_sig; /* endian signature of host */
|
||||
|
||||
/**
|
||||
/*
|
||||
* Request and response circular queue base addresses, size and
|
||||
* shadow index pointers.
|
||||
*/
|
||||
@ -58,7 +58,7 @@ struct bfi_iocfc_cfg_s {
|
||||
struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Boot target wwn information for this port. This contains either the stored
|
||||
* or discovered boot target port wwns for the port.
|
||||
*/
|
||||
@ -75,7 +75,7 @@ struct bfi_iocfc_cfgrsp_s {
|
||||
struct bfi_pbc_s pbc_cfg;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOCFC_H2I_CFG_REQ message
|
||||
*/
|
||||
struct bfi_iocfc_cfg_req_s {
|
||||
@ -84,7 +84,7 @@ struct bfi_iocfc_cfg_req_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOCFC_I2H_CFG_REPLY message
|
||||
*/
|
||||
struct bfi_iocfc_cfg_reply_s {
|
||||
@ -95,7 +95,7 @@ struct bfi_iocfc_cfg_reply_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOCFC_H2I_SET_INTR_REQ message
|
||||
*/
|
||||
struct bfi_iocfc_set_intr_req_s {
|
||||
@ -107,7 +107,7 @@ struct bfi_iocfc_set_intr_req_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOCFC_H2I_UPDATEQ_REQ message
|
||||
*/
|
||||
struct bfi_iocfc_updateq_req_s {
|
||||
@ -119,7 +119,7 @@ struct bfi_iocfc_updateq_req_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_IOCFC_I2H_UPDATEQ_RSP message
|
||||
*/
|
||||
struct bfi_iocfc_updateq_rsp_s {
|
||||
@ -129,7 +129,7 @@ struct bfi_iocfc_updateq_rsp_s {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* H2I Messages
|
||||
*/
|
||||
union bfi_iocfc_h2i_msg_u {
|
||||
@ -140,7 +140,7 @@ union bfi_iocfc_h2i_msg_u {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* I2H Messages
|
||||
*/
|
||||
union bfi_iocfc_i2h_msg_u {
|
||||
@ -173,7 +173,7 @@ enum bfi_fcport_i2h {
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic REQ type
|
||||
*/
|
||||
struct bfi_fcport_req_s {
|
||||
@ -181,7 +181,7 @@ struct bfi_fcport_req_s {
|
||||
u32 msgtag; /* msgtag for reply */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* Generic RSP type
|
||||
*/
|
||||
struct bfi_fcport_rsp_s {
|
||||
@ -191,7 +191,7 @@ struct bfi_fcport_rsp_s {
|
||||
u32 msgtag; /* msgtag for reply */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_FCPORT_H2I_ENABLE_REQ
|
||||
*/
|
||||
struct bfi_fcport_enable_req_s {
|
||||
@ -205,7 +205,7 @@ struct bfi_fcport_enable_req_s {
|
||||
u32 rsvd2;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
|
||||
*/
|
||||
struct bfi_fcport_set_svc_params_req_s {
|
||||
@ -214,7 +214,7 @@ struct bfi_fcport_set_svc_params_req_s {
|
||||
u16 rsvd;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_FCPORT_I2H_EVENT
|
||||
*/
|
||||
struct bfi_fcport_event_s {
|
||||
@ -222,7 +222,7 @@ struct bfi_fcport_event_s {
|
||||
struct bfa_port_link_s link_state;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* BFI_FCPORT_I2H_TRUNK_SCN
|
||||
*/
|
||||
struct bfi_fcport_trunk_link_s {
|
||||
@ -243,7 +243,7 @@ struct bfi_fcport_trunk_scn_s {
|
||||
struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcport H2I message
|
||||
*/
|
||||
union bfi_fcport_h2i_msg_u {
|
||||
@ -255,7 +255,7 @@ union bfi_fcport_h2i_msg_u {
|
||||
struct bfi_fcport_req_s *pstatsclear;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* fcport I2H message
|
||||
*/
|
||||
union bfi_fcport_i2h_msg_u {
|
||||
@ -279,7 +279,7 @@ enum bfi_fcxp_i2h {
|
||||
|
||||
#define BFA_FCXP_MAX_SGES 2
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCXP send request structure
|
||||
*/
|
||||
struct bfi_fcxp_send_req_s {
|
||||
@ -299,7 +299,7 @@ struct bfi_fcxp_send_req_s {
|
||||
struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCXP send response structure
|
||||
*/
|
||||
struct bfi_fcxp_send_rsp_s {
|
||||
@ -565,14 +565,14 @@ enum bfi_ioim_i2h {
|
||||
BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* IO command DIF info
|
||||
*/
|
||||
struct bfi_ioim_dif_s {
|
||||
u32 dif_info[4];
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* FCP IO messages overview
|
||||
*
|
||||
* @note
|
||||
@ -587,7 +587,7 @@ struct bfi_ioim_req_s {
|
||||
u16 rport_hdl; /* itnim/rport firmware handle */
|
||||
struct fcp_cmnd_s cmnd; /* IO request info */
|
||||
|
||||
/**
|
||||
/*
|
||||
* SG elements array within the IO request must be double word
|
||||
* aligned. This aligment is required to optimize SGM setup for the IO.
|
||||
*/
|
||||
@ -598,7 +598,7 @@ struct bfi_ioim_req_s {
|
||||
struct bfi_ioim_dif_s dif;
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* This table shows various IO status codes from firmware and their
|
||||
* meaning. Host driver can use these status codes to further process
|
||||
* IO completions.
|
||||
@ -684,7 +684,7 @@ enum bfi_ioim_status {
|
||||
};
|
||||
|
||||
#define BFI_IOIM_SNSLEN (256)
|
||||
/**
|
||||
/*
|
||||
* I/O response message
|
||||
*/
|
||||
struct bfi_ioim_rsp_s {
|
||||
@ -746,7 +746,7 @@ enum bfi_tskim_status {
|
||||
BFI_TSKIM_STS_NOT_SUPP = 4,
|
||||
BFI_TSKIM_STS_FAILED = 5,
|
||||
|
||||
/**
|
||||
/*
|
||||
* Defined by BFA
|
||||
*/
|
||||
BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
|
||||
|
@@ -692,6 +692,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
atid, tid, status, csk, csk->state, csk->flags);

if (status == CPL_ERR_RTX_NEG_ADVICE)
goto rel_skb;

if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
status != CPL_ERR_ARP_MISS)
@@ -773,6 +773,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
{"SUN", "CSM100_R_FC"},
{"SUN", "STK6580_6780"},
{"SUN", "SUN_6180"},
{NULL, NULL},
};

@@ -117,7 +117,7 @@ static void fcoe_recv_frame(struct sk_buff *skb);

static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);

module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR);
module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
module_param_call(create_vn2vn, fcoe_create, NULL,
@@ -1243,7 +1243,6 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
struct fcoe_port *port;
struct ethhdr *eh;
unsigned int cpu;

@@ -1262,16 +1261,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
skb_tail_pointer(skb), skb_end_pointer(skb),
skb->csum, skb->dev ? skb->dev->name : "<NULL>");

/* check for mac addresses */
eh = eth_hdr(skb);
port = lport_priv(lport);
if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
eh->h_dest);
goto err;
}

if (is_fip_mode(&fcoe->ctlr) &&
compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
@@ -1291,6 +1281,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);

if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
eh->h_dest);
goto err;
}

fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
fr->ptype = ptype;
@@ -2296,7 +2296,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
union {
struct {
struct fc_rport_priv rdata;
struct fcoe_rport frport;
} buf;

@@ -4177,6 +4177,14 @@ static int ioc_general(void __user *arg, char *cmnd)
ha = gdth_find_ha(gen.ionode);
if (!ha)
return -EFAULT;

if (gen.data_len > INT_MAX)
return -EINVAL;
if (gen.sense_len > INT_MAX)
return -EINVAL;
if (gen.data_len + gen.sense_len > INT_MAX)
return -EINVAL;

if (gen.data_len + gen.sense_len != 0) {
if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
FALSE, &paddr)))

@@ -9025,6 +9025,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,

@@ -82,6 +82,7 @@

#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
#define IPR_SUBS_DEV_ID_57CC 0x035C


@@ -684,10 +684,9 @@ void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;

if (disc) {
if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
fc_disc_stop_rports(disc);
}
fc_disc_stop_rports(disc);
}

/**

@@ -58,8 +58,7 @@ struct kmem_cache *scsi_pkt_cachep;
#define FC_SRB_WRITE (1 << 0)

/*
* The SCp.ptr should be tested and set under the host lock. NULL indicates
* that the command has been retruned to the scsi layer.
* The SCp.ptr should be tested and set under the scsi_pkt_queue lock
*/
#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
@@ -1880,8 +1879,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)

lport = fsp->lp;
si = fc_get_scsi_internal(lport);
if (!fsp->cmd)
return;

/*
* if can_queue ramp down is done then try can_queue ramp up
@@ -1891,11 +1888,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fc_fcp_can_queue_ramp_up(lport);

sc_cmd = fsp->cmd;
fsp->cmd = NULL;

if (!sc_cmd->SCp.ptr)
return;

CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
@@ -1971,15 +1963,13 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
break;
}

if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) {
sc_cmd->result = (DID_REQUEUE << 16);
FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
}
if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);

spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->SCp.ptr = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->scsi_done(sc_cmd);

/* release ref from initial allocation in queue command */
@@ -1997,6 +1987,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
struct fc_fcp_pkt *fsp;
struct fc_lport *lport;
struct fc_fcp_internal *si;
int rc = FAILED;
unsigned long flags;

@@ -2006,7 +1997,8 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
else if (!lport->link_up)
return rc;

spin_lock_irqsave(lport->host->host_lock, flags);
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp = CMD_SP(sc_cmd);
if (!fsp) {
/* command completed while scsi eh was setting up */
@@ -2015,7 +2007,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
/* grab a ref so the fsp and sc_cmd cannot be relased from under us */
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(lport->host->host_lock, flags);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

if (fc_fcp_lock_pkt(fsp)) {
/* completed while we were waiting for timer to be deleted */

@@ -1447,13 +1447,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}

did = fc_frame_did(fp);

if (!did) {
FC_LPORT_DBG(lport, "Bad FLOGI response\n");
goto out;
}

if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (flp) {
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1492,8 +1486,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_lport_enter_dns(lport);
}
}
} else
} else {
FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
fc_lport_error(lport, fp);
}

out:
fc_frame_free(fp);

@@ -196,9 +196,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout + 5;
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 30;
rport->dev_loss_tmo = 1;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);


@@ -202,9 +202,12 @@ struct lpfc_stats {
uint32_t elsRcvPRLO;
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
uint32_t elsRcvRLS;
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
uint32_t elsRcvRTV;
uint32_t elsRcvECHO;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */
#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */
#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;

@@ -573,6 +578,7 @@ struct lpfc_hba {
/* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */
uint8_t fc_pref_ALPA; /* preferred AL_PA */
uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */

@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
break;
case MBX_SECURITY_MGMT:
case MBX_AUTH_PORT:
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
"is not permitted\n", pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:

@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
job = menlo->set_job;
job->dd_data = NULL; /* so timeout handler does not reply */

spin_lock_irqsave(&phba->hbalock, flags);
spin_lock(&phba->hbalock);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);

bmp = menlo->bmp;
rspiocbq = menlo->rspiocbq;

@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);

void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);

int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);

@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
(elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
elsiocb->iocb_flag |=
((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_FDISC:
elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
elsiocb->iocb_flag |=
((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_LOGO:
elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
elsiocb->iocb_flag |=
((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
}
@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
|
||||
phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
|
||||
|
||||
phba->fc_edtovResol = sp->cmn.edtovResolution;
|
||||
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
|
||||
|
||||
if (phba->fc_topology == TOPOLOGY_LOOP) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_PUBLIC_LOOP;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
} else {
|
||||
/*
|
||||
* If we are a N-port connected to a Fabric, fixup sparam's so
|
||||
* logins to devices on remote loops work.
|
||||
*/
|
||||
vport->fc_sparam.cmn.altBbCredit = 1;
|
||||
}
|
||||
|
||||
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
|
||||
@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
||||
lpfc_unreg_rpi(vport, np);
|
||||
}
|
||||
lpfc_cleanup_pending_mbox(vport);
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
|
||||
if (irsp->ulpStatus) {
|
||||
/*
|
||||
* In case of FIP mode, perform round robin FCF failover
|
||||
* In case of FIP mode, perform roundrobin FCF failover
|
||||
* due to new FCF discovery
|
||||
*/
|
||||
if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
|
||||
@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
(irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
|
||||
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
|
||||
"2611 FLOGI failed on registered "
|
||||
"FCF record fcf_index(%d), status: "
|
||||
"x%x/x%x, tmo:x%x, trying to perform "
|
||||
"round robin failover\n",
|
||||
"2611 FLOGI failed on FCF (x%x), "
|
||||
"status:x%x/x%x, tmo:x%x, perform "
|
||||
"roundrobin FCF failover\n",
|
||||
phba->fcf.current_rec.fcf_indx,
|
||||
irsp->ulpStatus, irsp->un.ulpWord[4],
|
||||
irsp->ulpTimeout);
|
||||
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
|
||||
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
|
||||
/*
|
||||
* Exhausted the eligible FCF record list,
|
||||
* fail through to retry FLOGI on current
|
||||
* FCF record.
|
||||
*/
|
||||
lpfc_printf_log(phba, KERN_WARNING,
|
||||
LOG_FIP | LOG_ELS,
|
||||
"2760 Completed one round "
|
||||
"of FLOGI FCF round robin "
|
||||
"failover list, retry FLOGI "
|
||||
"on currently registered "
|
||||
"FCF index:%d\n",
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
} else {
|
||||
lpfc_printf_log(phba, KERN_INFO,
|
||||
LOG_FIP | LOG_ELS,
|
||||
"2794 FLOGI FCF round robin "
|
||||
"failover to FCF index x%x\n",
|
||||
fcf_index);
|
||||
rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
|
||||
fcf_index);
|
||||
if (rc)
|
||||
lpfc_printf_log(phba, KERN_WARNING,
|
||||
LOG_FIP | LOG_ELS,
|
||||
"2761 FLOGI round "
|
||||
"robin FCF failover "
|
||||
"read FCF failed "
|
||||
"rc:x%x, fcf_index:"
|
||||
"%d\n", rc,
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
else
|
||||
goto out;
|
||||
}
|
||||
rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* FLOGI failure */
|
||||
@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
lpfc_nlp_put(ndlp);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
|
||||
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
goto out;
|
||||
}
|
||||
@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
if (phba->hba_flag & HBA_FIP_SUPPORT)
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
|
||||
LOG_ELS,
|
||||
"2769 FLOGI successful on FCF "
|
||||
"record: current_fcf_index:"
|
||||
"x%x, terminate FCF round "
|
||||
"robin failover process\n",
|
||||
"2769 FLOGI to FCF (x%x) "
|
||||
"completed successfully\n",
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
|
||||
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
goto out;
|
||||
}
|
||||
@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (lpfc_issue_els_flogi(vport, ndlp, 0))
|
||||
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
|
||||
/* This decrement of reference count to node shall kick off
|
||||
* the release of the node.
|
||||
*/
|
||||
lpfc_nlp_put(ndlp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
|
||||
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
|
||||
sp = (struct serv_parm *) pcmd;
|
||||
|
||||
/*
|
||||
* If we are a N-port connected to a Fabric, fix-up paramm's so logins
|
||||
* to device on remote loops work.
|
||||
*/
|
||||
if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
|
||||
sp->cmn.altBbCredit = 1;
|
||||
|
||||
if (sp->cmn.fcphLow < FC_PH_4_3)
|
||||
sp->cmn.fcphLow = FC_PH_4_3;
|
||||
|
||||
@ -3925,6 +3903,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rsp_echo_acc - Issue echo acc response
|
||||
* @vport: pointer to a virtual N_Port data structure.
|
||||
* @data: pointer to echo data to return in the accept.
|
||||
* @oldiocb: pointer to the original lpfc command iocb data structure.
|
||||
* @ndlp: pointer to a node-list data structure.
|
||||
*
|
||||
* Return code
|
||||
* 0 - Successfully issued acc echo response
|
||||
* 1 - Failed to issue acc echo response
|
||||
**/
|
||||
static int
|
||||
lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
|
||||
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_iocbq *elsiocb;
|
||||
struct lpfc_sli *psli;
|
||||
uint8_t *pcmd;
|
||||
uint16_t cmdsize;
|
||||
int rc;
|
||||
|
||||
psli = &phba->sli;
|
||||
cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
|
||||
|
||||
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
|
||||
ndlp->nlp_DID, ELS_CMD_ACC);
|
||||
if (!elsiocb)
|
||||
return 1;
|
||||
|
||||
elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
|
||||
/* Xmit ECHO ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
|
||||
elsiocb->iotag, elsiocb->iocb.ulpContext);
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
pcmd += sizeof(uint32_t);
|
||||
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
|
||||
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
|
||||
"Issue ACC ECHO: did:x%x flg:x%x",
|
||||
ndlp->nlp_DID, ndlp->nlp_flag, 0);
|
||||
|
||||
phba->fc_stat.elsXmitACC++;
|
||||
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
|
||||
lpfc_nlp_put(ndlp);
|
||||
elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
|
||||
* it could be freed */
|
||||
|
||||
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
|
||||
if (rc == IOCB_ERROR) {
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
@ -4683,6 +4719,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rcv_echo - Process an unsolicited echo iocb
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
* @cmdiocb: pointer to lpfc command iocb data structure.
|
||||
* @ndlp: pointer to a node-list data structure.
|
||||
*
|
||||
* Return code
|
||||
* 0 - Successfully processed echo iocb (currently always return 0)
|
||||
**/
|
||||
static int
|
||||
lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
uint8_t *pcmd;
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
|
||||
|
||||
/* skip over first word of echo command to find echo data */
|
||||
pcmd += sizeof(uint32_t);
|
||||
|
||||
lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
@ -4734,6 +4794,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @pmb: pointer to the driver internal queue element for mailbox command.
|
||||
*
|
||||
* This routine is the completion callback function for the MBX_READ_LNK_STAT
|
||||
* mailbox command. This callback function is to actually send the Accept
|
||||
* (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
|
||||
* collects the link statistics from the completion of the MBX_READ_LNK_STAT
|
||||
* mailbox command, constructs the RPS response with the link statistics
|
||||
* collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
|
||||
* response to the RPS.
|
||||
*
|
||||
* Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
|
||||
* will be incremented by 1 for holding the ndlp and the reference to ndlp
|
||||
* will be stored into the context1 field of the IOCB for the completion
|
||||
* callback function to the RPS Accept Response ELS IOCB command.
|
||||
*
|
||||
**/
|
||||
static void
|
||||
lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
{
|
||||
MAILBOX_t *mb;
|
||||
IOCB_t *icmd;
|
||||
struct RLS_RSP *rls_rsp;
|
||||
uint8_t *pcmd;
|
||||
struct lpfc_iocbq *elsiocb;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
uint16_t xri;
|
||||
uint32_t cmdsize;
|
||||
|
||||
mb = &pmb->u.mb;
|
||||
|
||||
ndlp = (struct lpfc_nodelist *) pmb->context2;
|
||||
xri = (uint16_t) ((unsigned long)(pmb->context1));
|
||||
pmb->context1 = NULL;
|
||||
pmb->context2 = NULL;
|
||||
|
||||
if (mb->mbxStatus) {
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
return;
|
||||
}
|
||||
|
||||
cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
|
||||
lpfc_max_els_tries, ndlp,
|
||||
ndlp->nlp_DID, ELS_CMD_ACC);
|
||||
|
||||
/* Decrement the ndlp reference count from previous mbox command */
|
||||
lpfc_nlp_put(ndlp);
|
||||
|
||||
if (!elsiocb)
|
||||
return;
|
||||
|
||||
icmd = &elsiocb->iocb;
|
||||
icmd->ulpContext = xri;
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
pcmd += sizeof(uint32_t); /* Skip past command */
|
||||
rls_rsp = (struct RLS_RSP *)pcmd;
|
||||
|
||||
rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
|
||||
rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
|
||||
rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
|
||||
rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
|
||||
rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
|
||||
rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
|
||||
|
||||
/* Xmit ELS RLS ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
|
||||
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
|
||||
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
|
||||
elsiocb->iotag, elsiocb->iocb.ulpContext,
|
||||
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
|
||||
ndlp->nlp_rpi);
|
||||
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
|
||||
phba->fc_stat.elsXmitACC++;
|
||||
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rcv_rps - Process an unsolicited rps iocb
|
||||
* lpfc_els_rcv_rls - Process an unsolicited rls iocb
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
* @cmdiocb: pointer to lpfc command iocb data structure.
|
||||
* @ndlp: pointer to a node-list data structure.
|
||||
*
|
||||
* This routine processes Read Port Status (RPL) IOCB received as an
|
||||
* ELS unsolicited event. It first checks the remote port state. If the
|
||||
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
|
||||
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
|
||||
* response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
|
||||
* for reading the HBA link statistics. It is for the callback function,
|
||||
* lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
|
||||
* to actually sending out RPL Accept (ACC) response.
|
||||
*
|
||||
* Return codes
|
||||
* 0 - Successfully processed rls iocb (currently always return 0)
|
||||
**/
|
||||
static int
|
||||
lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
LPFC_MBOXQ_t *mbox;
|
||||
struct lpfc_dmabuf *pcmd;
|
||||
struct ls_rjt stat;
|
||||
|
||||
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
|
||||
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
|
||||
/* reject the unsolicited RPS request and done with it */
|
||||
goto reject_out;
|
||||
|
||||
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
|
||||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
|
||||
if (mbox) {
|
||||
lpfc_read_lnk_stat(phba, mbox);
|
||||
mbox->context1 =
|
||||
(void *)((unsigned long) cmdiocb->iocb.ulpContext);
|
||||
mbox->context2 = lpfc_nlp_get(ndlp);
|
||||
mbox->vport = vport;
|
||||
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
|
||||
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
|
||||
!= MBX_NOT_FINISHED)
|
||||
/* Mbox completion will send ELS Response */
|
||||
return 0;
|
||||
/* Decrement reference count used for the failed mbox
|
||||
* command.
|
||||
*/
|
||||
lpfc_nlp_put(ndlp);
|
||||
mempool_free(mbox, phba->mbox_mem_pool);
|
||||
}
|
||||
reject_out:
|
||||
/* issue rejection response */
|
||||
stat.un.b.lsRjtRsvd0 = 0;
|
||||
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
|
||||
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
|
||||
stat.un.b.vendorUnique = 0;
|
||||
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
* @cmdiocb: pointer to lpfc command iocb data structure.
|
||||
* @ndlp: pointer to a node-list data structure.
|
||||
*
|
||||
* This routine processes Read Timout Value (RTV) IOCB received as an
|
||||
* ELS unsolicited event. It first checks the remote port state. If the
|
||||
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
|
||||
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
|
||||
* response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
|
||||
* Value (RTV) unsolicited IOCB event.
|
||||
*
|
||||
* Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
|
||||
* will be incremented by 1 for holding the ndlp and the reference to ndlp
|
||||
* will be stored into the context1 field of the IOCB for the completion
|
||||
* callback function to the RPS Accept Response ELS IOCB command.
|
||||
*
|
||||
* Return codes
|
||||
* 0 - Successfully processed rtv iocb (currently always return 0)
|
||||
**/
|
||||
static int
|
||||
lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct ls_rjt stat;
|
||||
struct RTV_RSP *rtv_rsp;
|
||||
uint8_t *pcmd;
|
||||
struct lpfc_iocbq *elsiocb;
|
||||
uint32_t cmdsize;
|
||||
|
||||
|
||||
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
|
||||
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
|
||||
/* reject the unsolicited RPS request and done with it */
|
||||
goto reject_out;
|
||||
|
||||
cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
|
||||
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
|
||||
lpfc_max_els_tries, ndlp,
|
||||
ndlp->nlp_DID, ELS_CMD_ACC);
|
||||
|
||||
if (!elsiocb)
|
||||
return 1;
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
|
||||
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
|
||||
pcmd += sizeof(uint32_t); /* Skip past command */
|
||||
|
||||
/* use the command's xri in the response */
|
||||
elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
|
||||
|
||||
rtv_rsp = (struct RTV_RSP *)pcmd;
|
||||
|
||||
/* populate RTV payload */
|
||||
rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
|
||||
rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
|
||||
bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
|
||||
bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
|
||||
rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
|
||||
|
||||
/* Xmit ELS RLS ACC response tag <ulpIoTag> */
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
|
||||
"2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
|
||||
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
|
||||
"Data: x%x x%x x%x\n",
|
||||
elsiocb->iotag, elsiocb->iocb.ulpContext,
|
||||
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
|
||||
ndlp->nlp_rpi,
|
||||
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
|
||||
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
|
||||
phba->fc_stat.elsXmitACC++;
|
||||
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
|
||||
lpfc_els_free_iocb(phba, elsiocb);
|
||||
return 0;
|
||||
|
||||
reject_out:
|
||||
/* issue rejection response */
|
||||
stat.un.b.lsRjtRsvd0 = 0;
|
||||
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
|
||||
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
|
||||
stat.un.b.vendorUnique = 0;
|
||||
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
* @cmdiocb: pointer to lpfc command iocb data structure.
|
||||
* @ndlp: pointer to a node-list data structure.
|
||||
@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
||||
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
|
||||
lp = (uint32_t *) pcmd->virt;
|
||||
rpl = (RPL *) (lp + 1);
|
||||
|
||||
maxsize = be32_to_cpu(rpl->maxsize);
|
||||
|
||||
/* We support only one port */
|
||||
@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_RLS:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV RLS: did:x%x/ste:x%x flg:x%x",
|
||||
did, vport->port_state, ndlp->nlp_flag);
|
||||
|
||||
phba->fc_stat.elsRcvRLS++;
|
||||
lpfc_els_rcv_rls(vport, elsiocb, ndlp);
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_RPS:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV RPS: did:x%x/ste:x%x flg:x%x",
|
||||
@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_RTV:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV RTV: did:x%x/ste:x%x flg:x%x",
|
||||
did, vport->port_state, ndlp->nlp_flag);
|
||||
phba->fc_stat.elsRcvRTV++;
|
||||
lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_RRQ:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV RRQ: did:x%x/ste:x%x flg:x%x",
|
||||
@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_ECHO:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV ECHO: did:x%x/ste:x%x flg:x%x",
|
||||
did, vport->port_state, ndlp->nlp_flag);
|
||||
|
||||
phba->fc_stat.elsRcvECHO++;
|
||||
lpfc_els_rcv_echo(vport, elsiocb, ndlp);
|
||||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
default:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
|
||||
@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
|
||||
default:
|
||||
/* Try to recover from this error */
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
|
||||
@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
lpfc_unreg_rpi(vport, np);
|
||||
}
|
||||
lpfc_cleanup_pending_mbox(vport);
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
|
||||
@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
* to update the MAC address.
|
||||
*/
|
||||
lpfc_register_new_vport(phba, vport, ndlp);
|
||||
return ;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
|
||||
|
@ -20,6 +20,7 @@
|
||||
*******************************************************************/
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/kthread.h>
|
||||
@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
|
||||
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
|
||||
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
|
||||
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
static int lpfc_fcf_inuse(struct lpfc_hba *);
|
||||
|
||||
void
|
||||
lpfc_terminate_rport_io(struct fc_rport *rport)
|
||||
@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called from the worker thread when dev_loss_tmo
|
||||
* expire.
|
||||
*/
|
||||
static void
|
||||
/**
|
||||
* lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
|
||||
* @ndlp: Pointer to remote node object.
|
||||
*
|
||||
* This function is called from the worker thread when devloss timeout timer
|
||||
* expires. For SLI4 host, this routine shall return 1 when at lease one
|
||||
* remote node, including this @ndlp, is still in use of FCF; otherwise, this
|
||||
* routine shall return 0 when there is no remote node is still in use of FCF
|
||||
* when devloss timeout happened to this @ndlp.
|
||||
**/
|
||||
static int
|
||||
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_rport_data *rdata;
|
||||
@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
int put_node;
|
||||
int put_rport;
|
||||
int warn_on = 0;
|
||||
int fcf_inuse = 0;
|
||||
|
||||
rport = ndlp->rport;
|
||||
|
||||
if (!rport)
|
||||
return;
|
||||
return fcf_inuse;
|
||||
|
||||
rdata = rport->dd_data;
|
||||
name = (uint8_t *) &ndlp->nlp_portname;
|
||||
vport = ndlp->vport;
|
||||
phba = vport->phba;
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
fcf_inuse = lpfc_fcf_inuse(phba);
|
||||
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
|
||||
"rport devlosstmo:did:x%x type:x%x id:x%x",
|
||||
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
|
||||
@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
lpfc_nlp_put(ndlp);
|
||||
if (put_rport)
|
||||
put_device(&rport->dev);
|
||||
return;
|
||||
return fcf_inuse;
|
||||
}
|
||||
|
||||
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
|
||||
@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
*name, *(name+1), *(name+2), *(name+3),
|
||||
*(name+4), *(name+5), *(name+6), *(name+7),
|
||||
ndlp->nlp_DID);
|
||||
return;
|
||||
return fcf_inuse;
|
||||
}
|
||||
|
||||
if (ndlp->nlp_type & NLP_FABRIC) {
|
||||
@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
lpfc_nlp_put(ndlp);
|
||||
if (put_rport)
|
||||
put_device(&rport->dev);
|
||||
return;
|
||||
return fcf_inuse;
|
||||
}
|
||||
|
||||
if (ndlp->nlp_sid != NLP_NO_SID) {
|
||||
@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
(ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
|
||||
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
|
||||
|
||||
return fcf_inuse;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
|
||||
* @phba: Pointer to hba context object.
|
||||
* @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
|
||||
* @nlp_did: remote node identifer with devloss timeout.
|
||||
*
|
||||
* This function is called from the worker thread after invoking devloss
|
||||
* timeout handler and releasing the reference count for the ndlp with
|
||||
* which the devloss timeout was handled for SLI4 host. For the devloss
|
||||
* timeout of the last remote node which had been in use of FCF, when this
|
||||
* routine is invoked, it shall be guaranteed that none of the remote are
|
||||
* in-use of FCF. When devloss timeout to the last remote using the FCF,
|
||||
* if the FIP engine is neither in FCF table scan process nor roundrobin
|
||||
* failover process, the in-use FCF shall be unregistered. If the FIP
|
||||
* engine is in FCF discovery process, the devloss timeout state shall
|
||||
* be set for either the FCF table scan process or roundrobin failover
|
||||
* process to unregister the in-use FCF.
|
||||
**/
|
||||
static void
|
||||
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
|
||||
uint32_t nlp_did)
|
||||
{
|
||||
/* If devloss timeout happened to a remote node when FCF had no
|
||||
* longer been in-use, do nothing.
|
||||
*/
|
||||
if (!fcf_inuse)
|
||||
return;
|
||||
|
||||
if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
|
||||
if (phba->hba_flag & HBA_DEVLOSS_TMO) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
phba->hba_flag |= HBA_DEVLOSS_TMO;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2847 Last remote node (x%x) using "
|
||||
"FCF devloss tmo\n", nlp_did);
|
||||
}
|
||||
if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2868 Devloss tmo to FCF rediscovery "
|
||||
"in progress\n");
|
||||
return;
|
||||
}
|
||||
if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2869 Devloss tmo to idle FIP engine, "
|
||||
"unreg in-use FCF and rescan.\n");
|
||||
/* Unregister in-use FCF and rescan */
|
||||
lpfc_unregister_fcf_rescan(phba);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (phba->hba_flag & FCF_TS_INPROG)
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2870 FCF table scan in progress\n");
|
||||
if (phba->hba_flag & FCF_RR_INPROG)
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2871 FLOGI roundrobin FCF failover "
|
||||
"in progress\n");
|
||||
}
|
||||
lpfc_unregister_unused_fcf(phba);
|
||||
}
|
||||
|
||||
@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
|
||||
struct lpfc_work_evt *evtp = NULL;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
int free_evt;
|
||||
int fcf_inuse;
|
||||
uint32_t nlp_did;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
while (!list_empty(&phba->work_list)) {
|
||||
@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
|
||||
break;
|
||||
case LPFC_EVT_DEV_LOSS:
|
||||
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
|
||||
lpfc_dev_loss_tmo_handler(ndlp);
|
||||
fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
|
||||
free_evt = 0;
|
||||
/* decrement the node reference count held for
|
||||
* this queued work
|
||||
*/
|
||||
nlp_did = ndlp->nlp_DID;
|
||||
lpfc_nlp_put(ndlp);
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_post_dev_loss_tmo_handler(phba,
|
||||
fcf_inuse,
|
||||
nlp_did);
|
||||
break;
|
||||
case LPFC_EVT_ONLINE:
|
||||
if (phba->link_state < LPFC_LINK_DOWN)
|
||||
@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
|
||||
: NLP_EVT_DEVICE_RECOVERY);
|
||||
}
|
||||
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
|
||||
@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
"2017 REG_FCFI mbxStatus error x%x "
|
||||
"HBA state x%x\n",
|
||||
mboxq->u.mb.mbxStatus, vport->port_state);
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return;
|
||||
goto fail_out;
|
||||
}
|
||||
|
||||
/* Start FCoE discovery by sending a FLOGI. */
|
||||
@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag |= FCF_REGISTERED;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* If there is a pending FCoE event, restart FCF table scan. */
|
||||
if (lpfc_check_pending_fcoe_event(phba, 1)) {
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return;
|
||||
}
|
||||
if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
|
||||
goto fail_out;
|
||||
|
||||
/* Mark successful completion of FCF table scan */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (vport->port_state != LPFC_FLOGI)
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
if (vport->port_state != LPFC_FLOGI) {
|
||||
phba->hba_flag |= FCF_RR_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_initial_flogi(vport);
|
||||
goto out;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
goto out;
|
||||
|
||||
fail_out:
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_RR_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
out:
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
|
||||
int rc;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
||||
/* If the FCF is not availabe do nothing. */
|
||||
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
|
||||
/* The FCF is already registered, start discovery */
|
||||
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
|
||||
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (phba->pport->port_state != LPFC_FLOGI)
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
if (phba->pport->port_state != LPFC_FLOGI) {
|
||||
phba->hba_flag |= FCF_RR_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_initial_flogi(phba->pport);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
|
||||
GFP_KERNEL);
|
||||
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!fcf_mbxq) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
|
||||
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
|
||||
if (rc == MBX_NOT_FINISHED) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
|
||||
}
|
||||
@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
|
||||
* FCF discovery, no need to restart FCF discovery.
|
||||
*/
|
||||
if ((phba->link_state >= LPFC_LINK_UP) &&
|
||||
(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
|
||||
(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
|
||||
return 0;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
|
||||
lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
|
||||
} else {
|
||||
/*
|
||||
* Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
|
||||
* Do not continue FCF discovery and clear FCF_TS_INPROG
|
||||
* flag
|
||||
*/
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
|
||||
"2833 Stop FCF discovery process due to link "
|
||||
"state change (x%x)\n", phba->link_state);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
|
||||
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
@ -1728,6 +1828,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
|
||||
* @vport: Pointer to vport object.
|
||||
* @fcf_index: index to next fcf.
|
||||
*
|
||||
* This function processing the roundrobin fcf failover to next fcf index.
|
||||
* When this function is invoked, there will be a current fcf registered
|
||||
* for flogi.
|
||||
* Return: 0 for continue retrying flogi on currently registered fcf;
|
||||
* 1 for stop flogi on currently registered fcf;
|
||||
*/
|
||||
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
int rc;
|
||||
|
||||
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (phba->hba_flag & HBA_DEVLOSS_TMO) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2872 Devloss tmo with no eligible "
|
||||
"FCF, unregister in-use FCF (x%x) "
|
||||
"and rescan FCF table\n",
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
lpfc_unregister_fcf_rescan(phba);
|
||||
goto stop_flogi_current_fcf;
|
||||
}
|
||||
/* Mark the end to FLOGI roundrobin failover */
|
||||
phba->hba_flag &= ~FCF_RR_INPROG;
|
||||
/* Allow action to new fcf asynchronous event */
|
||||
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2865 No FCF available, stop roundrobin FCF "
|
||||
"failover and change port state:x%x/x%x\n",
|
||||
phba->pport->port_state, LPFC_VPORT_UNKNOWN);
|
||||
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
|
||||
goto stop_flogi_current_fcf;
|
||||
} else {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
|
||||
"2794 Try FLOGI roundrobin FCF failover to "
|
||||
"(x%x)\n", fcf_index);
|
||||
rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
|
||||
if (rc)
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
|
||||
"2761 FLOGI roundrobin FCF failover "
|
||||
"failed (rc:x%x) to read FCF (x%x)\n",
|
||||
rc, phba->fcf.current_rec.fcf_indx);
|
||||
else
|
||||
goto stop_flogi_current_fcf;
|
||||
}
|
||||
return 0;
|
||||
|
||||
stop_flogi_current_fcf:
|
||||
lpfc_can_disctmo(vport);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
int rc;
|
||||
|
||||
/* If there is pending FCoE event restart FCF table scan */
|
||||
if (lpfc_check_pending_fcoe_event(phba, 0)) {
|
||||
if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
return;
|
||||
}
|
||||
@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
|
||||
&next_fcf_index);
|
||||
if (!new_fcf_record) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2765 Mailbox command READ_FCF_RECORD "
|
||||
"failed to retrieve a FCF record.\n");
|
||||
/* Let next new FCF event trigger fast failover */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
return;
|
||||
@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
/*
|
||||
* If the fcf record does not match with connect list entries
|
||||
* read the next entry; otherwise, this is an eligible FCF
|
||||
* record for round robin FCF failover.
|
||||
* record for roundrobin FCF failover.
|
||||
*/
|
||||
if (!rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2781 FCF record (x%x) failed FCF "
|
||||
"connection list check, fcf_avail:x%x, "
|
||||
"fcf_valid:x%x\n",
|
||||
"2781 FCF (x%x) failed connection "
|
||||
"list check: (x%x/x%x)\n",
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record),
|
||||
bf_get(lpfc_fcf_record_fcf_avail,
|
||||
@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
|
||||
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
|
||||
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
|
||||
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
|
||||
phba->fcf.current_rec.fcf_indx) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2862 FCF (x%x) matches property "
|
||||
"of in-use FCF (x%x)\n",
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record),
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
goto read_next_fcf;
|
||||
}
|
||||
/*
|
||||
* In case the current in-use FCF record becomes
|
||||
* invalid/unavailable during FCF discovery that
|
||||
@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2835 Invalid in-use FCF "
|
||||
"record (x%x) reported, "
|
||||
"entering fast FCF failover "
|
||||
"mode scanning.\n",
|
||||
"(x%x), enter FCF failover "
|
||||
"table scan.\n",
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
|
||||
@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
if (phba->fcf.fcf_flag & FCF_IN_USE) {
|
||||
if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
|
||||
new_fcf_record, vlan_id)) {
|
||||
phba->fcf.fcf_flag |= FCF_AVAILABLE;
|
||||
if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
|
||||
/* Stop FCF redisc wait timer if pending */
|
||||
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
|
||||
else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
|
||||
/* If in fast failover, mark it's completed */
|
||||
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2836 The new FCF record (x%x) "
|
||||
"matches the in-use FCF record "
|
||||
"(x%x)\n",
|
||||
phba->fcf.current_rec.fcf_indx,
|
||||
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
|
||||
phba->fcf.current_rec.fcf_indx) {
|
||||
phba->fcf.fcf_flag |= FCF_AVAILABLE;
|
||||
if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
|
||||
/* Stop FCF redisc wait timer */
|
||||
__lpfc_sli4_stop_fcf_redisc_wait_timer(
|
||||
phba);
|
||||
else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
|
||||
/* Fast failover, mark completed */
|
||||
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2836 New FCF matches in-use "
|
||||
"FCF (x%x)\n",
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
goto out;
|
||||
} else
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2863 New FCF (x%x) matches "
|
||||
"property of in-use FCF (x%x)\n",
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record));
|
||||
goto out;
|
||||
new_fcf_record),
|
||||
phba->fcf.current_rec.fcf_indx);
|
||||
}
|
||||
/*
|
||||
* Read next FCF record from HBA searching for the matching
|
||||
@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
*/
|
||||
if (fcf_rec) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2840 Update current FCF record "
|
||||
"with initial FCF record (x%x)\n",
|
||||
"2840 Update initial FCF candidate "
|
||||
"with FCF (x%x)\n",
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record));
|
||||
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
|
||||
@ -1984,20 +2158,28 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
*/
|
||||
if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2782 No suitable FCF record "
|
||||
"found during this round of "
|
||||
"post FCF rediscovery scan: "
|
||||
"fcf_evt_tag:x%x, fcf_index: "
|
||||
"x%x\n",
|
||||
"2782 No suitable FCF found: "
|
||||
"(x%x/x%x)\n",
|
||||
phba->fcoe_eventtag_at_fcf_scan,
|
||||
bf_get(lpfc_fcf_record_fcf_index,
|
||||
new_fcf_record));
|
||||
/*
|
||||
* Let next new FCF event trigger fast
|
||||
* failover
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
if (phba->hba_flag & HBA_DEVLOSS_TMO) {
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
/* Unregister in-use FCF and rescan */
|
||||
lpfc_printf_log(phba, KERN_INFO,
|
||||
LOG_FIP,
|
||||
"2864 On devloss tmo "
|
||||
"unreg in-use FCF and "
|
||||
"rescan FCF table\n");
|
||||
lpfc_unregister_fcf_rescan(phba);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Let next new FCF event trigger fast failover
|
||||
*/
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
@ -2015,9 +2197,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
|
||||
/* Replace in-use record with the new record */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2842 Replace the current in-use "
|
||||
"FCF record (x%x) with failover FCF "
|
||||
"record (x%x)\n",
|
||||
"2842 Replace in-use FCF (x%x) "
|
||||
"with failover FCF (x%x)\n",
|
||||
phba->fcf.current_rec.fcf_indx,
|
||||
phba->fcf.failover_rec.fcf_indx);
|
||||
memcpy(&phba->fcf.current_rec,
|
||||
@ -2029,15 +2210,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
* FCF failover.
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &=
|
||||
~(FCF_REDISC_FOV | FCF_REDISC_RRU);
|
||||
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
/*
|
||||
* Set up the initial registered FCF index for FLOGI
|
||||
* round robin FCF failover.
|
||||
*/
|
||||
phba->fcf.fcf_rr_init_indx =
|
||||
phba->fcf.failover_rec.fcf_indx;
|
||||
/* Register to the new FCF record */
|
||||
lpfc_register_fcf(phba);
|
||||
} else {
|
||||
@ -2069,28 +2243,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
LPFC_FCOE_FCF_GET_FIRST);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Otherwise, initial scan or post linkdown rescan,
|
||||
* register with the best FCF record found so far
|
||||
* through the FCF scanning process.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Mark the initial FCF discovery completed and
|
||||
* the start of the first round of the roundrobin
|
||||
* FCF failover.
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &=
|
||||
~(FCF_INIT_DISC | FCF_REDISC_RRU);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
/*
|
||||
* Set up the initial registered FCF index for FLOGI
|
||||
* round robin FCF failover
|
||||
*/
|
||||
phba->fcf.fcf_rr_init_indx =
|
||||
phba->fcf.current_rec.fcf_indx;
|
||||
/* Register to the new FCF record */
|
||||
lpfc_register_fcf(phba);
|
||||
}
|
||||
@@ -2106,11 +2258,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
}
|
||||
|
||||
/**
* lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
* lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
*
* This is the callback function for FLOGI failure round robin FCF failover
* This is the callback function for FLOGI failure roundrobin FCF failover
* read FCF record mailbox command from the eligible FCF record bmask for
* performing the failover. If the FCF read back is not valid/available, it
* fails through to retrying FLOGI to the currently registered FCF again.
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
uint16_t vlan_id;
int rc;
|
||||
/* If link state is not up, stop the round robin failover process */
|
||||
/* If link state is not up, stop the roundrobin failover process */
|
||||
if (phba->link_state < LPFC_LINK_UP) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
|
||||
phba->hba_flag &= ~FCF_RR_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
return;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Parse the FCF record from the non-embedded mailbox command */
|
||||
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2766 Mailbox command READ_FCF_RECORD "
|
||||
"failed to retrieve a FCF record.\n");
|
||||
goto out;
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
/* Get the needed parameters from FCF record */
|
||||
lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
|
||||
&addr_mode, &vlan_id);
|
||||
rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
|
||||
&addr_mode, &vlan_id);
|
||||
|
||||
/* Log the FCF record information if turned on */
|
||||
lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
|
||||
next_fcf_index);
|
||||
|
||||
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
|
||||
if (!rc) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2848 Remove ineligible FCF (x%x) from "
|
||||
"from roundrobin bmask\n", fcf_index);
|
||||
/* Clear roundrobin bmask bit for ineligible FCF */
|
||||
lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
|
||||
/* Perform next round of roundrobin FCF failover */
|
||||
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
|
||||
rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
|
||||
if (rc)
|
||||
goto out;
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
if (fcf_index == phba->fcf.current_rec.fcf_indx) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2760 Perform FLOGI roundrobin FCF failover: "
|
||||
"FCF (x%x) back to FCF (x%x)\n",
|
||||
phba->fcf.current_rec.fcf_indx, fcf_index);
|
||||
/* Wait 500 ms before retrying FLOGI to current FCF */
|
||||
msleep(500);
|
||||
lpfc_initial_flogi(phba->pport);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Upload new FCF record to the failover FCF record */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2834 Update the current FCF record (x%x) "
|
||||
"with the next FCF record (x%x)\n",
|
||||
phba->fcf.failover_rec.fcf_indx,
|
||||
bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
|
||||
"2834 Update current FCF (x%x) with new FCF (x%x)\n",
|
||||
phba->fcf.failover_rec.fcf_indx, fcf_index);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
|
||||
new_fcf_record, addr_mode, vlan_id,
|
||||
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
sizeof(struct lpfc_fcf_rec));
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2783 FLOGI round robin FCF failover from FCF "
|
||||
"(x%x) to FCF (x%x).\n",
|
||||
current_fcf_index,
|
||||
bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
|
||||
"2783 Perform FLOGI roundrobin FCF failover: FCF "
|
||||
"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
|
||||
|
||||
error_out:
|
||||
lpfc_register_fcf(phba);
|
||||
out:
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
lpfc_register_fcf(phba);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2194,10 +2370,10 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
* @mboxq: pointer to mailbox object.
|
||||
*
|
||||
* This is the callback function of read FCF record mailbox command for
|
||||
* updating the eligible FCF bmask for FLOGI failure round robin FCF
|
||||
* updating the eligible FCF bmask for FLOGI failure roundrobin FCF
|
||||
* failover when a new FCF event happened. If the FCF read back is
|
||||
* valid/available and it passes the connection list check, it updates
|
||||
* the bmask for the eligible FCF record for round robin failover.
|
||||
* the bmask for the eligible FCF record for roundrobin failover.
|
||||
*/
|
||||
void
|
||||
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
|
||||
* and get the FCF Table.
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (phba->hba_flag & FCF_DISC_INPROGRESS) {
|
||||
if (phba->hba_flag & FCF_TS_INPROG) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return;
|
||||
}
|
||||
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
|
||||
LPFC_MBOXQ_t *mbox;
|
||||
int rc;
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
lpfc_sli4_unreg_all_rpis(vport);
|
||||
return;
|
||||
}
|
||||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
|
||||
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
}
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
/* Cleanup REG_LOGIN completions which are not yet processed */
|
||||
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
|
||||
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
|
||||
(ndlp != (struct lpfc_nodelist *) mb->context2))
|
||||
continue;
|
||||
|
||||
mb->context2 = NULL;
|
||||
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
|
||||
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
|
||||
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
|
||||
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
|
||||
if (ndlp)
|
||||
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
|
||||
lpfc_cleanup_pending_mbox(vports[i]);
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_sli4_unreg_all_rpis(vports[i]);
|
||||
lpfc_mbx_unreg_vpi(vports[i]);
|
||||
shost = lpfc_shost_from_vport(vports[i]);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
|
@@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */
uint32_t crcCnt;
} RPS_RSP;

struct RLS { /* Structure is in Big Endian format */
uint32_t rls;
#define rls_rsvd_SHIFT 24
#define rls_rsvd_MASK 0x000000ff
#define rls_rsvd_WORD rls
#define rls_did_SHIFT 0
#define rls_did_MASK 0x00ffffff
#define rls_did_WORD rls
};

struct RLS_RSP { /* Structure is in Big Endian format */
uint32_t linkFailureCnt;
uint32_t lossSyncCnt;
uint32_t lossSignalCnt;
uint32_t primSeqErrCnt;
uint32_t invalidXmitWord;
uint32_t crcCnt;
};

struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
uint32_t edtov;
uint32_t qtov;
#define qtov_rsvd0_SHIFT 28
#define qtov_rsvd0_MASK 0x0000000f
#define qtov_rsvd0_WORD qtov /* reserved */
#define qtov_edtovres_SHIFT 27
#define qtov_edtovres_MASK 0x00000001
#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */
#define qtov__rsvd1_SHIFT 19
#define qtov_rsvd1_MASK 0x0000003f
#define qtov_rsvd1_WORD qtov /* reserved */
#define qtov_rttov_SHIFT 18
#define qtov_rttov_MASK 0x00000001
#define qtov_rttov_WORD qtov /* R_T_TOV value */
#define qtov_rsvd2_SHIFT 0
#define qtov_rsvd2_MASK 0x0003ffff
#define qtov_rsvd2_WORD qtov /* reserved */
};


typedef struct _RPL { /* Structure is in Big Endian format */
uint32_t maxsize;
uint32_t index;
@ -424,79 +424,6 @@ struct lpfc_rcqe {
|
||||
#define FCOE_SOFn3 0x36
|
||||
};
|
||||
|
||||
struct lpfc_wqe_generic{
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t word3;
|
||||
uint32_t word4;
|
||||
uint32_t word5;
|
||||
uint32_t word6;
|
||||
#define lpfc_wqe_gen_context_SHIFT 16
|
||||
#define lpfc_wqe_gen_context_MASK 0x0000FFFF
|
||||
#define lpfc_wqe_gen_context_WORD word6
|
||||
#define lpfc_wqe_gen_xri_SHIFT 0
|
||||
#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
|
||||
#define lpfc_wqe_gen_xri_WORD word6
|
||||
uint32_t word7;
|
||||
#define lpfc_wqe_gen_lnk_SHIFT 23
|
||||
#define lpfc_wqe_gen_lnk_MASK 0x00000001
|
||||
#define lpfc_wqe_gen_lnk_WORD word7
|
||||
#define lpfc_wqe_gen_erp_SHIFT 22
|
||||
#define lpfc_wqe_gen_erp_MASK 0x00000001
|
||||
#define lpfc_wqe_gen_erp_WORD word7
|
||||
#define lpfc_wqe_gen_pu_SHIFT 20
|
||||
#define lpfc_wqe_gen_pu_MASK 0x00000003
|
||||
#define lpfc_wqe_gen_pu_WORD word7
|
||||
#define lpfc_wqe_gen_class_SHIFT 16
|
||||
#define lpfc_wqe_gen_class_MASK 0x00000007
|
||||
#define lpfc_wqe_gen_class_WORD word7
|
||||
#define lpfc_wqe_gen_command_SHIFT 8
|
||||
#define lpfc_wqe_gen_command_MASK 0x000000FF
|
||||
#define lpfc_wqe_gen_command_WORD word7
|
||||
#define lpfc_wqe_gen_status_SHIFT 4
|
||||
#define lpfc_wqe_gen_status_MASK 0x0000000F
|
||||
#define lpfc_wqe_gen_status_WORD word7
|
||||
#define lpfc_wqe_gen_ct_SHIFT 2
|
||||
#define lpfc_wqe_gen_ct_MASK 0x00000003
|
||||
#define lpfc_wqe_gen_ct_WORD word7
|
||||
uint32_t abort_tag;
|
||||
uint32_t word9;
|
||||
#define lpfc_wqe_gen_request_tag_SHIFT 0
|
||||
#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
|
||||
#define lpfc_wqe_gen_request_tag_WORD word9
|
||||
uint32_t word10;
|
||||
#define lpfc_wqe_gen_ccp_SHIFT 24
|
||||
#define lpfc_wqe_gen_ccp_MASK 0x000000FF
|
||||
#define lpfc_wqe_gen_ccp_WORD word10
|
||||
#define lpfc_wqe_gen_ccpe_SHIFT 23
|
||||
#define lpfc_wqe_gen_ccpe_MASK 0x00000001
|
||||
#define lpfc_wqe_gen_ccpe_WORD word10
|
||||
#define lpfc_wqe_gen_pv_SHIFT 19
|
||||
#define lpfc_wqe_gen_pv_MASK 0x00000001
|
||||
#define lpfc_wqe_gen_pv_WORD word10
|
||||
#define lpfc_wqe_gen_pri_SHIFT 16
|
||||
#define lpfc_wqe_gen_pri_MASK 0x00000007
|
||||
#define lpfc_wqe_gen_pri_WORD word10
|
||||
uint32_t word11;
|
||||
#define lpfc_wqe_gen_cq_id_SHIFT 16
|
||||
#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
|
||||
#define lpfc_wqe_gen_cq_id_WORD word11
|
||||
#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
|
||||
#define lpfc_wqe_gen_wqec_SHIFT 7
|
||||
#define lpfc_wqe_gen_wqec_MASK 0x00000001
|
||||
#define lpfc_wqe_gen_wqec_WORD word11
|
||||
#define ELS_ID_FLOGI 3
|
||||
#define ELS_ID_FDISC 2
|
||||
#define ELS_ID_LOGO 1
|
||||
#define ELS_ID_DEFAULT 0
|
||||
#define lpfc_wqe_gen_els_id_SHIFT 4
|
||||
#define lpfc_wqe_gen_els_id_MASK 0x00000003
|
||||
#define lpfc_wqe_gen_els_id_WORD word11
|
||||
#define lpfc_wqe_gen_cmd_type_SHIFT 0
|
||||
#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
|
||||
#define lpfc_wqe_gen_cmd_type_WORD word11
|
||||
uint32_t payload[4];
|
||||
};
|
||||
|
||||
struct lpfc_rqe {
|
||||
uint32_t address_hi;
|
||||
uint32_t address_lo;
|
||||
@@ -2279,9 +2206,36 @@ struct wqe_common {
|
||||
#define wqe_reqtag_MASK 0x0000FFFF
|
||||
#define wqe_reqtag_WORD word9
|
||||
#define wqe_rcvoxid_SHIFT 16
|
||||
#define wqe_rcvoxid_MASK 0x0000FFFF
|
||||
#define wqe_rcvoxid_WORD word9
|
||||
#define wqe_rcvoxid_MASK 0x0000FFFF
|
||||
#define wqe_rcvoxid_WORD word9
|
||||
uint32_t word10;
|
||||
#define wqe_ebde_cnt_SHIFT 0
|
||||
#define wqe_ebde_cnt_MASK 0x00000007
|
||||
#define wqe_ebde_cnt_WORD word10
|
||||
#define wqe_lenloc_SHIFT 7
|
||||
#define wqe_lenloc_MASK 0x00000003
|
||||
#define wqe_lenloc_WORD word10
|
||||
#define LPFC_WQE_LENLOC_NONE 0
|
||||
#define LPFC_WQE_LENLOC_WORD3 1
|
||||
#define LPFC_WQE_LENLOC_WORD12 2
|
||||
#define LPFC_WQE_LENLOC_WORD4 3
|
||||
#define wqe_qosd_SHIFT 9
|
||||
#define wqe_qosd_MASK 0x00000001
|
||||
#define wqe_qosd_WORD word10
|
||||
#define wqe_xbl_SHIFT 11
|
||||
#define wqe_xbl_MASK 0x00000001
|
||||
#define wqe_xbl_WORD word10
|
||||
#define wqe_iod_SHIFT 13
|
||||
#define wqe_iod_MASK 0x00000001
|
||||
#define wqe_iod_WORD word10
|
||||
#define LPFC_WQE_IOD_WRITE 0
|
||||
#define LPFC_WQE_IOD_READ 1
|
||||
#define wqe_dbde_SHIFT 14
|
||||
#define wqe_dbde_MASK 0x00000001
|
||||
#define wqe_dbde_WORD word10
|
||||
#define wqe_wqes_SHIFT 15
|
||||
#define wqe_wqes_MASK 0x00000001
|
||||
#define wqe_wqes_WORD word10
|
||||
#define wqe_pri_SHIFT 16
|
||||
#define wqe_pri_MASK 0x00000007
|
||||
#define wqe_pri_WORD word10
|
||||
@ -2295,18 +2249,26 @@ struct wqe_common {
|
||||
#define wqe_ccpe_MASK 0x00000001
|
||||
#define wqe_ccpe_WORD word10
|
||||
#define wqe_ccp_SHIFT 24
|
||||
#define wqe_ccp_MASK 0x000000ff
|
||||
#define wqe_ccp_WORD word10
|
||||
#define wqe_ccp_MASK 0x000000ff
|
||||
#define wqe_ccp_WORD word10
|
||||
uint32_t word11;
|
||||
#define wqe_cmd_type_SHIFT 0
|
||||
#define wqe_cmd_type_MASK 0x0000000f
|
||||
#define wqe_cmd_type_WORD word11
|
||||
#define wqe_wqec_SHIFT 7
|
||||
#define wqe_wqec_MASK 0x00000001
|
||||
#define wqe_wqec_WORD word11
|
||||
#define wqe_cqid_SHIFT 16
|
||||
#define wqe_cqid_MASK 0x0000ffff
|
||||
#define wqe_cqid_WORD word11
|
||||
#define wqe_cmd_type_SHIFT 0
|
||||
#define wqe_cmd_type_MASK 0x0000000f
|
||||
#define wqe_cmd_type_WORD word11
|
||||
#define wqe_els_id_SHIFT 4
|
||||
#define wqe_els_id_MASK 0x00000003
|
||||
#define wqe_els_id_WORD word11
|
||||
#define LPFC_ELS_ID_FLOGI 3
|
||||
#define LPFC_ELS_ID_FDISC 2
|
||||
#define LPFC_ELS_ID_LOGO 1
|
||||
#define LPFC_ELS_ID_DEFAULT 0
|
||||
#define wqe_wqec_SHIFT 7
|
||||
#define wqe_wqec_MASK 0x00000001
|
||||
#define wqe_wqec_WORD word11
|
||||
#define wqe_cqid_SHIFT 16
|
||||
#define wqe_cqid_MASK 0x0000ffff
|
||||
#define wqe_cqid_WORD word11
|
||||
#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
|
||||
};
|
||||
|
||||
struct wqe_did {
|
||||
@ -2325,6 +2287,15 @@ struct wqe_did {
|
||||
#define wqe_xmit_bls_xo_WORD word5
|
||||
};
|
||||
|
||||
struct lpfc_wqe_generic{
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t word3;
|
||||
uint32_t word4;
|
||||
uint32_t word5;
|
||||
struct wqe_common wqe_com;
|
||||
uint32_t payload[4];
|
||||
};
|
||||
|
||||
struct els_request64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t payload_len;
|
||||
@ -2356,9 +2327,9 @@ struct els_request64_wqe {
|
||||
|
||||
struct xmit_els_rsp64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t rsvd3;
|
||||
uint32_t response_payload_len;
|
||||
uint32_t rsvd4;
|
||||
struct wqe_did wqe_dest;
|
||||
struct wqe_did wqe_dest;
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
uint32_t rsvd_12_15[4];
|
||||
};
|
||||
@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl {
|
||||
|
||||
struct xmit_seq64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t paylaod_offset;
|
||||
uint32_t rsvd3;
|
||||
uint32_t relative_offset;
|
||||
struct wqe_rctl_dfctl wge_ctl;
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe {
|
||||
};
|
||||
struct xmit_bcast64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t paylaod_len;
|
||||
uint32_t seq_payload_len;
|
||||
uint32_t rsvd4;
|
||||
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe {
|
||||
|
||||
struct gen_req64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t command_len;
|
||||
uint32_t payload_len;
|
||||
uint32_t request_payload_len;
|
||||
uint32_t relative_offset;
|
||||
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
uint32_t rsvd_12_15[4];
|
||||
@ -2480,7 +2451,7 @@ struct abort_cmd_wqe {
|
||||
|
||||
struct fcp_iwrite64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t payload_len;
|
||||
uint32_t payload_offset_len;
|
||||
uint32_t total_xfer_len;
|
||||
uint32_t initial_xfer_len;
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe {
|
||||
|
||||
struct fcp_iread64_wqe {
|
||||
struct ulp_bde64 bde;
|
||||
uint32_t payload_len; /* word 3 */
|
||||
uint32_t payload_offset_len; /* word 3 */
|
||||
uint32_t total_xfer_len; /* word 4 */
|
||||
uint32_t rsrvd5; /* word 5 */
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe {
|
||||
};
|
||||
|
||||
struct fcp_icmnd64_wqe {
|
||||
struct ulp_bde64 bde; /* words 0-2 */
|
||||
uint32_t rsrvd[3]; /* words 3-5 */
|
||||
struct ulp_bde64 bde; /* words 0-2 */
|
||||
uint32_t rsrvd3; /* word 3 */
|
||||
uint32_t rsrvd4; /* word 4 */
|
||||
uint32_t rsrvd5; /* word 5 */
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
uint32_t rsvd_12_15[4]; /* word 12-15 */
|
||||
uint32_t rsvd_12_15[4]; /* word 12-15 */
|
||||
};
|
||||
|
||||
|
||||
|
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
|
||||
* @phba: pointer to lpfc HBA data structure.
|
||||
@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
|
||||
void
|
||||
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
|
||||
{
|
||||
/* Clear pending FCF rediscovery wait and failover in progress flags */
|
||||
phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
|
||||
FCF_DEAD_DISC |
|
||||
FCF_ACVL_DISC);
|
||||
/* Clear pending FCF rediscovery wait flag */
|
||||
phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
|
||||
|
||||
/* Now, try to stop the timer */
|
||||
del_timer(&phba->fcf.redisc_wait);
|
||||
}
|
||||
@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
|
||||
return;
|
||||
}
|
||||
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
|
||||
/* Clear failover in progress flags */
|
||||
phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
|
||||
@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
|
||||
phba->fcf.fcf_flag |= FCF_REDISC_EVT;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2776 FCF rediscover wait timer expired, post "
|
||||
"a worker thread event for FCF table scan\n");
|
||||
"2776 FCF rediscover quiescent timer expired\n");
|
||||
/* wake up worker thread */
|
||||
lpfc_worker_wake_up(phba);
|
||||
}
|
||||
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
|
||||
if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
|
||||
LOG_DISCOVERY,
|
||||
"2546 New FCF found event: "
|
||||
"evt_tag:x%x, fcf_index:x%x\n",
|
||||
"2546 New FCF event, evt_tag:x%x, "
|
||||
"index:x%x\n",
|
||||
acqe_fcoe->event_tag,
|
||||
acqe_fcoe->index);
|
||||
else
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
|
||||
LOG_DISCOVERY,
|
||||
"2788 FCF parameter modified event: "
|
||||
"evt_tag:x%x, fcf_index:x%x\n",
|
||||
"2788 FCF param modified event, "
|
||||
"evt_tag:x%x, index:x%x\n",
|
||||
acqe_fcoe->event_tag,
|
||||
acqe_fcoe->index);
|
||||
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
|
||||
/*
|
||||
* During period of FCF discovery, read the FCF
|
||||
* table record indexed by the event to update
|
||||
* FCF round robin failover eligible FCF bmask.
|
||||
* FCF roundrobin failover eligible FCF bmask.
|
||||
*/
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
|
||||
LOG_DISCOVERY,
|
||||
"2779 Read new FCF record with "
|
||||
"fcf_index:x%x for updating FCF "
|
||||
"round robin failover bmask\n",
|
||||
"2779 Read FCF (x%x) for updating "
|
||||
"roundrobin FCF failover bmask\n",
|
||||
acqe_fcoe->index);
|
||||
rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
|
||||
}
|
||||
|
||||
/* If the FCF discovery is in progress, do nothing. */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
if (phba->hba_flag & FCF_DISC_INPROGRESS) {
|
||||
if (phba->hba_flag & FCF_TS_INPROG) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
break;
|
||||
}
|
||||
@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
|
||||
|
||||
/* Otherwise, scan the entire FCF table and re-discover SAN */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
|
||||
"2770 Start FCF table scan due to new FCF "
|
||||
"event: evt_tag:x%x, fcf_index:x%x\n",
|
||||
"2770 Start FCF table scan per async FCF "
|
||||
"event, evt_tag:x%x, index:x%x\n",
|
||||
acqe_fcoe->event_tag, acqe_fcoe->index);
|
||||
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
|
||||
LPFC_FCOE_FCF_GET_FIRST);
|
||||
if (rc)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
|
||||
"2547 Issue FCF scan read FCF mailbox "
|
||||
"command failed 0x%x\n", rc);
|
||||
"command failed (x%x)\n", rc);
|
||||
break;
|
||||
|
||||
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
|
||||
@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
|
||||
|
||||
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
|
||||
"2549 FCF disconnected from network index 0x%x"
|
||||
" tag 0x%x\n", acqe_fcoe->index,
|
||||
acqe_fcoe->event_tag);
|
||||
"2549 FCF (x%x) disconnected from network, "
|
||||
"tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
|
||||
/*
|
||||
* If we are in the middle of FCF failover process, clear
|
||||
* the corresponding FCF bit in the roundrobin bitmap.
|
||||
@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
|
||||
LOG_DISCOVERY,
|
||||
"2773 Start FCF fast failover due "
|
||||
"to CVL event: evt_tag:x%x\n",
|
||||
acqe_fcoe->event_tag);
|
||||
"2773 Start FCF failover per CVL, "
|
||||
"evt_tag:x%x\n", acqe_fcoe->event_tag);
|
||||
rc = lpfc_sli4_redisc_fcf_table(phba);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
|
||||
@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
|
||||
|
||||
/* Scan FCF table from the first entry to re-discover SAN */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
|
||||
"2777 Start FCF table scan after FCF "
|
||||
"rediscovery quiescent period over\n");
|
||||
"2777 Start post-quiescent FCF table scan\n");
|
||||
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
|
||||
if (rc)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
|
||||
@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
||||
goto out_free_active_sgl;
|
||||
}
|
||||
|
||||
/* Allocate eligible FCF bmask memory for FCF round robin failover */
|
||||
/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
|
||||
longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
|
||||
phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
|
||||
GFP_KERNEL);
|
||||
@ -7270,6 +7267,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
* @phba: Pointer to HBA context object.
*
* This function is called in the SLI4 code path to wait for completion
* of device's XRIs exchange busy. It will check the XRI exchange busy
* on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
* that, it will check the XRI exchange busy on outstanding FCP and ELS
* I/Os every 30 seconds, log error message, and wait forever. Only when
* all XRI exchange busy complete, the driver unload shall proceed with
* invoking the function reset ioctl mailbox command to the CNA and
* the rest of the driver unload resource release.
**/
static void
|
||||
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
||||
{
|
||||
int wait_time = 0;
|
||||
int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
||||
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
||||
|
||||
while (!fcp_xri_cmpl || !els_xri_cmpl) {
|
||||
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
|
||||
if (!fcp_xri_cmpl)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2877 FCP XRI exchange busy "
|
||||
"wait time: %d seconds.\n",
|
||||
wait_time/1000);
|
||||
if (!els_xri_cmpl)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2878 ELS XRI exchange busy "
|
||||
"wait time: %d seconds.\n",
|
||||
wait_time/1000);
|
||||
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
|
||||
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
|
||||
} else {
|
||||
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
|
||||
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
|
||||
}
|
||||
fcp_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
||||
els_xri_cmpl =
|
||||
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_hba_unset - Unset the fcoe hba
|
||||
* @phba: Pointer to HBA context object.
|
||||
@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
|
||||
/* Abort all iocbs associated with the hba */
|
||||
lpfc_sli_hba_iocb_abort(phba);
|
||||
|
||||
/* Wait for completion of device XRI exchange busy */
|
||||
lpfc_sli4_xri_exchange_busy_wait(phba);
|
||||
|
||||
/* Disable PCI subsystem interrupt */
|
||||
lpfc_sli4_disable_intr(phba);
|
||||
|
||||
|
@@ -796,6 +796,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
|
||||
return;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
* @vport: pointer to a vport object.
*
* This routine sends mailbox command to unregister all active RPIs for
* a vport.
**/
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, vport->vpi,
vport->vpi + phba->vpi_base, mbox);
mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
}
}
|
||||
/**
|
||||
* lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
if (!vport->stat_data_enabled ||
|
||||
vport->stat_data_blocked ||
|
||||
!pnode ||
|
||||
!pnode->lat_data ||
|
||||
(phba->bucket_type == LPFC_NO_BUCKET)) {
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
|
||||
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
|
||||
unsigned long flags;
|
||||
|
||||
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
|
||||
return;
|
||||
|
||||
/* If there is queuefull or busy condition send a scsi event */
|
||||
if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
|
||||
(cmnd->result == SAM_STAT_BUSY)) {
|
||||
@ -3226,10 +3230,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
|
||||
struct lpfc_scsi_buf *lpfc_cmd;
|
||||
struct lpfc_iocbq *iocbq;
|
||||
struct lpfc_iocbq *iocbqrsp;
|
||||
struct lpfc_nodelist *pnode = rdata->pnode;
|
||||
int ret;
|
||||
int status;
|
||||
|
||||
if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
|
||||
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
|
||||
return FAILED;
|
||||
|
||||
lpfc_cmd = lpfc_get_scsi_buf(phba);
|
||||
@ -3256,7 +3261,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
|
||||
"0702 Issue %s to TGT %d LUN %d "
|
||||
"rpi x%x nlp_flag x%x\n",
|
||||
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
|
||||
rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
|
||||
pnode->nlp_rpi, pnode->nlp_flag);
|
||||
|
||||
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
|
||||
iocbq, iocbqrsp, lpfc_cmd->timeout);
|
||||
|
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
|
||||
return -ENOMEM;
|
||||
/* set consumption flag every once in a while */
|
||||
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
|
||||
bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
|
||||
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
|
||||
|
||||
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
|
||||
|
||||
@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
struct lpfc_vport *vport = pmb->vport;
|
||||
struct lpfc_dmabuf *mp;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
struct Scsi_Host *shost;
|
||||
uint16_t rpi, vpi;
|
||||
int rc;
|
||||
|
||||
@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
}
|
||||
|
||||
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
|
||||
(phba->sli_rev == LPFC_SLI_REV4))
|
||||
(phba->sli_rev == LPFC_SLI_REV4) &&
|
||||
(pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
|
||||
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
|
||||
|
||||
/*
|
||||
@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unreg VPI, if the REG_VPI succeed after VLink failure */
|
||||
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
|
||||
!(phba->pport->load_flag & FC_UNLOADING) &&
|
||||
!pmb->u.mb.mbxStatus) {
|
||||
lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
|
||||
pmb->vport = vport;
|
||||
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
|
||||
if (rc != MBX_NOT_FINISHED)
|
||||
return;
|
||||
shost = lpfc_shost_from_vport(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->vpi_state |= LPFC_VPI_REGISTERED;
|
||||
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
}
|
||||
|
||||
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
|
||||
@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
|
||||
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
|
||||
* @phba: Pointer to HBA context object.
|
||||
*
|
||||
* This routine performs a round robin SCSI command to SLI4 FCP WQ index
|
||||
* This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
|
||||
* distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
|
||||
* held.
|
||||
*
|
||||
@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
uint16_t abrt_iotag;
|
||||
struct lpfc_iocbq *abrtiocbq;
|
||||
struct ulp_bde64 *bpl = NULL;
|
||||
uint32_t els_id = ELS_ID_DEFAULT;
|
||||
uint32_t els_id = LPFC_ELS_ID_DEFAULT;
|
||||
int numBdes, i;
|
||||
struct ulp_bde64 bde;
|
||||
|
||||
@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
|
||||
abort_tag = (uint32_t) iocbq->iotag;
|
||||
xritag = iocbq->sli4_xritag;
|
||||
wqe->words[7] = 0; /* The ct field has moved so reset */
|
||||
wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
|
||||
/* words0-2 bpl convert bde */
|
||||
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
|
||||
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
|
||||
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
* contains the FCFI and remote N_Port_ID is
|
||||
* in word 5.
|
||||
*/
|
||||
|
||||
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
|
||||
bf_set(lpfc_wqe_gen_context, &wqe->generic,
|
||||
iocbq->iocb.ulpContext);
|
||||
|
||||
bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
|
||||
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
|
||||
bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
|
||||
/* CCP CCPE PV PRI in word10 were set in the memcpy */
|
||||
|
||||
if (command_type == ELS_COMMAND_FIP) {
|
||||
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
|
||||
>> LPFC_FIP_ELS_ID_SHIFT);
|
||||
}
|
||||
bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
|
||||
|
||||
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
|
||||
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
|
||||
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
|
||||
break;
|
||||
case CMD_XMIT_SEQUENCE64_CX:
|
||||
bf_set(lpfc_wqe_gen_context, &wqe->generic,
|
||||
iocbq->iocb.un.ulpWord[3]);
|
||||
wqe->generic.word3 = 0;
|
||||
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
|
||||
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
|
||||
iocbq->iocb.un.ulpWord[3]);
|
||||
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
/* The entire sequence is transmitted for this IOCB */
|
||||
xmit_len = total_len;
|
||||
cmnd = CMD_XMIT_SEQUENCE64_CR;
|
||||
case CMD_XMIT_SEQUENCE64_CR:
|
||||
/* word3 iocb=io_tag32 wqe=payload_offset */
|
||||
/* payload offset used for multiple outstanding
|
||||
* sequences on the same exchange
|
||||
*/
|
||||
wqe->words[3] = 0;
|
||||
/* word3 iocb=io_tag32 wqe=reserved */
|
||||
wqe->xmit_sequence.rsvd3 = 0;
|
||||
/* word4 relative_offset memcpy */
|
||||
/* word5 r_ctl/df_ctl memcpy */
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
|
||||
bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
|
||||
bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
|
||||
LPFC_WQE_IOD_WRITE);
|
||||
bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD12);
|
||||
bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
|
||||
wqe->xmit_sequence.xmit_len = xmit_len;
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
case CMD_XMIT_BCAST64_CN:
|
||||
/* word3 iocb=iotag32 wqe=payload_len */
|
||||
wqe->words[3] = 0; /* no definition for this in wqe */
|
||||
/* word3 iocb=iotag32 wqe=seq_payload_len */
|
||||
wqe->xmit_bcast64.seq_payload_len = xmit_len;
|
||||
/* word4 iocb=rsvd wqe=rsvd */
|
||||
/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
|
||||
/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
|
||||
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
|
||||
bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
|
||||
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
|
||||
bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
|
||||
bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD3);
|
||||
bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
|
||||
break;
|
||||
case CMD_FCP_IWRITE64_CR:
|
||||
command_type = FCP_COMMAND_DATA_OUT;
|
||||
/* The struct for wqe fcp_iwrite has 3 fields that are somewhat
|
||||
* confusing.
|
||||
* word3 is payload_len: byte offset to the sgl entry for the
|
||||
* fcp_command.
|
||||
* word4 is total xfer len, same as the IOCB->ulpParameter.
|
||||
* word5 is initial xfer len 0 = wait for xfer-ready
|
||||
*/
|
||||
|
||||
/* Always wait for xfer-ready before sending data */
|
||||
wqe->fcp_iwrite.initial_xfer_len = 0;
|
||||
/* word 4 (xfer length) should have been set on the memcpy */
|
||||
|
||||
/* allow write to fall through to read */
|
||||
case CMD_FCP_IREAD64_CR:
|
||||
/* FCP_CMD is always the 1st sgl entry */
|
||||
wqe->fcp_iread.payload_len =
|
||||
/* word3 iocb=iotag wqe=payload_offset_len */
|
||||
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
|
||||
wqe->fcp_iwrite.payload_offset_len =
|
||||
xmit_len + sizeof(struct fcp_rsp);
|
||||
|
||||
/* word 4 (xfer length) should have been set on the memcpy */
|
||||
|
||||
bf_set(lpfc_wqe_gen_erp, &wqe->generic,
|
||||
iocbq->iocb.ulpFCP2Rcvy);
|
||||
bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
|
||||
/* The XC bit and the XS bit are similar. The driver never
|
||||
* tracked whether or not the exchange was previously open.
|
||||
* XC = Exchange create, 0 is create. 1 is already open.
|
||||
* XS = link cmd: 1 do not close the exchange after command.
|
||||
* XS = 0 close exchange when command completes.
|
||||
* The only time we would not set the XC bit is when the XS bit
|
||||
* is set and we are sending our 2nd or greater command on
|
||||
* this exchange.
|
||||
*/
|
||||
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
|
||||
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
|
||||
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
|
||||
iocbq->iocb.ulpFCP2Rcvy);
|
||||
bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
|
||||
/* Always open the exchange */
|
||||
bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
|
||||
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
|
||||
bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD4);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
|
||||
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
|
||||
break;
|
||||
case CMD_FCP_IREAD64_CR:
|
||||
/* word3 iocb=iotag wqe=payload_offset_len */
|
||||
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
|
||||
wqe->fcp_iread.payload_offset_len =
|
||||
xmit_len + sizeof(struct fcp_rsp);
|
||||
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
|
||||
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
|
||||
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
|
||||
iocbq->iocb.ulpFCP2Rcvy);
|
||||
bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
|
||||
/* Always open the exchange */
|
||||
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
|
||||
|
||||
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
|
||||
break;
|
||||
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
|
||||
bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD4);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
|
||||
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
|
||||
break;
|
||||
case CMD_FCP_ICMND64_CR:
|
||||
/* word3 iocb=IO_TAG wqe=reserved */
|
||||
wqe->fcp_icmd.rsrvd3 = 0;
|
||||
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
|
||||
/* Always open the exchange */
|
||||
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
|
||||
|
||||
wqe->words[4] = 0;
|
||||
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
|
||||
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
|
||||
bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
|
||||
bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
|
||||
LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
|
||||
break;
|
||||
case CMD_GEN_REQUEST64_CR:
|
||||
/* word3 command length is described as byte offset to the
|
||||
* rsp_data. Would always be 16, sizeof(struct sli4_sge)
|
||||
* sgl[0] = cmnd
|
||||
* sgl[1] = rsp.
|
||||
*
|
||||
*/
|
||||
wqe->gen_req.command_len = xmit_len;
|
||||
/* Word4 parameter copied in the memcpy */
|
||||
/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
|
||||
/* word3 iocb=IO_TAG wqe=request_payload_len */
|
||||
wqe->gen_req.request_payload_len = xmit_len;
|
||||
/* word4 iocb=parameter wqe=relative_offset memcpy */
|
||||
/* word5 [rctl, type, df_ctl, la] copied in memcpy */
|
||||
/* word6 context tag copied in memcpy */
|
||||
if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
|
||||
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
|
||||
@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
ct, iocbq->iocb.ulpCommand);
|
||||
return IOCB_ERROR;
|
||||
}
|
||||
bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
|
||||
bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
|
||||
iocbq->iocb.ulpTimeout);
|
||||
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
|
||||
bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
|
||||
bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
|
||||
bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
|
||||
bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
|
||||
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
|
||||
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
case CMD_XMIT_ELS_RSP64_CX:
|
||||
/* words0-2 BDE memcpy */
|
||||
/* word3 iocb=iotag32 wqe=rsvd */
|
||||
wqe->words[3] = 0;
|
||||
/* word3 iocb=iotag32 wqe=response_payload_len */
|
||||
wqe->xmit_els_rsp.response_payload_len = xmit_len;
|
||||
/* word4 iocb=did wge=rsvd. */
|
||||
wqe->words[4] = 0;
|
||||
wqe->xmit_els_rsp.rsvd4 = 0;
|
||||
/* word5 iocb=rsvd wge=did */
|
||||
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
|
||||
iocbq->iocb.un.elsreq64.remoteID);
|
||||
|
||||
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
|
||||
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
|
||||
|
||||
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
|
||||
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
|
||||
bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
|
||||
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
|
||||
bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
|
||||
bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
|
||||
bf_set(lpfc_wqe_gen_context, &wqe->generic,
|
||||
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
|
||||
iocbq->vport->vpi + phba->vpi_base);
|
||||
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
|
||||
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
|
||||
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
|
||||
LPFC_WQE_LENLOC_WORD3);
|
||||
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
case CMD_CLOSE_XRI_CN:
|
||||
@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
else
|
||||
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
|
||||
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
|
||||
wqe->words[5] = 0;
|
||||
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
|
||||
/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
|
||||
wqe->abort_cmd.rsrvd5 = 0;
|
||||
bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
|
||||
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
|
||||
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
|
||||
/*
|
||||
* The abort handler will send us CMD_ABORT_XRI_CN or
|
||||
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
|
||||
*/
|
||||
bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
|
||||
bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
|
||||
bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
|
||||
LPFC_WQE_LENLOC_NONE);
|
||||
cmnd = CMD_ABORT_XRI_CX;
|
||||
command_type = OTHER_COMMAND;
|
||||
xritag = 0;
|
||||
@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
|
||||
bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
|
||||
iocbq->iocb.ulpContext);
|
||||
bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
|
||||
bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
|
||||
LPFC_WQE_LENLOC_NONE);
|
||||
/* Overwrite the pre-set comnd type with OTHER_COMMAND */
|
||||
command_type = OTHER_COMMAND;
|
||||
break;
|
||||
case CMD_XRI_ABORTED_CX:
|
||||
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
|
||||
/* words0-2 are all 0's no bde */
|
||||
/* word3 and word4 are rsvrd */
|
||||
wqe->words[3] = 0;
|
||||
wqe->words[4] = 0;
|
||||
/* word5 iocb=rsvd wge=did */
|
||||
/* There is no remote port id in the IOCB? */
|
||||
/* Let this fall through and fail */
|
||||
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
|
||||
case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
|
||||
case CMD_FCP_TRSP64_CX: /* Target mode rcv */
|
||||
@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
iocbq->iocb.ulpCommand);
|
||||
return IOCB_ERROR;
|
||||
break;
|
||||
|
||||
}
|
||||
bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
|
||||
bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
|
||||
wqe->generic.abort_tag = abort_tag;
|
||||
bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
|
||||
bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
|
||||
bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
|
||||
bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
|
||||
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
|
||||
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
|
||||
wqe->generic.wqe_com.abort_tag = abort_tag;
|
||||
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
|
||||
bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
|
||||
bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
}
|
||||
|
||||
/**
* lpfc_sli_issue_abort_iotag - Abort function for a command iocb
* lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
*
* This function issues an abort iocb for the provided command
* iocb. This function is called with hbalock held.
* The function returns 0 when it fails due to memory allocation
* failure or when the command iocb is an abort request.
* This function issues an abort iocb for the provided command iocb down to
* the port. Other than the case the outstanding command iocb is an abort
* request, this function issues abort out unconditionally. This function is
* called with hbalock held. The function returns 0 when it fails due to
* memory allocation failure or when the command iocb is an abort request.
**/
int
|
||||
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
static int
|
||||
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
struct lpfc_iocbq *cmdiocb)
|
||||
{
|
||||
struct lpfc_vport *vport = cmdiocb->vport;
|
||||
struct lpfc_iocbq *abtsiocbp;
|
||||
IOCB_t *icmd = NULL;
|
||||
IOCB_t *iabt = NULL;
|
||||
int retval = IOCB_ERROR;
|
||||
int retval;
|
||||
|
||||
/*
|
||||
* There are certain command types we don't want to abort. And we
|
||||
@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
|
||||
return 0;
|
||||
|
||||
/* If we're unloading, don't abort iocb on the ELS ring, but change the
|
||||
* callback so that nothing happens when it finishes.
|
||||
*/
|
||||
if ((vport->load_flag & FC_UNLOADING) &&
|
||||
(pring->ringno == LPFC_ELS_RING)) {
|
||||
if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
|
||||
cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
|
||||
else
|
||||
cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
|
||||
goto abort_iotag_exit;
|
||||
}
|
||||
|
||||
/* issue ABTS for this IOCB based on iotag */
|
||||
abtsiocbp = __lpfc_sli_get_iocbq(phba);
|
||||
if (abtsiocbp == NULL)
|
||||
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
|
||||
if (retval)
|
||||
__lpfc_sli_release_iocbq(phba, abtsiocbp);
|
||||
|
||||
/*
|
||||
* Caller to this routine should check for IOCB_ERROR
|
||||
* and handle it properly. This routine no longer removes
|
||||
* iocb off txcmplq and call compl in case of IOCB_ERROR.
|
||||
*/
|
||||
return retval;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_issue_abort_iotag - Abort function for a command iocb
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @pring: Pointer to driver SLI ring object.
|
||||
* @cmdiocb: Pointer to driver command iocb object.
|
||||
*
|
||||
* This function issues an abort iocb for the provided command iocb. In case
|
||||
* of unloading, the abort iocb will not be issued to commands on the ELS
|
||||
* ring. Instead, the callback function shall be changed to those commands
|
||||
* so that nothing happens when them finishes. This function is called with
|
||||
* hbalock held. The function returns 0 when the command iocb is an abort
|
||||
* request.
|
||||
**/
|
||||
int
|
||||
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
struct lpfc_iocbq *cmdiocb)
|
||||
{
|
||||
struct lpfc_vport *vport = cmdiocb->vport;
|
||||
int retval = IOCB_ERROR;
|
||||
IOCB_t *icmd = NULL;
|
||||
|
||||
/*
|
||||
* There are certain command types we don't want to abort. And we
|
||||
* don't want to abort commands that are already in the process of
|
||||
* being aborted.
|
||||
*/
|
||||
icmd = &cmdiocb->iocb;
|
||||
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
|
||||
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
|
||||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If we're unloading, don't abort iocb on the ELS ring, but change
|
||||
* the callback so that nothing happens when it finishes.
|
||||
*/
|
||||
if ((vport->load_flag & FC_UNLOADING) &&
|
||||
(pring->ringno == LPFC_ELS_RING)) {
|
||||
if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
|
||||
cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
|
||||
else
|
||||
cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
|
||||
goto abort_iotag_exit;
|
||||
}
|
||||
|
||||
/* Now, we try to issue the abort to the cmdiocb out */
|
||||
retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
|
||||
|
||||
abort_iotag_exit:
|
||||
/*
|
||||
* Caller to this routine should check for IOCB_ERROR
|
||||
@ -7353,6 +7413,62 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
return retval;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function aborts all iocbs in the given ring and frees all the iocb
* objects in txq. This function issues abort iocbs unconditionally for all
* the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
* to complete before the return of this function. The caller is not required
* to hold any locks.
**/
static void
|
||||
lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
|
||||
{
|
||||
LIST_HEAD(completions);
|
||||
struct lpfc_iocbq *iocb, *next_iocb;
|
||||
|
||||
if (pring->ringno == LPFC_ELS_RING)
|
||||
lpfc_fabric_abort_hba(phba);
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
||||
/* Take off all the iocbs on txq for cancelling */
|
||||
list_splice_init(&pring->txq, &completions);
|
||||
pring->txq_cnt = 0;
|
||||
|
||||
/* Next issue ABTS for everything on the txcmplq */
|
||||
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
|
||||
lpfc_sli_abort_iotag_issue(phba, pring, iocb);
|
||||
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* Cancel all the IOCBs from the completions list */
|
||||
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
|
||||
IOERR_SLI_ABORTED);
|
||||
}
|
||||
|
||||
/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
* This routine will abort all pending and outstanding iocbs to an HBA.
**/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
int i;

for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
lpfc_sli_iocb_ring_abort(phba, pring);
}
}
|
||||
/**
|
||||
* lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
|
||||
* @iocbq: Pointer to driver iocb object.
|
||||
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
/* Issue the mailbox command asynchronously */
|
||||
mboxq->vport = phba->pport;
|
||||
mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag |= FCF_TS_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
|
||||
if (rc == MBX_NOT_FINISHED)
|
||||
error = -EIO;
|
||||
else {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag |= FCF_DISC_INPROGRESS;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
/* Reset eligible FCF count for new scan */
|
||||
if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
|
||||
phba->fcf.eligible_fcf_cnt = 0;
|
||||
@ -12258,21 +12376,21 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
if (error) {
|
||||
if (mboxq)
|
||||
lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
||||
/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
|
||||
/* FCF scan failed, clear FCF_TS_INPROG flag */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
|
||||
phba->hba_flag &= ~FCF_TS_INPROG;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
* lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index
* and to use it for FLOGI round robin FCF failover.
* and to use it for FLOGI roundrobin FCF failover.
*
* Return 0 if the mailbox command is submitted successfully, nonzero
* otherwise.
@@ -12318,7 +12436,7 @@ lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index to
* determine whether it's eligible for FLOGI round robin failover list.
* determine whether it's eligible for FLOGI roundrobin failover list.
*
* Return 0 if the mailbox command is submitted successfully, nonzero
* otherwise.
@ -12364,7 +12482,7 @@ lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
*
|
||||
* This routine is to get the next eligible FCF record index in a round
|
||||
* robin fashion. If the next eligible FCF record index equals to the
|
||||
* initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
|
||||
* initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
|
||||
* shall be returned, otherwise, the next eligible FCF record's index
|
||||
* shall be returned.
|
||||
**/
|
||||
@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
||||
return LPFC_FCOE_FCF_NEXT_NONE;
|
||||
}
|
||||
|
||||
/* Check roundrobin failover index bmask stop condition */
|
||||
if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
|
||||
if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2847 Round robin failover FCF index "
|
||||
"search hit stop condition:x%x\n",
|
||||
next_fcf_index);
|
||||
return LPFC_FCOE_FCF_NEXT_NONE;
|
||||
}
|
||||
/* The roundrobin failover index bmask updated, start over */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2848 Round robin failover FCF index bmask "
|
||||
"updated, start over\n");
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
return phba->fcf.fcf_rr_init_indx;
|
||||
}
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2845 Get next round robin failover "
|
||||
"FCF index x%x\n", next_fcf_index);
|
||||
"2845 Get next roundrobin failover FCF (x%x)\n",
|
||||
next_fcf_index);
|
||||
|
||||
return next_fcf_index;
|
||||
}
|
||||
|
||||
@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine sets the FCF record index in to the eligible bmask for
|
||||
* round robin failover search. It checks to make sure that the index
|
||||
* roundrobin failover search. It checks to make sure that the index
|
||||
* does not go beyond the range of the driver allocated bmask dimension
|
||||
* before setting the bit.
|
||||
*
|
||||
@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
{
|
||||
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2610 HBA FCF index reached driver's "
|
||||
"book keeping dimension: fcf_index:%d, "
|
||||
"driver_bmask_max:%d\n",
|
||||
"2610 FCF (x%x) reached driver's book "
|
||||
"keeping dimension:x%x\n",
|
||||
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Set the eligible FCF record index bmask */
|
||||
set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
|
||||
|
||||
/* Set the roundrobin index bmask updated */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->fcf.fcf_flag |= FCF_REDISC_RRU;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2790 Set FCF index x%x to round robin failover "
|
||||
"2790 Set FCF (x%x) to roundrobin FCF failover "
|
||||
"bmask\n", fcf_index);
|
||||
|
||||
return 0;
|
||||
@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine clears the FCF record index from the eligible bmask for
|
||||
* round robin failover search. It checks to make sure that the index
|
||||
* roundrobin failover search. It checks to make sure that the index
|
||||
* does not go beyond the range of the driver allocated bmask dimension
|
||||
* before clearing the bit.
|
||||
**/
|
||||
@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
{
|
||||
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
|
||||
"2762 HBA FCF index goes beyond driver's "
|
||||
"book keeping dimension: fcf_index:%d, "
|
||||
"driver_bmask_max:%d\n",
|
||||
"2762 FCF (x%x) reached driver's book "
|
||||
"keeping dimension:x%x\n",
|
||||
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
|
||||
return;
|
||||
}
|
||||
@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
|
||||
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2791 Clear FCF index x%x from round robin failover "
|
||||
"2791 Clear FCF (x%x) from roundrobin failover "
|
||||
"bmask\n", fcf_index);
|
||||
}
|
||||
|
||||
@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
|
||||
}
|
||||
} else {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
|
||||
"2775 Start FCF rediscovery quiescent period "
|
||||
"wait timer before scaning FCF table\n");
|
||||
"2775 Start FCF rediscover quiescent timer\n");
|
||||
/*
|
||||
* Start FCF rediscovery wait timer for pending FCF
|
||||
* before rescan FCF record table.
|
||||
|
@ -19,10 +19,16 @@
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
|
||||
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
|
||||
#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
|
||||
#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
|
||||
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
|
||||
#define LPFC_GET_QE_REL_INT 32
|
||||
#define LPFC_RPI_LOW_WATER_MARK 10
|
||||
|
||||
#define LPFC_UNREG_FCF 1
|
||||
#define LPFC_SKIP_UNREG_FCF 0
|
||||
|
||||
/* Amount of time in seconds for waiting FCF rediscovery to complete */
|
||||
#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
|
||||
|
||||
@ -163,9 +169,8 @@ struct lpfc_fcf {
|
||||
#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
|
||||
#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
|
||||
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
|
||||
#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
|
||||
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
|
||||
uint32_t addr_mode;
|
||||
uint16_t fcf_rr_init_indx;
|
||||
uint32_t eligible_fcf_cnt;
|
||||
struct lpfc_fcf_rec current_rec;
|
||||
struct lpfc_fcf_rec failover_rec;
|
||||
|
@ -18,7 +18,7 @@
|
||||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "8.3.17"
|
||||
#define LPFC_DRIVER_VERSION "8.3.18"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
|
||||
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
|
||||
|
@ -10,7 +10,7 @@
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FILE : megaraid_sas.c
|
||||
* Version : v00.00.04.17.1-rc1
|
||||
* Version : v00.00.04.31-rc1
|
||||
*
|
||||
* Authors:
|
||||
* (email-id : megaraidlinux@lsi.com)
|
||||
@ -56,6 +56,15 @@ module_param_named(poll_mode_io, poll_mode_io, int, 0);
|
||||
MODULE_PARM_DESC(poll_mode_io,
|
||||
"Complete cmds from IO path, (default=0)");
|
||||
|
||||
/*
|
||||
* Number of sectors per IO command
|
||||
* Will be set in megasas_init_mfi if user does not provide
|
||||
*/
|
||||
static unsigned int max_sectors;
|
||||
module_param_named(max_sectors, max_sectors, int, 0);
|
||||
MODULE_PARM_DESC(max_sectors,
|
||||
"Maximum number of sectors per IO command");
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(MEGASAS_VERSION);
|
||||
MODULE_AUTHOR("megaraidlinux@lsi.com");
|
||||
@ -103,6 +112,7 @@ static int megasas_poll_wait_aen;
|
||||
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
|
||||
static u32 support_poll_for_event;
|
||||
static u32 megasas_dbg_lvl;
|
||||
static u32 support_device_change;
|
||||
|
||||
/* define lock for aen poll */
|
||||
spinlock_t poll_aen_lock;
|
||||
@ -718,6 +728,10 @@ static int
|
||||
megasas_check_reset_gen2(struct megasas_instance *instance,
|
||||
struct megasas_register_set __iomem *regs)
|
||||
{
|
||||
if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -930,6 +944,7 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
|
||||
mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
|
||||
mfi_sgl->sge_skinny[i].phys_addr =
|
||||
sg_dma_address(os_sgl);
|
||||
mfi_sgl->sge_skinny[i].flag = 0;
|
||||
}
|
||||
}
|
||||
return sge_count;
|
||||
@ -1557,6 +1572,28 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
|
||||
|
||||
static void
|
||||
process_fw_state_change_wq(struct work_struct *work);
|
||||
|
||||
void megasas_do_ocr(struct megasas_instance *instance)
|
||||
{
|
||||
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
|
||||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
|
||||
(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
|
||||
*instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
|
||||
}
|
||||
instance->instancet->disable_intr(instance->reg_set);
|
||||
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
|
||||
instance->issuepend_done = 0;
|
||||
|
||||
atomic_set(&instance->fw_outstanding, 0);
|
||||
megasas_internal_reset_defer_cmds(instance);
|
||||
process_fw_state_change_wq(&instance->work_init);
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_wait_for_outstanding - Wait for all outstanding cmds
|
||||
* @instance: Adapter soft state
|
||||
@ -1574,6 +1611,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
|
||||
unsigned long flags;
|
||||
struct list_head clist_local;
|
||||
struct megasas_cmd *reset_cmd;
|
||||
u32 fw_state;
|
||||
u8 kill_adapter_flag;
|
||||
|
||||
spin_lock_irqsave(&instance->hba_lock, flags);
|
||||
adprecovery = instance->adprecovery;
|
||||
@ -1659,7 +1698,45 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
|
||||
msleep(1000);
|
||||
}
|
||||
|
||||
if (atomic_read(&instance->fw_outstanding)) {
|
||||
i = 0;
|
||||
kill_adapter_flag = 0;
|
||||
do {
|
||||
fw_state = instance->instancet->read_fw_status_reg(
|
||||
instance->reg_set) & MFI_STATE_MASK;
|
||||
if ((fw_state == MFI_STATE_FAULT) &&
|
||||
(instance->disableOnlineCtrlReset == 0)) {
|
||||
if (i == 3) {
|
||||
kill_adapter_flag = 2;
|
||||
break;
|
||||
}
|
||||
megasas_do_ocr(instance);
|
||||
kill_adapter_flag = 1;
|
||||
|
||||
/* wait for 1 secs to let FW finish the pending cmds */
|
||||
msleep(1000);
|
||||
}
|
||||
i++;
|
||||
} while (i <= 3);
|
||||
|
||||
if (atomic_read(&instance->fw_outstanding) &&
|
||||
!kill_adapter_flag) {
|
||||
if (instance->disableOnlineCtrlReset == 0) {
|
||||
|
||||
megasas_do_ocr(instance);
|
||||
|
||||
/* wait for 5 secs to let FW finish the pending cmds */
|
||||
for (i = 0; i < wait_time; i++) {
|
||||
int outstanding =
|
||||
atomic_read(&instance->fw_outstanding);
|
||||
if (!outstanding)
|
||||
return SUCCESS;
|
||||
msleep(1000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (atomic_read(&instance->fw_outstanding) ||
|
||||
(kill_adapter_flag == 2)) {
|
||||
printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
|
||||
/*
|
||||
* Send signal to FW to stop processing any pending cmds.
|
||||
@ -2669,6 +2746,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(cmd->frame, 0, total_sz);
|
||||
cmd->frame->io.context = cmd->index;
|
||||
cmd->frame->io.pad_0 = 0;
|
||||
}
|
||||
@ -3585,6 +3663,27 @@ static int megasas_io_attach(struct megasas_instance *instance)
|
||||
instance->max_fw_cmds - MEGASAS_INT_CMDS;
|
||||
host->this_id = instance->init_id;
|
||||
host->sg_tablesize = instance->max_num_sge;
|
||||
/*
|
||||
* Check if the module parameter value for max_sectors can be used
|
||||
*/
|
||||
if (max_sectors && max_sectors < instance->max_sectors_per_req)
|
||||
instance->max_sectors_per_req = max_sectors;
|
||||
else {
|
||||
if (max_sectors) {
|
||||
if (((instance->pdev->device ==
|
||||
PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
|
||||
(instance->pdev->device ==
|
||||
PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
|
||||
(max_sectors <= MEGASAS_MAX_SECTORS)) {
|
||||
instance->max_sectors_per_req = max_sectors;
|
||||
} else {
|
||||
printk(KERN_INFO "megasas: max_sectors should be > 0"
|
||||
"and <= %d (or < 1MB for GEN2 controller)\n",
|
||||
instance->max_sectors_per_req);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
host->max_sectors = instance->max_sectors_per_req;
|
||||
host->cmd_per_lun = 128;
|
||||
host->max_channel = MEGASAS_MAX_CHANNELS - 1;
|
||||
@ -4658,6 +4757,15 @@ megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
|
||||
static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
|
||||
megasas_sysfs_show_support_poll_for_event, NULL);
|
||||
|
||||
static ssize_t
|
||||
megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", support_device_change);
|
||||
}
|
||||
|
||||
static DRIVER_ATTR(support_device_change, S_IRUGO,
|
||||
megasas_sysfs_show_support_device_change, NULL);
|
||||
|
||||
static ssize_t
|
||||
megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
|
||||
{
|
||||
@ -4978,6 +5086,7 @@ static int __init megasas_init(void)
|
||||
MEGASAS_EXT_VERSION);
|
||||
|
||||
support_poll_for_event = 2;
|
||||
support_device_change = 1;
|
||||
|
||||
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
|
||||
|
||||
@ -5026,8 +5135,17 @@ static int __init megasas_init(void)
|
||||
if (rval)
|
||||
goto err_dcf_poll_mode_io;
|
||||
|
||||
rval = driver_create_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_support_device_change);
|
||||
if (rval)
|
||||
goto err_dcf_support_device_change;
|
||||
|
||||
return rval;
|
||||
|
||||
err_dcf_support_device_change:
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_poll_mode_io);
|
||||
|
||||
err_dcf_poll_mode_io:
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_dbg_lvl);
|
||||
@ -5057,6 +5175,10 @@ static void __exit megasas_exit(void)
|
||||
&driver_attr_poll_mode_io);
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_dbg_lvl);
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_support_poll_for_event);
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_support_device_change);
|
||||
driver_remove_file(&megasas_pci_driver.driver,
|
||||
&driver_attr_release_date);
|
||||
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
|
||||
|
@ -18,9 +18,9 @@
|
||||
/*
|
||||
* MegaRAID SAS Driver meta data
|
||||
*/
|
||||
#define MEGASAS_VERSION "00.00.04.17.1-rc1"
|
||||
#define MEGASAS_RELDATE "Oct. 29, 2009"
|
||||
#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009"
|
||||
#define MEGASAS_VERSION "00.00.04.31-rc1"
|
||||
#define MEGASAS_RELDATE "May 3, 2010"
|
||||
#define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010"
|
||||
|
||||
/*
|
||||
* Device IDs
|
||||
@ -706,6 +706,7 @@ struct megasas_ctrl_info {
|
||||
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
|
||||
MEGASAS_MAX_DEV_PER_CHANNEL)
|
||||
|
||||
#define MEGASAS_MAX_SECTORS (2*1024)
|
||||
#define MEGASAS_DBG_LVL 1
|
||||
|
||||
#define MEGASAS_FW_BUSY 1
|
||||
|
@ -452,10 +452,6 @@ void osd_end_request(struct osd_request *or)
|
||||
{
|
||||
struct request *rq = or->request;
|
||||
|
||||
_osd_free_seg(or, &or->set_attr);
|
||||
_osd_free_seg(or, &or->enc_get_attr);
|
||||
_osd_free_seg(or, &or->get_attr);
|
||||
|
||||
if (rq) {
|
||||
if (rq->next_rq) {
|
||||
_put_request(rq->next_rq);
|
||||
@ -464,6 +460,12 @@ void osd_end_request(struct osd_request *or)
|
||||
|
||||
_put_request(rq);
|
||||
}
|
||||
|
||||
_osd_free_seg(or, &or->get_attr);
|
||||
_osd_free_seg(or, &or->enc_get_attr);
|
||||
_osd_free_seg(or, &or->set_attr);
|
||||
_osd_free_seg(or, &or->cdb_cont);
|
||||
|
||||
_osd_request_free(or);
|
||||
}
|
||||
EXPORT_SYMBOL(osd_end_request);
|
||||
@ -547,6 +549,12 @@ static int _osd_realloc_seg(struct osd_request *or,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
|
||||
{
|
||||
OSD_DEBUG("total_bytes=%d\n", total_bytes);
|
||||
return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
|
||||
}
|
||||
|
||||
static int _alloc_set_attr_list(struct osd_request *or,
|
||||
const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
|
||||
{
|
||||
@ -885,6 +893,199 @@ int osd_req_read_kern(struct osd_request *or,
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_read_kern);
|
||||
|
||||
static int _add_sg_continuation_descriptor(struct osd_request *or,
|
||||
const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
|
||||
{
|
||||
struct osd_sg_continuation_descriptor *oscd;
|
||||
u32 oscd_size;
|
||||
unsigned i;
|
||||
int ret;
|
||||
|
||||
oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
|
||||
|
||||
if (!or->cdb_cont.total_bytes) {
|
||||
/* First time, jump over the header, we will write to:
|
||||
* cdb_cont.buff + cdb_cont.total_bytes
|
||||
*/
|
||||
or->cdb_cont.total_bytes =
|
||||
sizeof(struct osd_continuation_segment_header);
|
||||
}
|
||||
|
||||
ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
|
||||
oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
|
||||
oscd->hdr.pad_length = 0;
|
||||
oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
|
||||
|
||||
*len = 0;
|
||||
/* copy the sg entries and convert to network byte order */
|
||||
for (i = 0; i < numentries; i++) {
|
||||
oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
|
||||
oscd->entries[i].len = cpu_to_be64(sglist[i].len);
|
||||
*len += sglist[i].len;
|
||||
}
|
||||
|
||||
or->cdb_cont.total_bytes += oscd_size;
|
||||
OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
|
||||
or->cdb_cont.total_bytes, oscd_size, numentries);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
|
||||
{
|
||||
struct request_queue *req_q = osd_request_queue(or->osd_dev);
|
||||
struct bio *bio;
|
||||
struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
|
||||
struct osd_continuation_segment_header *cont_seg_hdr;
|
||||
|
||||
if (!or->cdb_cont.total_bytes)
|
||||
return 0;
|
||||
|
||||
cont_seg_hdr = or->cdb_cont.buff;
|
||||
cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
|
||||
cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
|
||||
|
||||
/* create a bio for continuation segment */
|
||||
bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
|
||||
GFP_KERNEL);
|
||||
if (unlikely(!bio))
|
||||
return -ENOMEM;
|
||||
|
||||
bio->bi_rw |= REQ_WRITE;
|
||||
|
||||
/* integrity check the continuation before the bio is linked
|
||||
* with the other data segments since the continuation
|
||||
* integrity is separate from the other data segments.
|
||||
*/
|
||||
osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
|
||||
|
||||
cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
|
||||
|
||||
/* we can't use _req_append_segment, because we need to link in the
|
||||
* continuation bio to the head of the bio list - the
|
||||
* continuation segment (if it exists) is always the first segment in
|
||||
* the out data buffer.
|
||||
*/
|
||||
bio->bi_next = or->out.bio;
|
||||
or->out.bio = bio;
|
||||
or->out.total_bytes += or->cdb_cont.total_bytes;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
|
||||
* @sglist that has the scatter gather entries. Scatter-gather enables a write
|
||||
* of multiple none-contiguous areas of an object, in a single call. The extents
|
||||
* may overlap and/or be in any order. The only constrain is that:
|
||||
* total_bytes(sglist) >= total_bytes(bio)
|
||||
*/
|
||||
int osd_req_write_sg(struct osd_request *or,
|
||||
const struct osd_obj_id *obj, struct bio *bio,
|
||||
const struct osd_sg_entry *sglist, unsigned numentries)
|
||||
{
|
||||
u64 len;
|
||||
int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
osd_req_write(or, obj, 0, bio, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_write_sg);
|
||||
|
||||
/* osd_req_read_sg: Read multiple extents of an object into @bio
|
||||
* See osd_req_write_sg
|
||||
*/
|
||||
int osd_req_read_sg(struct osd_request *or,
|
||||
const struct osd_obj_id *obj, struct bio *bio,
|
||||
const struct osd_sg_entry *sglist, unsigned numentries)
|
||||
{
|
||||
u64 len;
|
||||
int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
osd_req_read(or, obj, 0, bio, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_read_sg);
|
||||
|
||||
/* SG-list write/read Kern API
|
||||
*
|
||||
* osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
|
||||
* of sg_entries. @numentries indicates how many pointers and sg_entries there
|
||||
* are. By requiring an array of buff pointers. This allows a caller to do a
|
||||
* single write/read and scatter into multiple buffers.
|
||||
* NOTE: Each buffer + len should not cross a page boundary.
|
||||
*/
|
||||
static struct bio *_create_sg_bios(struct osd_request *or,
|
||||
void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
|
||||
{
|
||||
struct request_queue *q = osd_request_queue(or->osd_dev);
|
||||
struct bio *bio;
|
||||
unsigned i;
|
||||
|
||||
bio = bio_kmalloc(GFP_KERNEL, numentries);
|
||||
if (unlikely(!bio)) {
|
||||
OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
for (i = 0; i < numentries; i++) {
|
||||
unsigned offset = offset_in_page(buff[i]);
|
||||
struct page *page = virt_to_page(buff[i]);
|
||||
unsigned len = sglist[i].len;
|
||||
unsigned added_len;
|
||||
|
||||
BUG_ON(offset + len > PAGE_SIZE);
|
||||
added_len = bio_add_pc_page(q, bio, page, len, offset);
|
||||
if (unlikely(len != added_len)) {
|
||||
OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
|
||||
len, added_len);
|
||||
bio_put(bio);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
return bio;
|
||||
}
|
||||
|
||||
int osd_req_write_sg_kern(struct osd_request *or,
|
||||
const struct osd_obj_id *obj, void **buff,
|
||||
const struct osd_sg_entry *sglist, unsigned numentries)
|
||||
{
|
||||
struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
|
||||
if (IS_ERR(bio))
|
||||
return PTR_ERR(bio);
|
||||
|
||||
bio->bi_rw |= REQ_WRITE;
|
||||
osd_req_write_sg(or, obj, bio, sglist, numentries);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_write_sg_kern);
|
||||
|
||||
int osd_req_read_sg_kern(struct osd_request *or,
|
||||
const struct osd_obj_id *obj, void **buff,
|
||||
const struct osd_sg_entry *sglist, unsigned numentries)
|
||||
{
|
||||
struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
|
||||
if (IS_ERR(bio))
|
||||
return PTR_ERR(bio);
|
||||
|
||||
osd_req_read_sg(or, obj, bio, sglist, numentries);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_read_sg_kern);
|
||||
|
||||
|
||||
|
||||
void osd_req_get_attributes(struct osd_request *or,
|
||||
const struct osd_obj_id *obj)
|
||||
{
|
||||
@ -1218,17 +1419,18 @@ int osd_req_add_get_attr_page(struct osd_request *or,
|
||||
or->get_attr.buff = attar_page;
|
||||
or->get_attr.total_bytes = max_page_len;
|
||||
|
||||
or->set_attr.buff = set_one_attr->val_ptr;
|
||||
or->set_attr.total_bytes = set_one_attr->len;
|
||||
|
||||
cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
|
||||
cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
|
||||
/* ocdb->attrs_page.get_attr_offset; */
|
||||
|
||||
if (!set_one_attr || !set_one_attr->attr_page)
|
||||
return 0; /* The set is optional */
|
||||
|
||||
or->set_attr.buff = set_one_attr->val_ptr;
|
||||
or->set_attr.total_bytes = set_one_attr->len;
|
||||
|
||||
cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
|
||||
cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
|
||||
cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
|
||||
/* ocdb->attrs_page.set_attr_offset; */
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(osd_req_add_get_attr_page);
|
||||
@ -1248,11 +1450,14 @@ static int _osd_req_finalize_attr_page(struct osd_request *or)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (or->set_attr.total_bytes == 0)
|
||||
return 0;
|
||||
|
||||
/* set one value */
|
||||
cdbh->attrs_page.set_attr_offset =
|
||||
osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
|
||||
|
||||
ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL,
|
||||
ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
|
||||
&or->out);
|
||||
return ret;
|
||||
}
|
||||
@ -1276,7 +1481,8 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
|
||||
}
|
||||
|
||||
static int _osd_req_finalize_data_integrity(struct osd_request *or,
|
||||
bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
|
||||
bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
|
||||
const u8 *cap_key)
|
||||
{
|
||||
struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
|
||||
int ret;
|
||||
@ -1307,7 +1513,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
|
||||
or->out.last_seg = NULL;
|
||||
|
||||
/* they are now all chained to request sign them all together */
|
||||
osd_sec_sign_data(&or->out_data_integ, or->out.req->bio,
|
||||
osd_sec_sign_data(&or->out_data_integ, out_data_bio,
|
||||
cap_key);
|
||||
}
|
||||
|
||||
@ -1403,6 +1609,8 @@ int osd_finalize_request(struct osd_request *or,
|
||||
{
|
||||
struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
|
||||
bool has_in, has_out;
|
||||
/* Save for data_integrity without the cdb_continuation */
|
||||
struct bio *out_data_bio = or->out.bio;
|
||||
u64 out_data_bytes = or->out.total_bytes;
|
||||
int ret;
|
||||
|
||||
@ -1418,9 +1626,14 @@ int osd_finalize_request(struct osd_request *or,
|
||||
osd_set_caps(&or->cdb, cap);
|
||||
|
||||
has_in = or->in.bio || or->get_attr.total_bytes;
|
||||
has_out = or->out.bio || or->set_attr.total_bytes ||
|
||||
or->enc_get_attr.total_bytes;
|
||||
has_out = or->out.bio || or->cdb_cont.total_bytes ||
|
||||
or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
|
||||
|
||||
ret = _osd_req_finalize_cdb_cont(or, cap_key);
|
||||
if (ret) {
|
||||
OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
|
||||
return ret;
|
||||
}
|
||||
ret = _init_blk_request(or, has_in, has_out);
|
||||
if (ret) {
|
||||
OSD_DEBUG("_init_blk_request failed\n");
|
||||
@ -1458,7 +1671,8 @@ int osd_finalize_request(struct osd_request *or,
|
||||
}
|
||||
|
||||
ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
|
||||
out_data_bytes, cap_key);
|
||||
out_data_bio, out_data_bytes,
|
||||
cap_key);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1594,10 +1594,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
|
||||
cfg_entry = &ccn_hcam->cfg_entry;
|
||||
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
|
||||
|
||||
pmcraid_info
|
||||
("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
|
||||
pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
|
||||
res: %x:%x:%x:%x\n",
|
||||
pinstance->ccn.hcam->ilid,
|
||||
pinstance->ccn.hcam->op_code,
|
||||
((pinstance->ccn.hcam->timestamp1) |
|
||||
((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
|
||||
pinstance->ccn.hcam->notification_type,
|
||||
pinstance->ccn.hcam->notification_lost,
|
||||
pinstance->ccn.hcam->flags,
|
||||
@ -1850,6 +1852,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
|
||||
* none
|
||||
*/
|
||||
static void pmcraid_initiate_reset(struct pmcraid_instance *);
|
||||
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
|
||||
|
||||
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
|
||||
{
|
||||
@ -1881,6 +1884,10 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
|
||||
lock_flags);
|
||||
return;
|
||||
}
|
||||
if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
|
||||
pinstance->timestamp_error = 1;
|
||||
pmcraid_set_timestamp(cmd);
|
||||
}
|
||||
} else {
|
||||
dev_info(&pinstance->pdev->dev,
|
||||
"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
|
||||
@ -3363,7 +3370,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
|
||||
sg_size = buflen;
|
||||
|
||||
for (i = 0; i < num_elem; i++) {
|
||||
page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
|
||||
page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
|
||||
if (!page) {
|
||||
for (j = i - 1; j >= 0; j--)
|
||||
__free_pages(sg_page(&scatterlist[j]), order);
|
||||
@ -3739,6 +3746,7 @@ static long pmcraid_ioctl_passthrough(
|
||||
unsigned long request_buffer;
|
||||
unsigned long request_offset;
|
||||
unsigned long lock_flags;
|
||||
void *ioasa;
|
||||
u32 ioasc;
|
||||
int request_size;
|
||||
int buffer_size;
|
||||
@ -3780,6 +3788,11 @@ static long pmcraid_ioctl_passthrough(
|
||||
rc = __copy_from_user(buffer,
|
||||
(struct pmcraid_passthrough_ioctl_buffer *) arg,
|
||||
sizeof(struct pmcraid_passthrough_ioctl_buffer));
|
||||
|
||||
ioasa =
|
||||
(void *)(arg +
|
||||
offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
|
||||
|
||||
if (rc) {
|
||||
pmcraid_err("ioctl: can't copy passthrough buffer\n");
|
||||
rc = -EFAULT;
|
||||
@ -3947,22 +3960,14 @@ static long pmcraid_ioctl_passthrough(
|
||||
}
|
||||
|
||||
out_handle_response:
|
||||
/* If the command failed for any reason, copy entire IOASA buffer and
|
||||
* return IOCTL success. If copying IOASA to user-buffer fails, return
|
||||
/* copy entire IOASA buffer and return IOCTL success.
|
||||
* If copying IOASA to user-buffer fails, return
|
||||
* EFAULT
|
||||
*/
|
||||
if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) {
|
||||
void *ioasa =
|
||||
(void *)(arg +
|
||||
offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
|
||||
|
||||
pmcraid_info("command failed with %x\n",
|
||||
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
|
||||
if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
|
||||
sizeof(struct pmcraid_ioasa))) {
|
||||
pmcraid_err("failed to copy ioasa buffer to user\n");
|
||||
rc = -EFAULT;
|
||||
}
|
||||
if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
|
||||
sizeof(struct pmcraid_ioasa))) {
|
||||
pmcraid_err("failed to copy ioasa buffer to user\n");
|
||||
rc = -EFAULT;
|
||||
}
|
||||
|
||||
/* If the data transfer was from device, copy the data onto user
|
||||
@ -5147,6 +5152,16 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
|
||||
pinstance->inq_data = NULL;
|
||||
pinstance->inq_data_baddr = 0;
|
||||
}
|
||||
|
||||
if (pinstance->timestamp_data != NULL) {
|
||||
pci_free_consistent(pinstance->pdev,
|
||||
sizeof(struct pmcraid_timestamp_data),
|
||||
pinstance->timestamp_data,
|
||||
pinstance->timestamp_data_baddr);
|
||||
|
||||
pinstance->timestamp_data = NULL;
|
||||
pinstance->timestamp_data_baddr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -5205,6 +5220,20 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* allocate DMAable memory for set timestamp data buffer */
|
||||
pinstance->timestamp_data = pci_alloc_consistent(
|
||||
pinstance->pdev,
|
||||
sizeof(struct pmcraid_timestamp_data),
|
||||
&pinstance->timestamp_data_baddr);
|
||||
|
||||
if (pinstance->timestamp_data == NULL) {
|
||||
pmcraid_err("couldn't allocate DMA memory for \
|
||||
set time_stamp \n");
|
||||
pmcraid_release_buffers(pinstance);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
||||
/* Initialize all the command blocks and add them to free pool. No
|
||||
* need to lock (free_pool_lock) as this is done in initialization
|
||||
* itself
|
||||
@ -5609,6 +5638,68 @@ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* pmcraid_set_timestamp - set the timestamp to IOAFP
|
||||
*
|
||||
* @cmd: pointer to pmcraid_cmd structure
|
||||
*
|
||||
* Return Value
|
||||
* 0 for success or non-zero for failure cases
|
||||
*/
|
||||
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
|
||||
{
|
||||
struct pmcraid_instance *pinstance = cmd->drv_inst;
|
||||
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
|
||||
__be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
|
||||
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
|
||||
|
||||
struct timeval tv;
|
||||
__le64 timestamp;
|
||||
|
||||
do_gettimeofday(&tv);
|
||||
timestamp = tv.tv_sec * 1000;
|
||||
|
||||
pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
|
||||
pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
|
||||
pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
|
||||
pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
|
||||
pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
|
||||
pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
|
||||
|
||||
pmcraid_reinit_cmdblk(cmd);
|
||||
ioarcb->request_type = REQ_TYPE_SCSI;
|
||||
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
|
||||
ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
|
||||
ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
|
||||
memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
|
||||
|
||||
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
|
||||
offsetof(struct pmcraid_ioarcb,
|
||||
add_data.u.ioadl[0]));
|
||||
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
|
||||
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
|
||||
|
||||
ioarcb->request_flags0 |= NO_LINK_DESCS;
|
||||
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
|
||||
ioarcb->data_transfer_length =
|
||||
cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
|
||||
ioadl = &(ioarcb->add_data.u.ioadl[0]);
|
||||
ioadl->flags = IOADL_FLAGS_LAST_DESC;
|
||||
ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
|
||||
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
|
||||
|
||||
if (!pinstance->timestamp_error) {
|
||||
pinstance->timestamp_error = 0;
|
||||
pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
|
||||
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
|
||||
} else {
|
||||
pmcraid_send_cmd(cmd, pmcraid_return_cmd,
|
||||
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* pmcraid_init_res_table - Initialize the resource table
|
||||
* @cmd: pointer to pmcraid command struct
|
||||
@ -5720,7 +5811,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
|
||||
|
||||
/* release the resource list lock */
|
||||
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
|
||||
pmcraid_set_supported_devs(cmd);
|
||||
pmcraid_set_timestamp(cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -6054,10 +6145,10 @@ static int __init pmcraid_init(void)
|
||||
static void __exit pmcraid_exit(void)
|
||||
{
|
||||
pmcraid_netlink_release();
|
||||
class_destroy(pmcraid_class);
|
||||
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
|
||||
PMCRAID_MAX_ADAPTERS);
|
||||
pci_unregister_driver(&pmcraid_driver);
|
||||
class_destroy(pmcraid_class);
|
||||
}
|
||||
|
||||
module_init(pmcraid_init);
|
||||
|
@ -42,7 +42,7 @@
|
||||
*/
|
||||
#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
|
||||
#define PMCRAID_DEVFILE "pmcsas"
|
||||
#define PMCRAID_DRIVER_VERSION "2.0.2"
|
||||
#define PMCRAID_DRIVER_VERSION "2.0.3"
|
||||
#define PMCRAID_DRIVER_DATE __DATE__
|
||||
|
||||
#define PMCRAID_FW_VERSION_1 0x002
|
||||
@ -184,6 +184,7 @@
|
||||
#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
|
||||
#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
|
||||
#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
|
||||
#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00
|
||||
#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
|
||||
|
||||
/* Driver defined IOASCs */
|
||||
@ -561,6 +562,17 @@ struct pmcraid_inquiry_data {
|
||||
__u8 reserved3[16];
|
||||
};
|
||||
|
||||
#define PMCRAID_TIMESTAMP_LEN 12
|
||||
#define PMCRAID_REQ_TM_STR_LEN 6
|
||||
#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4
|
||||
#define PMCRAID_SCSI_SERVICE_ACTION 0x0F
|
||||
|
||||
struct pmcraid_timestamp_data {
|
||||
__u8 reserved1[4];
|
||||
__u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */
|
||||
__u8 reserved2[2];
|
||||
};
|
||||
|
||||
/* pmcraid_cmd - LLD representation of SCSI command */
|
||||
struct pmcraid_cmd {
|
||||
|
||||
@ -568,7 +580,6 @@ struct pmcraid_cmd {
|
||||
struct pmcraid_control_block *ioa_cb;
|
||||
dma_addr_t ioa_cb_bus_addr;
|
||||
dma_addr_t dma_handle;
|
||||
u8 *sense_buffer;
|
||||
|
||||
/* pointer to mid layer structure of SCSI commands */
|
||||
struct scsi_cmnd *scsi_cmd;
|
||||
@ -705,6 +716,9 @@ struct pmcraid_instance {
|
||||
struct pmcraid_inquiry_data *inq_data;
|
||||
dma_addr_t inq_data_baddr;
|
||||
|
||||
struct pmcraid_timestamp_data *timestamp_data;
|
||||
dma_addr_t timestamp_data_baddr;
|
||||
|
||||
/* size of configuration table entry, varies based on the firmware */
|
||||
u32 config_table_entry_size;
|
||||
|
||||
@ -791,6 +805,7 @@ struct pmcraid_instance {
|
||||
#define SHUTDOWN_NONE 0x0
|
||||
#define SHUTDOWN_NORMAL 0x1
|
||||
#define SHUTDOWN_ABBREV 0x2
|
||||
u32 timestamp_error:1; /* indicate set timestamp for out of sync */
|
||||
|
||||
};
|
||||
|
||||
@ -1056,10 +1071,10 @@ struct pmcraid_passthrough_ioctl_buffer {
|
||||
#define PMCRAID_PASSTHROUGH_IOCTL 'F'
|
||||
|
||||
#define DRV_IOCTL(n, size) \
|
||||
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
|
||||
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
|
||||
|
||||
#define FMW_IOCTL(n, size) \
|
||||
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
|
||||
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
|
||||
|
||||
/*
|
||||
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
|
||||
|
@ -1538,6 +1538,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
if (!fcport)
|
||||
return;
|
||||
|
||||
/* Now that the rport has been deleted, set the fcport state to
|
||||
FCS_DEVICE_DEAD */
|
||||
atomic_set(&fcport->state, FCS_DEVICE_DEAD);
|
||||
|
||||
/*
|
||||
* Transport has effectively 'deleted' the rport, clear
|
||||
* all local references.
|
||||
|
@ -1306,6 +1306,125 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
|
||||
return rval;
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
|
||||
uint8_t is_update)
|
||||
{
|
||||
uint32_t start = 0;
|
||||
int valid = 0;
|
||||
|
||||
bsg_job->reply->reply_payload_rcv_len = 0;
|
||||
|
||||
if (unlikely(pci_channel_offline(ha->pdev)))
|
||||
return -EINVAL;
|
||||
|
||||
start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
|
||||
if (start > ha->optrom_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (ha->optrom_state != QLA_SWAITING)
|
||||
return -EBUSY;
|
||||
|
||||
ha->optrom_region_start = start;
|
||||
|
||||
if (is_update) {
|
||||
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
|
||||
valid = 1;
|
||||
else if (start == (ha->flt_region_boot * 4) ||
|
||||
start == (ha->flt_region_fw * 4))
|
||||
valid = 1;
|
||||
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
|
||||
IS_QLA8XXX_TYPE(ha))
|
||||
valid = 1;
|
||||
if (!valid) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Invalid start region 0x%x/0x%x.\n",
|
||||
start, bsg_job->request_payload.payload_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ha->optrom_region_size = start +
|
||||
bsg_job->request_payload.payload_len > ha->optrom_size ?
|
||||
ha->optrom_size - start :
|
||||
bsg_job->request_payload.payload_len;
|
||||
ha->optrom_state = QLA_SWRITING;
|
||||
} else {
|
||||
ha->optrom_region_size = start +
|
||||
bsg_job->reply_payload.payload_len > ha->optrom_size ?
|
||||
ha->optrom_size - start :
|
||||
bsg_job->reply_payload.payload_len;
|
||||
ha->optrom_state = QLA_SREADING;
|
||||
}
|
||||
|
||||
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
|
||||
if (!ha->optrom_buffer) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Read: Unable to allocate memory for optrom retrieval "
|
||||
"(%x).\n", ha->optrom_region_size);
|
||||
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
struct Scsi_Host *host = bsg_job->shost;
|
||||
scsi_qla_host_t *vha = shost_priv(host);
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int rval = 0;
|
||||
|
||||
rval = qla2x00_optrom_setup(bsg_job, ha, 0);
|
||||
if (rval)
|
||||
return rval;
|
||||
|
||||
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
|
||||
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
|
||||
bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
|
||||
ha->optrom_region_size);
|
||||
|
||||
bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
|
||||
bsg_job->reply->result = DID_OK;
|
||||
vfree(ha->optrom_buffer);
|
||||
ha->optrom_buffer = NULL;
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
bsg_job->job_done(bsg_job);
|
||||
return rval;
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
struct Scsi_Host *host = bsg_job->shost;
|
||||
scsi_qla_host_t *vha = shost_priv(host);
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
int rval = 0;
|
||||
|
||||
rval = qla2x00_optrom_setup(bsg_job, ha, 1);
|
||||
if (rval)
|
||||
return rval;
|
||||
|
||||
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
|
||||
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
|
||||
ha->optrom_region_size);
|
||||
|
||||
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
|
||||
ha->optrom_region_start, ha->optrom_region_size);
|
||||
|
||||
bsg_job->reply->result = DID_OK;
|
||||
vfree(ha->optrom_buffer);
|
||||
ha->optrom_buffer = NULL;
|
||||
ha->optrom_state = QLA_SWAITING;
|
||||
bsg_job->job_done(bsg_job);
|
||||
return rval;
|
||||
}
|
||||
|
||||
static int
|
||||
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
|
||||
{
|
||||
@ -1328,6 +1447,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
|
||||
case QL_VND_FCP_PRIO_CFG_CMD:
|
||||
return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
|
||||
|
||||
case QL_VND_READ_FLASH:
|
||||
return qla2x00_read_optrom(bsg_job);
|
||||
|
||||
case QL_VND_UPDATE_FLASH:
|
||||
return qla2x00_update_optrom(bsg_job);
|
||||
|
||||
default:
|
||||
bsg_job->reply->result = (DID_ERROR << 16);
|
||||
bsg_job->job_done(bsg_job);
|
||||
|
@ -14,6 +14,8 @@
|
||||
#define QL_VND_A84_MGMT_CMD 0x04
|
||||
#define QL_VND_IIDMA 0x05
|
||||
#define QL_VND_FCP_PRIO_CFG_CMD 0x06
|
||||
#define QL_VND_READ_FLASH 0x07
|
||||
#define QL_VND_UPDATE_FLASH 0x08
|
||||
|
||||
/* BSG definations for interpreting CommandSent field */
|
||||
#define INT_DEF_LB_LOOPBACK_CMD 0
|
||||
|
@ -1700,9 +1700,7 @@ typedef struct fc_port {
|
||||
atomic_t state;
|
||||
uint32_t flags;
|
||||
|
||||
int port_login_retry_count;
|
||||
int login_retry;
|
||||
atomic_t port_down_timer;
|
||||
|
||||
struct fc_rport *rport, *drport;
|
||||
u32 supported_classes;
|
||||
|
@ -92,6 +92,7 @@ extern int ql2xshiftctondsd;
|
||||
extern int ql2xdbwr;
|
||||
extern int ql2xdontresethba;
|
||||
extern int ql2xasynctmfenable;
|
||||
extern int ql2xgffidenable;
|
||||
extern int ql2xenabledif;
|
||||
extern int ql2xenablehba_err_chk;
|
||||
extern int ql2xtargetreset;
|
||||
|
@ -71,7 +71,7 @@ qla2x00_ctx_sp_free(srb_t *sp)
|
||||
struct srb_iocb *iocb = ctx->u.iocb_cmd;
|
||||
struct scsi_qla_host *vha = sp->fcport->vha;
|
||||
|
||||
del_timer_sync(&iocb->timer);
|
||||
del_timer(&iocb->timer);
|
||||
kfree(iocb);
|
||||
kfree(ctx);
|
||||
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
|
||||
@ -1344,6 +1344,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
|
||||
qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
|
||||
"firmware dump!!!\n", dump_size / 1024);
|
||||
|
||||
if (ha->fce) {
|
||||
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
|
||||
ha->fce_dma);
|
||||
ha->fce = NULL;
|
||||
ha->fce_dma = 0;
|
||||
}
|
||||
|
||||
if (ha->eft) {
|
||||
dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
|
||||
ha->eft_dma);
|
||||
@ -1818,14 +1825,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
|
||||
qla2x00_init_response_q_entries(rsp);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ha->vport_slock, flags);
|
||||
spin_lock(&ha->vport_slock);
|
||||
/* Clear RSCN queue. */
|
||||
list_for_each_entry(vp, &ha->vp_list, list) {
|
||||
vp->rscn_in_ptr = 0;
|
||||
vp->rscn_out_ptr = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
||||
spin_unlock(&ha->vport_slock);
|
||||
|
||||
ha->isp_ops->config_rings(vha);
|
||||
|
||||
@ -2916,21 +2923,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
void
|
||||
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
fcport->vha = vha;
|
||||
fcport->login_retry = 0;
|
||||
fcport->port_login_retry_count = ha->port_down_retry_count *
|
||||
PORT_RETRY_TIME;
|
||||
atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
|
||||
PORT_RETRY_TIME);
|
||||
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
|
||||
|
||||
qla2x00_iidma_fcport(vha, fcport);
|
||||
|
||||
atomic_set(&fcport->state, FCS_ONLINE);
|
||||
|
||||
qla2x00_reg_remote_port(vha, fcport);
|
||||
atomic_set(&fcport->state, FCS_ONLINE);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3292,8 +3291,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
|
||||
continue;
|
||||
|
||||
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
|
||||
if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
|
||||
new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
|
||||
if (ql2xgffidenable &&
|
||||
(new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
|
||||
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
|
||||
continue;
|
||||
|
||||
/* Locate matching device in database. */
|
||||
|
@ -992,8 +992,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
ha = vha->hw;
|
||||
|
||||
DEBUG18(printk(KERN_DEBUG
|
||||
"%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
|
||||
vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
|
||||
"%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
|
||||
vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
|
||||
|
||||
cmd_pkt->vp_index = sp->fcport->vp_idx;
|
||||
|
||||
|
@ -1240,12 +1240,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
|
||||
case LSC_SCODE_NPORT_USED:
|
||||
data[0] = MBS_LOOP_ID_USED;
|
||||
break;
|
||||
case LSC_SCODE_CMD_FAILED:
|
||||
if ((iop[1] & 0xff) == 0x05) {
|
||||
data[0] = MBS_NOT_LOGGED_IN;
|
||||
break;
|
||||
}
|
||||
/* Fall through. */
|
||||
default:
|
||||
data[0] = MBS_COMMAND_ERROR;
|
||||
break;
|
||||
@ -1431,9 +1425,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
|
||||
rsp->status_srb = sp;
|
||||
|
||||
DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
|
||||
"cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
|
||||
cp->device->channel, cp->device->id, cp->device->lun, cp,
|
||||
cp->serial_number));
|
||||
"cmd=%p\n", __func__, sp->fcport->vha->host_no,
|
||||
cp->device->channel, cp->device->id, cp->device->lun, cp));
|
||||
if (sense_len)
|
||||
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
|
||||
}
|
||||
@ -1757,6 +1750,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
case CS_INCOMPLETE:
|
||||
case CS_PORT_UNAVAILABLE:
|
||||
case CS_TIMEOUT:
|
||||
case CS_RESET:
|
||||
|
||||
/*
|
||||
* We are going to have the fc class block the rport
|
||||
* while we try to recover so instruct the mid layer
|
||||
@ -1781,10 +1776,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
|
||||
break;
|
||||
|
||||
case CS_RESET:
|
||||
cp->result = DID_TRANSPORT_DISRUPTED << 16;
|
||||
break;
|
||||
|
||||
case CS_ABORTED:
|
||||
cp->result = DID_RESET << 16;
|
||||
break;
|
||||
@ -1801,10 +1792,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
if (logit)
|
||||
DEBUG2(qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
|
||||
"oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
|
||||
"oxid=0x%x cdb=%02x%02x%02x len=0x%x "
|
||||
"rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
|
||||
cp->device->id, cp->device->lun, comp_status, scsi_status,
|
||||
cp->result, ox_id, cp->serial_number, cp->cmnd[0],
|
||||
cp->result, ox_id, cp->cmnd[0],
|
||||
cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
|
||||
resid_len, fw_resid_len));
|
||||
|
||||
|
@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset,
|
||||
"Enable target reset."
|
||||
"Default is 1 - use hw defaults.");
|
||||
|
||||
int ql2xgffidenable;
|
||||
module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
|
||||
MODULE_PARM_DESC(ql2xgffidenable,
|
||||
"Enables GFF_ID checks of port type. "
|
||||
"Default is 0 - Do not use GFF_ID information.");
|
||||
|
||||
int ql2xasynctmfenable;
|
||||
module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
|
||||
@ -255,6 +260,7 @@ static void qla2x00_rst_aen(scsi_qla_host_t *);
|
||||
|
||||
static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
|
||||
struct req_que **, struct rsp_que **);
|
||||
static void qla2x00_free_fw_dump(struct qla_hw_data *);
|
||||
static void qla2x00_mem_free(struct qla_hw_data *);
|
||||
static void qla2x00_sp_free_dma(srb_t *);
|
||||
|
||||
@ -539,6 +545,7 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
srb_t *sp;
|
||||
int rval;
|
||||
|
||||
spin_unlock_irq(vha->host->host_lock);
|
||||
if (ha->flags.eeh_busy) {
|
||||
if (ha->flags.pci_channel_io_perm_failure)
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
@ -553,10 +560,6 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
goto qc24_fail_command;
|
||||
}
|
||||
|
||||
/* Close window on fcport/rport state-transitioning. */
|
||||
if (fcport->drport)
|
||||
goto qc24_target_busy;
|
||||
|
||||
if (!vha->flags.difdix_supported &&
|
||||
scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
|
||||
DEBUG2(qla_printk(KERN_ERR, ha,
|
||||
@ -567,15 +570,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
}
|
||||
if (atomic_read(&fcport->state) != FCS_ONLINE) {
|
||||
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
|
||||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
|
||||
atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
|
||||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
goto qc24_fail_command;
|
||||
}
|
||||
goto qc24_target_busy;
|
||||
}
|
||||
|
||||
spin_unlock_irq(vha->host->host_lock);
|
||||
|
||||
sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
|
||||
if (!sp)
|
||||
goto qc24_host_busy_lock;
|
||||
@ -597,9 +599,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
qc24_target_busy:
|
||||
spin_lock_irq(vha->host->host_lock);
|
||||
return SCSI_MLQUEUE_TARGET_BUSY;
|
||||
|
||||
qc24_fail_command:
|
||||
spin_lock_irq(vha->host->host_lock);
|
||||
done(cmd);
|
||||
|
||||
return 0;
|
||||
@ -824,81 +828,58 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
{
|
||||
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
||||
srb_t *sp;
|
||||
int ret, i;
|
||||
int ret;
|
||||
unsigned int id, lun;
|
||||
unsigned long serial;
|
||||
unsigned long flags;
|
||||
int wait = 0;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = vha->req;
|
||||
srb_t *spt;
|
||||
int got_ref = 0;
|
||||
|
||||
fc_block_scsi_eh(cmd);
|
||||
|
||||
if (!CMD_SP(cmd))
|
||||
return SUCCESS;
|
||||
|
||||
ret = SUCCESS;
|
||||
|
||||
id = cmd->device->id;
|
||||
lun = cmd->device->lun;
|
||||
serial = cmd->serial_number;
|
||||
spt = (srb_t *) CMD_SP(cmd);
|
||||
if (!spt)
|
||||
return SUCCESS;
|
||||
|
||||
/* Check active list for command command. */
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
|
||||
sp = req->outstanding_cmds[i];
|
||||
|
||||
if (sp == NULL)
|
||||
continue;
|
||||
if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
|
||||
!IS_PROT_IO(sp))
|
||||
continue;
|
||||
if (sp->cmd != cmd)
|
||||
continue;
|
||||
|
||||
DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
|
||||
" pid=%ld.\n", __func__, vha->host_no, sp, serial));
|
||||
|
||||
/* Get a reference to the sp and drop the lock.*/
|
||||
sp_get(sp);
|
||||
got_ref++;
|
||||
|
||||
sp = (srb_t *) CMD_SP(cmd);
|
||||
if (!sp) {
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
if (ha->isp_ops->abort_command(sp)) {
|
||||
DEBUG2(printk("%s(%ld): abort_command "
|
||||
"mbx failed.\n", __func__, vha->host_no));
|
||||
ret = FAILED;
|
||||
} else {
|
||||
DEBUG3(printk("%s(%ld): abort_command "
|
||||
"mbx success.\n", __func__, vha->host_no));
|
||||
wait = 1;
|
||||
}
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
break;
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
|
||||
__func__, vha->host_no, sp));
|
||||
|
||||
/* Get a reference to the sp and drop the lock.*/
|
||||
sp_get(sp);
|
||||
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
if (ha->isp_ops->abort_command(sp)) {
|
||||
DEBUG2(printk("%s(%ld): abort_command "
|
||||
"mbx failed.\n", __func__, vha->host_no));
|
||||
ret = FAILED;
|
||||
} else {
|
||||
DEBUG3(printk("%s(%ld): abort_command "
|
||||
"mbx success.\n", __func__, vha->host_no));
|
||||
wait = 1;
|
||||
}
|
||||
qla2x00_sp_compl(ha, sp);
|
||||
|
||||
/* Wait for the command to be returned. */
|
||||
if (wait) {
|
||||
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
|
||||
qla_printk(KERN_ERR, ha,
|
||||
"scsi(%ld:%d:%d): Abort handler timed out -- %lx "
|
||||
"%x.\n", vha->host_no, id, lun, serial, ret);
|
||||
"scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
|
||||
vha->host_no, id, lun, ret);
|
||||
ret = FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
if (got_ref)
|
||||
qla2x00_sp_compl(ha, sp);
|
||||
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
|
||||
vha->host_no, id, lun, wait, serial, ret);
|
||||
"scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
|
||||
vha->host_no, id, lun, wait, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1043,13 +1024,11 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
fc_block_scsi_eh(cmd);
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
if (!fcport)
return ret;

@@ -1104,14 +1083,12 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
fc_block_scsi_eh(cmd);
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
if (!fcport)
return ret;

@@ -1974,6 +1951,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->bars = bars;
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->vport_slock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);

@@ -2341,6 +2319,42 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
vha = pci_get_drvdata(pdev);
ha = vha->hw;
/* Turn-off FCE trace */
if (ha->flags.fce_enabled) {
qla2x00_disable_fce_trace(vha, NULL, NULL);
ha->flags.fce_enabled = 0;
}
/* Turn-off EFT trace */
if (ha->eft)
qla2x00_disable_eft_trace(vha);
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(vha);
/* Turn adapter off line */
vha->flags.online = 0;
/* turn-off interrupts on the card */
if (ha->interrupts_on) {
vha->flags.init_done = 0;
ha->isp_ops->disable_intrs(ha);
}
qla2x00_free_irqs(vha);
qla2x00_free_fw_dump(ha);
}
static void
qla2x00_remove_one(struct pci_dev *pdev)
{

@@ -2597,12 +2611,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
if (atomic_read(&fcport->state) == FCS_ONLINE) {
atomic_set(&fcport->state, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
else if (vha->vp_idx == fcport->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
}

@@ -2829,6 +2843,35 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return -ENOMEM;
}
/*
* qla2x00_free_fw_dump
*	Frees fw dump stuff.
*
* Input:
*	ha = adapter block pointer.
*/
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
ha->fce_dma);
if (ha->fw_dump) {
if (ha->eft)
dma_free_coherent(&ha->pdev->dev,
ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
vfree(ha->fw_dump);
}
ha->fce = NULL;
ha->fce_dma = 0;
ha->eft = NULL;
ha->eft_dma = 0;
ha->fw_dump = NULL;
ha->fw_dumped = 0;
ha->fw_dump_reading = 0;
}
/*
* qla2x00_mem_free
*      Frees all adapter allocated memory.

@@ -2839,20 +2882,11 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
static void
qla2x00_mem_free(struct qla_hw_data *ha)
{
qla2x00_free_fw_dump(ha);
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
ha->fce_dma);
if (ha->fw_dump) {
if (ha->eft)
dma_free_coherent(&ha->pdev->dev,
ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
vfree(ha->fw_dump);
}
if (ha->dcbx_tlv)
dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
ha->dcbx_tlv, ha->dcbx_tlv_dma);

@@ -2925,8 +2959,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
ha->sns_cmd_dma = 0;
ha->ct_sns = NULL;

@@ -2946,10 +2978,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
ha->fw_dump = NULL;
ha->fw_dumped = 0;
ha->fw_dump_reading = 0;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,

@@ -3547,11 +3575,9 @@ void
qla2x00_timer(scsi_qla_host_t *vha)
{
unsigned long cpu_flags = 0;
fc_port_t *fcport;
int start_dpc = 0;
int index;
srb_t *sp;
int t;
uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;

@@ -3567,34 +3593,6 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/*
* Ports - Port down timer.
*
* Whenever a port is in the LOST state we start decrementing its port
* down timer every second until it reaches zero. Once it reaches zero
* the port is marked DEAD.
*/
t = 0;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
if (atomic_read(&fcport->port_down_timer) == 0)
continue;
if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
atomic_set(&fcport->state, FCS_DEVICE_DEAD);
DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
"%d remaining\n",
vha->host_no,
t, atomic_read(&fcport->port_down_timer)));
}
t++;
} /* End of for fcport */
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&

@@ -4079,6 +4077,7 @@ static struct pci_driver qla2xxx_pci_driver = {
.id_table = qla2xxx_pci_tbl,
.probe = qla2x00_probe_one,
.remove = qla2x00_remove_one,
.shutdown = qla2x00_shutdown,
.err_handler = &qla2xxx_err_handler,
};
@@ -30,3 +30,104 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk(KERN_INFO "\n");
}
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
uint8_t i;
if (is_qla8022(ha)) {
for (i = 1; i < MBOX_REG_COUNT; i++)
printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
return;
}
for (i = 0; i < MBOX_REG_COUNT; i++) {
printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
readw(&ha->reg->mailbox[i]));
}
printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, flash_address),
readw(&ha->reg->flash_address));
printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, flash_data),
readw(&ha->reg->flash_data));
printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, ctrl_status),
readw(&ha->reg->ctrl_status));
if (is_qla4010(ha)) {
printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
readw(&ha->reg->u1.isp4010.nvram));
} else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
readw(&ha->reg->u1.isp4022.intr_mask));
printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
readw(&ha->reg->u1.isp4022.nvram));
printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
readw(&ha->reg->u1.isp4022.semaphore));
}
printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, req_q_in),
readw(&ha->reg->req_q_in));
printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, rsp_q_out),
readw(&ha->reg->rsp_q_out));
if (is_qla4010(ha)) {
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
readw(&ha->reg->u2.isp4010.ext_hw_conf));
printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
readw(&ha->reg->u2.isp4010.port_ctrl));
printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
readw(&ha->reg->u2.isp4010.port_status));
printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
readw(&ha->reg->u2.isp4010.req_q_out));
printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
readw(&ha->reg->u2.isp4010.gp_out));
printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
readw(&ha->reg->u2.isp4010.gp_in));
printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
offsetof(struct isp_reg, u2.isp4010.port_err_status),
readw(&ha->reg->u2.isp4010.port_err_status));
} else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "Page 0 Registers:\n");
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
readw(&ha->reg->u2.isp4022.p0.port_ctrl));
printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
offsetof(struct isp_reg, u2.isp4022.p0.port_status),
readw(&ha->reg->u2.isp4022.p0.port_status));
printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
readw(&ha->reg->u2.isp4022.p0.gp_out));
printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
readw(&ha->reg->u2.isp4022.p0.gp_in));
printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
readw(&ha->reg->u2.isp4022.p0.port_err_status));
printk(KERN_INFO "Page 1 Registers:\n");
writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
&ha->reg->ctrl_status);
printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
readw(&ha->reg->u2.isp4022.p1.req_q_out));
writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
&ha->reg->ctrl_status);
}
}
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/aer.h>
#include <net/tcp.h>
#include <scsi/scsi.h>

@@ -36,24 +37,6 @@
#include "ql4_dbg.h"
#include "ql4_nx.h"
#if defined(CONFIG_PCIEAER)
#include <linux/aer.h>
#else
/* AER related */
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
return -EINVAL;
}
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif

@@ -179,6 +162,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
#define IOCB_TOV_MARGIN 10
#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define MAX_RESET_HA_RETRIES 2

@@ -416,6 +416,8 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
#define ISNS_EVENT_DATA_RECEIVED 0x0000
#define ISNS_EVENT_CONNECTION_OPENED 0x0001

@@ -446,6 +448,7 @@ struct addr_ctrl_blk {
#define FWOPT_SESSION_MODE 0x0040
#define FWOPT_INITIATOR_MODE 0x0020
#define FWOPT_TARGET_MODE 0x0010
#define FWOPT_ENABLE_CRBDB 0x8000
uint16_t exec_throttle; /* 04-05 */
uint8_t zio_count; /* 06 */
@@ -94,6 +94,7 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
void qla4xxx_dump_registers(struct scsi_qla_host *ha);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);

@@ -1207,8 +1207,8 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
break;
DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
"firmware to complete... ctrl_sts=0x%x\n",
ha->host_no, __func__, ctrl_status));
"firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
ha->host_no, __func__, ctrl_status, max_wait_time));
msleep_interruptible(250);
} while (!time_after_eq(jiffies, max_wait_time));

@@ -1459,6 +1459,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
exit_init_online:
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
if (is_qla8022(ha) && (status == QLA_ERROR)) {
/* Since interrupts are registered in start_firmware for
* 82xx, release them here if initialize_adapter fails */
qla4xxx_free_irqs(ha);
}
DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
return status;
@@ -202,19 +202,11 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
{
uint32_t dbval = 0;
unsigned long wtime;
dbval = 0x14 | (ha->func_num << 5);
dbval = dbval | (0 << 8) | (ha->request_in << 16);
writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
wmb();
wtime = jiffies + (2 * HZ);
while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
!time_after_eq(jiffies, wtime)) {
writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
wmb();
}
qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}
/**
@@ -72,7 +72,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
{
struct srb *srb = ha->status_srb;
struct scsi_cmnd *cmd;
uint8_t sense_len;
uint16_t sense_len;
if (srb == NULL)
return;

@@ -487,6 +487,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_SYSTEM_ERROR:
/* Log Mailbox registers */
ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
qla4xxx_dump_registers(ha);
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
ha->host_no, __func__));

@@ -621,6 +623,18 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
break;
case MBOX_ASTS_TXSCVR_INSERTED:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x Transceiver"
" inserted\n", ha->host_no, mbox_sts[0]));
break;
case MBOX_ASTS_TXSCVR_REMOVED:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x Transceiver"
" removed\n", ha->host_no, mbox_sts[0]));
break;
default:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x UNKNOWN\n",
@@ -299,6 +299,10 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
{
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
if (is_qla8022(ha))
qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
mbox_cmd[2] = LSDW(init_fw_cb_dma);

@@ -472,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
if (is_qla8022(ha))
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)

@@ -592,7 +601,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
}
ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
ha->host_no, mbox_cmd[2]);
ha->host_no, mbox_sts[2]);
return QLA_SUCCESS;
}
@@ -839,8 +839,11 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
if (timeout >= qla4_8xxx_rom_lock_timeout)
if (timeout >= qla4_8xxx_rom_lock_timeout) {
ql4_printk(KERN_WARNING, ha,
"%s: Failed to acquire rom lock", __func__);
return -1;
}
timeout++;

@@ -1078,21 +1081,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
return 0;
}
static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
{
u32 val = 0;
val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
printk("Memory DIMM SPD not programmed. Assumed valid.\n");
return 1;
} else if (val) {
printk("Memory DIMM type incorrect. Info:%08X.\n", val);
return 2;
}
return 0;
}
static int
qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{

@@ -1377,8 +1365,6 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
} while (--retries);
qla4_8xxx_check_for_bad_spd(ha);
if (!retries) {
pegtune_val = qla4_8xxx_rd_32(ha,
QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);

@@ -1540,14 +1526,31 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha,
"FW: Attempting to load firmware from flash...\n");
rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
if (rval == QLA_SUCCESS)
return rval;
ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
" FAILED...\n");
return rval;
}
return rval;
}
static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
{
if (qla4_8xxx_rom_lock(ha)) {
/* Someone else is holding the lock. */
dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
}
/*
* Either we got the lock, or someone
* else died while holding it.
* In either case, unlock.
*/
qla4_8xxx_rom_unlock(ha);
}
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure

@@ -1557,11 +1560,12 @@ qla4_8xxx_try_start_fw(ha)
static int
qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
int rval, i, timeout;
int rval = QLA_ERROR;
int i, timeout;
uint32_t old_count, count;
int need_reset = 0, peg_stuck = 1;
if (qla4_8xxx_need_reset(ha))
goto dev_initialize;
need_reset = qla4_8xxx_need_reset(ha);
old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

@@ -1570,12 +1574,30 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
if (timeout) {
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
return QLA_ERROR;
return rval;
}
count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
if (count != old_count)
peg_stuck = 0;
}
if (need_reset) {
/* We are trying to perform a recovery here. */
if (peg_stuck)
qla4_8xxx_rom_lock_recovery(ha);
goto dev_initialize;
} else {
/* Start of day for this ha context. */
if (peg_stuck) {
/* Either we are the first or recovery in progress. */
qla4_8xxx_rom_lock_recovery(ha);
goto dev_initialize;
} else {
/* Firmware already running. */
rval = QLA_SUCCESS;
goto dev_ready;
}
}
dev_initialize:

@@ -1601,7 +1623,7 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "HW State: READY\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
return QLA_SUCCESS;
return rval;
}
/**

@@ -1764,20 +1786,9 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
int retval;
retval = qla4_8xxx_device_state_handler(ha);
if (retval == QLA_SUCCESS &&
!test_bit(AF_INIT_DONE, &ha->flags)) {
if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
retval = qla4xxx_request_irqs(ha);
if (retval != QLA_SUCCESS) {
ql4_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
} else {
set_bit(AF_IRQ_ATTACHED, &ha->flags);
ha->host->irq = ha->pdev->irq;
ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
__func__, ha->pdev->irq);
}
}
return retval;
}
@@ -24,7 +24,6 @@
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05

@@ -529,12 +528,12 @@
# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0))
#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4))
#define HALT_STATUS_UNRECOVERABLE 0x80000000
#define HALT_STATUS_RECOVERABLE 0x40000000
@@ -167,8 +167,6 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
QL4_SESS_RECOVERY_TMO));
qla4xxx_wake_dpc(ha);
}
}

@@ -573,10 +571,6 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
if (ha->nx_db_wr_ptr)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
} else if (ha->reg)
iounmap(ha->reg);
pci_release_regions(ha->pdev);

@@ -692,7 +686,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
qla4xxx_mailbox_premature_completion(ha);
}
}
} else
ha->seconds_since_last_heartbeat = 0;
ha->fw_heartbeat_counter = fw_heartbeat_counter;
}

@@ -885,7 +881,13 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
/* Find a command that hasn't completed. */
for (index = 0; index < ha->host->can_queue; index++) {
cmd = scsi_host_find_tag(ha->host, index);
if (cmd != NULL)
/*
* We cannot just check if the index is valid,
* because if we are run from the scsi eh, then
* the scsi/block layer is going to prevent
* the tag from being released.
*/
if (cmd != NULL && CMD_SP(cmd))
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);

@@ -937,11 +939,14 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
uint32_t max_wait_time;
unsigned long flags = 0;
int status = QLA_ERROR;
int status;
uint32_t ctrl_status;
qla4xxx_hw_reset(ha);
status = qla4xxx_hw_reset(ha);
if (status != QLA_SUCCESS)
return status;
status = QLA_ERROR;
/* Wait until the Network Reset Intr bit is cleared */
max_wait_time = RESET_INTR_TOV;
do {

@@ -1101,7 +1106,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
ha->host_no, __func__));
status = ha->isp_ops->reset_firmware(ha);
if (status == QLA_SUCCESS) {
qla4xxx_cmd_wait(ha);
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);

@@ -1118,7 +1124,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
qla4xxx_cmd_wait(ha);
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -1471,24 +1478,10 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
/* mapping of doorbell write pointer */
ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
(ha->pdev->devfn << 12), 4);
if (!ha->nx_db_wr_ptr) {
printk(KERN_ERR
"cannot remap MMIO doorbell-write (%s), aborting\n",
pci_name(pdev));
goto iospace_error_exit;
}
/* mapping of doorbell read pointer */
ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
(ha->pdev->devfn * 8);
if (!ha->nx_db_rd_ptr)
printk(KERN_ERR
"cannot remap MMIO doorbell-read (%s), aborting\n",
pci_name(pdev));
return 0;
ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
QLA82XX_CAM_RAM_DB2);
return 0;
iospace_error_exit:
return -ENOMEM;
}

@@ -1960,13 +1953,11 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
unsigned long wait_online;
wait_online = jiffies + (30 * HZ);
wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
while (time_before(jiffies, wait_online)) {
if (adapter_up(ha))
return QLA_SUCCESS;
else if (ha->retry_reset_ha_cnt == 0)
return QLA_ERROR;
msleep(2000);
}

@@ -2021,6 +2012,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned int id = cmd->device->id;
unsigned int lun = cmd->device->lun;
unsigned long serial = cmd->serial_number;
unsigned long flags;
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;

@@ -2029,12 +2021,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
"scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
ha->host_no, id, lun, cmd, serial);
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb)
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return SUCCESS;
}
kref_get(&srb->srb_ref);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",

@@ -2267,6 +2261,8 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
qla4xxx_mailbox_premature_completion(ha);
qla4xxx_free_irqs(ha);
pci_disable_device(pdev);
/* Return back all IOs */
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
set_bit(AF_EEH_BUSY, &ha->flags);

@@ -2290,17 +2286,13 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
"mmio_enabled\n", ha->host_no, __func__);
return PCI_ERS_RESULT_NEED_RESET;
} else
return PCI_ERS_RESULT_RECOVERED;
return PCI_ERS_RESULT_RECOVERED;
}
uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;

@@ -2312,7 +2304,6 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
clear_bit(AF_ONLINE, &ha->flags);
qla4xxx_mark_all_devices_missing(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
}
fn = PCI_FUNC(ha->pdev->devfn);

@@ -2375,7 +2366,16 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
/* Clear driver state register */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->enable_intrs(ha);
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to "
"reserve interrupt %d already in use.\n",
ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
}
qla4_8xxx_idc_unlock(ha);
} else {

@@ -2387,8 +2387,18 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha,
PRESERVE_DDB_LIST);
if (rval == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to"
" reserve interrupt %d already in"
" use.\n", ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
}
qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
qla4_8xxx_idc_unlock(ha);

@@ -2430,12 +2440,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
goto exit_slot_reset;
}
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
" already in use.\n", pdev->irq);
goto exit_slot_reset;
}
ha->isp_ops->disable_intrs(ha);
if (is_qla8022(ha)) {
if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.02.00-k3"
#define QLA4XXX_DRIVER_VERSION "5.02.00-k4"
@@ -2438,7 +2438,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
sdev->sdev_state = SDEV_RUNNING;
else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
sdev->sdev_state = SDEV_CREATED;
else
else if (sdev->sdev_state != SDEV_CANCEL &&
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
@@ -964,10 +964,11 @@ static void __scsi_remove_target(struct scsi_target *starget)
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
sdev->sdev_state == SDEV_DEL)
scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
@@ -258,6 +258,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n", sdkp->protection_type);
}
static ssize_t
sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
unsigned int dif, dix;
dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
dif = 0;
dix = 1;
}
if (!dif && !dix)
return snprintf(buf, 20, "none\n");
return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
}
static ssize_t
sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
char *buf)

@@ -285,6 +307,7 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
sd_store_manage_start_stop),
__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR_NULL,
@@ -324,6 +324,15 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
return CDS_NO_DISC;
}
/*
* SK/ASC/ASCQ of 2/4/2 means "initialization required"
* Using CD_TRAY_OPEN results in a START_STOP_UNIT to close
* the tray, which resolves the initialization requirement.
*/
if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
&& sshdr.asc == 0x04 && sshdr.ascq == 0x02)
return CDS_TRAY_OPEN;
/*
* 0x04 is format in progress .. but there must be a disc present!
*/
@@ -721,7 +721,7 @@ struct libfc_function_template {
* struct fc_disc - Discovery context
* @retry_count: Number of retries
* @pending: 1 if discovery is pending, 0 if not
* @requesting: 1 if discovery has been requested, 0 if not
* @requested: 1 if discovery has been requested, 0 if not
* @seq_count: Number of sequences used for discovery
* @buf_len: Length of the discovery buffer
* @disc_id: Discovery ID
@@ -137,7 +137,7 @@ struct osd_request {
void *buff;
unsigned alloc_size; /* 0 here means: don't call kfree */
unsigned total_bytes;
} set_attr, enc_get_attr, get_attr;
} cdb_cont, set_attr, enc_get_attr, get_attr;
struct _osd_io_info {
struct bio *bio;

@@ -448,6 +448,20 @@ void osd_req_read(struct osd_request *or,
int osd_req_read_kern(struct osd_request *or,
const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
/* Scatter/Gather write/read commands */
int osd_req_write_sg(struct osd_request *or,
const struct osd_obj_id *obj, struct bio *bio,
const struct osd_sg_entry *sglist, unsigned numentries);
int osd_req_read_sg(struct osd_request *or,
const struct osd_obj_id *obj, struct bio *bio,
const struct osd_sg_entry *sglist, unsigned numentries);
int osd_req_write_sg_kern(struct osd_request *or,
const struct osd_obj_id *obj, void **buff,
const struct osd_sg_entry *sglist, unsigned numentries);
int osd_req_read_sg_kern(struct osd_request *or,
const struct osd_obj_id *obj, void **buff,
const struct osd_sg_entry *sglist, unsigned numentries);
/*
* Root/Partition/Collection/Object Attributes commands
*/
@@ -631,4 +631,46 @@ static inline void osd_sec_set_caps(struct osd_capability_head *cap,
put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
}
/* osd2r05a sec 5.3: CDB continuation segment formats */
enum osd_continuation_segment_format {
CDB_CONTINUATION_FORMAT_V2 = 0x01,
};
struct osd_continuation_segment_header {
u8 format;
u8 reserved1;
__be16 service_action;
__be32 reserved2;
u8 integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
} __packed;
/* osd2r05a sec 5.4.1: CDB continuation descriptors */
enum osd_continuation_descriptor_type {
NO_MORE_DESCRIPTORS = 0x0000,
SCATTER_GATHER_LIST = 0x0001,
QUERY_LIST = 0x0002,
USER_OBJECT = 0x0003,
COPY_USER_OBJECT_SOURCE = 0x0101,
EXTENSION_CAPABILITIES = 0xFFEE
};
struct osd_continuation_descriptor_header {
__be16 type;
u8 reserved;
u8 pad_length;
__be32 length;
} __packed;
/* osd2r05a sec 5.4.2: Scatter/gather list */
struct osd_sg_list_entry {
__be64 offset;
__be64 len;
};
struct osd_sg_continuation_descriptor {
struct osd_continuation_descriptor_header hdr;
struct osd_sg_list_entry entries[];
};
#endif /* ndef __OSD_PROTOCOL_H__ */
@@ -37,4 +37,9 @@ struct osd_attr {
void *val_ptr; /* in network order */
};
struct osd_sg_entry {
u64 offset;
u64 len;
};
#endif /* ndef __OSD_TYPES_H__ */
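
As a rough illustration of how the scatter/gather kernel-buffer interface declared above (osd_req_write_sg_kern() together with the new struct osd_sg_entry) might be used, the sketch below writes two kernel buffers to two extents of a single object. The helper name, buffer layout and offsets are invented for the example, and the struct osd_request and struct osd_obj_id are assumed to have been prepared through the usual libosd request setup; only the calls shown in the header changes above are relied on.

/* Sketch only: write two kernel buffers to two byte ranges of one object. */
static int example_sg_write(struct osd_request *or, const struct osd_obj_id *obj,
			    void *buf_a, void *buf_b, u64 chunk)
{
	/* One osd_sg_entry per extent of the object being written. */
	struct osd_sg_entry sglist[2] = {
		{ .offset = 0,         .len = chunk },	/* first extent */
		{ .offset = 4 * chunk, .len = chunk },	/* second extent */
	};
	/* One kernel buffer per sg entry, in the same order. */
	void *buffs[2] = { buf_a, buf_b };

	/* Queue the scatter/gather write; the request is finalized and
	 * executed later by the normal libosd submission path. */
	return osd_req_write_sg_kern(or, obj, buffs, sglist, 2);
}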