mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 09:13:38 +00:00)
Merge branch '6.6/scsi-staging' into 6.6/scsi-fixes

Pull in staged fixes for 6.6.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

commit 4f6cee6045
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2332,7 +2332,7 @@ struct megasas_instance {
 	u32 support_morethan256jbod;	/* FW support for more than 256 PD/JBOD */
 	bool use_seqnum_jbod_fp;	/* Added for PD sequence */
 	bool smp_affinity_enable;
-	spinlock_t crashdump_lock;
+	struct mutex crashdump_lock;
 
 	struct megasas_register_set __iomem *reg_set;
 	u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev,
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
 	int val = 0;
-	unsigned long flags;
 
 	if (kstrtoint(buf, 0, &val) != 0)
 		return -EINVAL;
 
-	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	mutex_lock(&instance->crashdump_lock);
 	instance->fw_crash_buffer_offset = val;
-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	mutex_unlock(&instance->crashdump_lock);
 	return strlen(buf);
 }
 
@@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev,
 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
 	unsigned long chunk_left_bytes;
 	unsigned long src_addr;
-	unsigned long flags;
 	u32 buff_offset;
 
-	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	mutex_lock(&instance->crashdump_lock);
 	buff_offset = instance->fw_crash_buffer_offset;
 	if (!instance->crash_dump_buf ||
 	    !((instance->fw_crash_state == AVAILABLE) ||
 	      (instance->fw_crash_state == COPYING))) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump is not available\n");
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		return -EINVAL;
 	}
 
 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump offset is out of range\n");
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		return 0;
 	}
 
@@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev,
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
 		(buff_offset % dmachunk);
 	memcpy(buf, (void *)src_addr, size);
-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	mutex_unlock(&instance->crashdump_lock);
 
 	return size;
 }
@@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev,
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
 	int val = 0;
-	unsigned long flags;
 
 	if (kstrtoint(buf, 0, &val) != 0)
 		return -EINVAL;
@@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev,
 	instance->fw_crash_state = val;
 
 	if ((val == COPIED) || (val == COPY_ERROR)) {
-		spin_lock_irqsave(&instance->crashdump_lock, flags);
+		mutex_lock(&instance->crashdump_lock);
 		megasas_free_host_crash_buffer(instance);
-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		mutex_unlock(&instance->crashdump_lock);
 		if (val == COPY_ERROR)
 			dev_info(&instance->pdev->dev, "application failed to "
 				"copy Firmware crash dump\n");
@@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
 	init_waitqueue_head(&instance->int_cmd_wait_q);
 	init_waitqueue_head(&instance->abort_cmd_wait_q);
 
-	spin_lock_init(&instance->crashdump_lock);
+	mutex_init(&instance->crashdump_lock);
 	spin_lock_init(&instance->mfi_pool_lock);
 	spin_lock_init(&instance->hba_lock);
 	spin_lock_init(&instance->stream_lock);
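The megaraid_sas hunks above convert crashdump_lock from a spinlock to a mutex: the lock is only taken from sysfs store/show handlers, which run in process context, and the protected sections (large buffer copies, megasas_free_host_crash_buffer()) may sleep, which is not allowed under a spinlock with interrupts disabled. A minimal sketch of the same pattern, with hypothetical names (demo_dev and demo_dump_read are illustrative, not part of the driver):

	#include <linux/mutex.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct demo_dev {
		struct mutex dump_lock;	/* was spinlock_t + spin_lock_irqsave() */
		void *dump_buf;		/* copies from it may sleep */
		unsigned long dump_off;
	};

	static ssize_t demo_dump_read(struct demo_dev *dd, char *buf, size_t size)
	{
		ssize_t ret = -EINVAL;

		mutex_lock(&dd->dump_lock);	/* sleeping is legal here */
		if (dd->dump_buf) {
			memcpy(buf, dd->dump_buf + dd->dump_off, size);
			ret = size;
		}
		mutex_unlock(&dd->dump_lock);
		return ret;
	}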
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
@@ -307,9 +307,9 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
 	case PPA_EPP_8:
 		epp_reset(ppb);
 		w_ctr(ppb, 0x4);
-		if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
+		if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
 			outsl(ppb + 4, buffer, len >> 2);
-		else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
+		else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
 			outsw(ppb + 4, buffer, len >> 1);
 		else
 			outsb(ppb + 4, buffer, len);
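The ppa fix swaps two alignment masks that had been accidentally reversed: 32-bit outsl() transfers need the buffer address and length 4-byte aligned (low two bits clear, mask 0x03), while 16-bit outsw() transfers only need 2-byte alignment (mask 0x01). A small illustration of the mask arithmetic, with hypothetical helper names:

	#include <linux/types.h>

	/* A pointer/length is 2^n-byte aligned iff its n low bits are clear.
	 * OR-ing address and length tests both at once, as ppa_out() does. */
	static bool ok_for_32bit_io(const void *buf, long len)
	{
		return (((unsigned long)buf | (unsigned long)len) & 0x03) == 0;
	}

	static bool ok_for_16bit_io(const void *buf, long len)
	{
		return (((unsigned long)buf | (unsigned long)len) & 0x01) == 0;
	}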
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 		goto drop_rdata_kref;
 	}
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 		  "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
 		  io_req->xid, io_req->sc_cmd);
 		rc = 1;
+		spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		goto drop_rdata_kref;
 	}
 
+	/* Set the command type to abort */
+	io_req->cmd_type = QEDF_ABTS;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
 	kref_get(&io_req->refcount);
 
 	xid = io_req->xid;
 	qedf->control_requests++;
 	qedf->packet_aborts++;
 
-	/* Set the command type to abort */
-	io_req->cmd_type = QEDF_ABTS;
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
@@ -2210,7 +2214,9 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 		  refcount, fcport, fcport->rdata->ids.port_id);
 
 	/* Cleanup cmds re-use the same TID as the original I/O */
+	spin_lock_irqsave(&fcport->rport_lock, flags);
 	io_req->cmd_type = QEDF_CLEANUP;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
 	init_completion(&io_req->cleanup_done);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
@@ -2805,6 +2805,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 	struct qedf_ioreq *io_req;
 	struct qedf_rport *fcport;
 	u32 comp_type;
+	u8 io_comp_type;
+	unsigned long flags;
 
 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
 	    FCOE_CQE_CQE_TYPE_MASK;
@@ -2838,11 +2840,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 		return;
 	}
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
+	io_comp_type = io_req->cmd_type;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 	switch (comp_type) {
 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
 		atomic_inc(&fcport->free_sqes);
 
-		switch (io_req->cmd_type) {
+		switch (io_comp_type) {
 	 	case QEDF_SCSI_CMD:
 			qedf_scsi_completion(qedf, cqe, io_req);
 			break;
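Together these qedf hunks close a race between the abort/cleanup paths and the completion path: io_req->cmd_type is now only written under fcport->rport_lock, and the completion handler takes a snapshot of it under the same lock before dispatching, so a concurrent abort cannot change the type mid-switch. The pattern, reduced to a sketch (demo_* names are illustrative, not driver code):

	#include <linux/spinlock.h>

	struct demo_req {
		spinlock_t lock;	/* serializes writers of cmd_type */
		int cmd_type;
	};

	static void demo_complete(struct demo_req *req)
	{
		unsigned long flags;
		int snap;

		/* Snapshot the field once, under the lock writers hold,
		 * then dispatch on the stable local copy. */
		spin_lock_irqsave(&req->lock, flags);
		snap = req->cmd_type;
		spin_unlock_irqrestore(&req->lock, flags);

		switch (snap) {
		/* ... act on the snapshot, not on req->cmd_type ... */
		default:
			break;
		}
	}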
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
 
 	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
 	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
-	if (!fp->dfs_rport_dir)
+	if (IS_ERR(fp->dfs_rport_dir))
 		return;
 	if (NVME_TARGET(vha->hw, fp))
 		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
@@ -708,14 +708,14 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-		if (!ha->tgt.dfs_naqp) {
+		if (IS_ERR(ha->tgt.dfs_naqp)) {
 			ql_log(ql_log_warn, vha, 0xd011,
 			    "Unable to create debugFS naqp node.\n");
 			goto out;
 		}
 	}
 	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
-	if (!vha->dfs_rport_root) {
+	if (IS_ERR(vha->dfs_rport_root)) {
 		ql_log(ql_log_warn, vha, 0xd012,
 		    "Unable to create debugFS rports node.\n");
 		goto out;
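The qla_dfs hunks fix dead error checks: debugfs_create_dir() and debugfs_create_file() return an ERR_PTR() on failure rather than NULL, so `if (!ptr)` never fires and the test must be IS_ERR(). A minimal sketch of the corrected pattern (demo_* names are illustrative):

	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/printk.h>

	static struct dentry *demo_dir;

	static void demo_dfs_setup(void)
	{
		demo_dir = debugfs_create_dir("demo", NULL);
		if (IS_ERR(demo_dir)) {	/* failure is ERR_PTR(), never NULL */
			pr_warn("unable to create debugfs dir\n");
			return;
		}
	}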
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
@@ -577,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport)
 static inline struct qla_qpair *
 qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
 {
-	int cpuid = smp_processor_id();
+	int cpuid = raw_smp_processor_id();
 
 	if (qpair->cpuid != cpuid &&
 	    ha->qp_cpu_map[cpuid]) {
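smp_processor_id() is only legal in non-preemptible context and triggers a "BUG: using smp_processor_id() in preemptible" splat otherwise; raw_smp_processor_id() skips that check and is appropriate here because the CPU number is used purely as a locality hint for queue selection, where a stale value after migration is harmless. Roughly (demo_pick_queue is a hypothetical helper):

	#include <linux/smp.h>

	/* Pick a per-CPU queue as a hint. Preemption may migrate the task
	 * right after the read; the result is still a valid queue index,
	 * so the unchecked raw_smp_processor_id() is fine. */
	static int demo_pick_queue(int nr_queues)
	{
		return raw_smp_processor_id() % nr_queues;
	}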
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3965,7 +3965,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	if (!ha->flags.fw_started)
 		return;
 
-	if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
 		rsp->qpair->rcv_intr = 1;
 
 		if (!rsp->qpair->cpu_mapped)
@@ -4468,7 +4468,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
 	}
 	ha = qpair->hw;
 
-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+	queue_work(ha->wq, &qpair->q_work);
 
 	return IRQ_HANDLED;
 }
@@ -4494,7 +4494,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+	queue_work(ha->wq, &qpair->q_work);
 
 	return IRQ_HANDLED;
 }
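The queue_work_on() hunks here and in the qla_target/tcm_qla2xxx changes below follow the same reasoning: queue_work_on(smp_processor_id(), ...) hard-pins the work item to the current CPU, which can target a CPU going offline; plain queue_work() prefers the local CPU but lets the workqueue core fall back to another one. A sketch, assuming hypothetical demo_wq/demo_work objects:

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;
	static struct work_struct demo_work;

	static void demo_kick(void)
	{
		/* Local CPU if usable, any CPU otherwise; no explicit pin. */
		queue_work(demo_wq, &demo_work);
	}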
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -399,14 +399,14 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
 	nvme->u.nvme.dl = 0;
 	nvme->u.nvme.timeout_sec = 0;
 	nvme->u.nvme.cmd_dma = fd_resp->rspdma;
-	nvme->u.nvme.cmd_len = fd_resp->rsplen;
+	nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
 	nvme->u.nvme.rsp_len = 0;
 	nvme->u.nvme.rsp_dma = 0;
 	nvme->u.nvme.exchange_address = uctx->exchange_address;
 	nvme->u.nvme.nport_handle = uctx->nport_handle;
 	nvme->u.nvme.ox_id = uctx->ox_id;
 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-	    le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+	    fd_resp->rsplen, DMA_TO_DEVICE);
 
 	ql_dbg(ql_dbg_unsol, vha, 0x2122,
 	    "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
@@ -504,13 +504,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
 	nvme->u.nvme.desc = fd;
 	nvme->u.nvme.dir = 0;
 	nvme->u.nvme.dl = 0;
-	nvme->u.nvme.cmd_len = fd->rqstlen;
-	nvme->u.nvme.rsp_len = fd->rsplen;
+	nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+	nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
 	nvme->u.nvme.rsp_dma = fd->rspdma;
 	nvme->u.nvme.timeout_sec = fd->timeout;
 	nvme->u.nvme.cmd_dma = fd->rqstdma;
 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-	    le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
+	    fd->rqstlen, DMA_TO_DEVICE);
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
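These qla_nvme hunks pair with the include/linux/nvme-fc-driver.h change at the end of this diff: the nvmefc_ls_req length fields become native-endian, so the driver converts with cpu_to_le32() exactly once, when filling its own little-endian wire fields, and passes the raw CPU-endian value to dma_sync_single_for_device(), which expects a native size. The general rule, sketched with hypothetical names (demo_wire, demo_fill):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_wire {		/* device-visible, little-endian layout */
		__le32 cmd_len;
	};

	static void demo_fill(struct demo_wire *w, u32 len /* CPU-endian */)
	{
		w->cmd_len = cpu_to_le32(len);	/* convert at the wire boundary */
		/* ...and pass the untranslated 'len' to kernel APIs that
		 * take CPU-endian sizes, e.g. dma_sync_single_for_device(). */
	}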
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
@@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
 	} else if (ha->msix_count) {
 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
-			queue_work_on(smp_processor_id(), qla_tgt_wq,
-				      &cmd->work);
+			queue_work(qla_tgt_wq, &cmd->work);
 		else
 			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
 				      &cmd->work);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 	cmd->trc_flags |= TRC_CMD_DONE;
 
 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
 /*
@@ -547,7 +547,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	cmd->trc_flags |= TRC_DATA_IN;
 	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
 static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
@@ -1392,16 +1392,16 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;
 
-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_VENDOR_LEN) {
+	if (len < 0 || len > INQUIRY_VENDOR_LEN) {
 		pr_err("Emulated T10 Vendor Identification exceeds"
 		       " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
 		       "\n");
@@ -1448,16 +1448,16 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_MODEL_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;
 
-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_MODEL_LEN) {
+	if (len < 0 || len > INQUIRY_MODEL_LEN) {
 		pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
 		       __stringify(INQUIRY_MODEL_LEN)
 		       "\n");
@@ -1504,16 +1504,16 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
 	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
 	unsigned char buf[INQUIRY_REVISION_LEN + 2];
 	char *stripped = NULL;
-	size_t len;
+	ssize_t len;
 	ssize_t ret;
 
-	len = strlcpy(buf, page, sizeof(buf));
-	if (len < sizeof(buf)) {
+	len = strscpy(buf, page, sizeof(buf));
+	if (len > 0) {
 		/* Strip any newline added from userspace. */
 		stripped = strstrip(buf);
 		len = strlen(stripped);
 	}
-	if (len > INQUIRY_REVISION_LEN) {
+	if (len < 0 || len > INQUIRY_REVISION_LEN) {
 		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
 		       __stringify(INQUIRY_REVISION_LEN)
 		       "\n");
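The target_core_configfs.c hunks replace the deprecated strlcpy() with strscpy(), whose return value differs: strlcpy() returns the length of the source string (which can exceed the buffer), while strscpy() returns the number of characters copied, or -E2BIG on truncation. That is why len becomes a signed ssize_t and the bounds check gains a `len < 0` arm. A minimal sketch of the idiom (DEMO_MAX and demo_store are illustrative):

	#include <linux/string.h>
	#include <linux/errno.h>

	#define DEMO_MAX 8	/* illustrative limit */

	static int demo_store(const char *page)
	{
		char buf[DEMO_MAX + 2];
		ssize_t len;	/* signed: strscpy() may return -E2BIG */

		len = strscpy(buf, page, sizeof(buf));
		if (len < 0 || len > DEMO_MAX)
			return -EOVERFLOW;
		/* ... use buf ... */
		return 0;
	}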
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/sched/clock.h>
+#include <linux/iopoll.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_driver.h>
@@ -2299,7 +2300,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
  */
 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 {
-	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+	u32 val;
+	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
+				    REG_CONTROLLER_STATUS);
+	return ret == 0 ? true : false;
 }
 
 /**
@@ -2392,7 +2397,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 		      bool completion)
 {
 	lockdep_assert_held(&hba->uic_cmd_mutex);
-	lockdep_assert_held(hba->host->host_lock);
 
 	if (!ufshcd_ready_for_uic_cmd(hba)) {
 		dev_err(hba->dev,
@@ -2419,7 +2423,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
 	int ret;
-	unsigned long flags;
 
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
 		return 0;
@@ -2428,9 +2431,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	if (!ret)
 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
@@ -4133,8 +4134,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		wmb();
 		reenable_intr = true;
 	}
-	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
 	if (ret) {
 		dev_err(hba->dev,
 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
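ufshcd_ready_for_uic_cmd() now polls instead of sampling the status register once: read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...) from <linux/iopoll.h> repeatedly evaluates val = op(args...) until cond holds or timeout_us elapses, sleeping sleep_us between reads, and returns 0 on success or -ETIMEDOUT. Because it can sleep, the host_lock spinlock around __ufshcd_send_uic_cmd() had to go, which is what the remaining ufshcd hunks do. A generic sketch under assumed names (demo_readl, DEMO_REG_STATUS, and DEMO_READY are illustrative):

	#include <linux/iopoll.h>
	#include <linux/io.h>
	#include <linux/bits.h>

	#define DEMO_REG_STATUS	0x04		/* illustrative register offset */
	#define DEMO_READY	BIT(3)		/* illustrative ready bit */

	static u32 demo_readl(void __iomem *base, u32 off)
	{
		return readl(base + off);
	}

	static bool demo_wait_ready(void __iomem *base)
	{
		u32 val;
		int ret = read_poll_timeout(demo_readl, val, val & DEMO_READY,
					    500, 10 * 1000, false,
					    base, DEMO_REG_STATUS);

		return ret == 0;	/* 0 on success, -ETIMEDOUT otherwise */
	}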
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
@@ -53,10 +53,10 @@
 struct nvmefc_ls_req {
 	void *rqstaddr;
 	dma_addr_t rqstdma;
-	__le32 rqstlen;
+	u32 rqstlen;
 	void *rspaddr;
 	dma_addr_t rspdma;
-	__le32 rsplen;
+	u32 rsplen;
 	u32 timeout;
 
 	void *private;
@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
 struct nvmefc_ls_rsp {
 	void *rspbuf;
 	dma_addr_t rspdma;
-	__le32 rsplen;
+	u16 rsplen;
 
 	void (*done)(struct nvmefc_ls_rsp *rsp);
 	void *nvme_fc_private;	/* LLDD is not to access !! */