mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-08 15:04:45 +00:00)
scsi: ufs: mcq: Use ufshcd_mcq_poll_cqe_lock() in MCQ mode
In preparation for adding MCQ error handler support, update the MCQ code to use ufshcd_mcq_poll_cqe_lock() in interrupt context instead of ufshcd_mcq_poll_cqe_nolock(). This keeps the MCQ interrupt and error handler contexts synchronized, because both need to access the MCQ hardware from separate contexts.

Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Link: https://lore.kernel.org/r/6ae727ad2a4040469b8f0632b55e0577d80da11b.1685396241.git.quic_nguyenb@quicinc.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Tested-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent f1304d4420
commit 57d6ef4601
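For context, the locked variant promoted by this patch is a thin wrapper around the poll loop: it takes the per-queue completion-queue lock, calls the now-static ufshcd_mcq_poll_cqe_nolock(), and releases the lock. The wrapper body is not part of the hunks below; the following is a minimal sketch of that pattern, assuming hwq->cq_lock is the spinlock guarding each hardware queue's completion queue:

/*
 * Minimal sketch of the locked poll wrapper (its body is not shown in
 * this diff). Assumes hwq->cq_lock serializes completion-queue access
 * between the interrupt path and the error handler path.
 */
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs;

	spin_lock(&hwq->cq_lock);
	completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
	spin_unlock(&hwq->cq_lock);

	return completed_reqs;
}

With every consumer going through this wrapper, the nolock variant can lose its export and become static, as the first hunk shows.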
drivers/ufs/core/ufs-mcq.c
@@ -284,8 +284,8 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
 	ufshcd_compl_one_cqe(hba, tag, cqe);
 }
 
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq)
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+						struct ufs_hw_queue *hwq)
 {
 	unsigned long completed_reqs = 0;
 
@@ -301,7 +301,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq)
@@ -314,6 +313,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
drivers/ufs/core/ufshcd-priv.h
@@ -71,8 +71,6 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
 void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
drivers/ufs/core/ufshcd.c
@@ -6804,7 +6804,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
 		ufshcd_mcq_write_cqis(hba, events, i);
 
 		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
-			ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+			ufshcd_mcq_poll_cqe_lock(hba, hwq);
 	}
 
 	return IRQ_HANDLED;
drivers/ufs/host/ufs-qcom.c
@@ -1556,7 +1556,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
 	struct ufs_hw_queue *hwq = &hba->uhq[id];
 
 	ufshcd_mcq_write_cqis(hba, 0x1, id);
-	ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
 }
include/ufs/ufshcd.h
@@ -1243,7 +1243,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 					 struct ufs_hw_queue *hwq);
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);