author | Subhash Jadavani <subhashj@codeaurora.org> | 2015-07-20 10:31:04 -0700
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-22 11:00:22 -0700
commit | 00d5062d03023677f75993d7d76326833f958a9a (patch)
tree | 7c0cbeff17a47a125bbe9c7336b7db3424226373 /drivers/scsi/ufs/ufshcd.c
parent | dc63550fa391d4f15453fdc7e7db6f50a2383322 (diff)
scsi: ufs: add reference counting for scsi block requests
Currently we call scsi_block_requests()/scsi_unblock_requests() whenever
we want to block/unblock SCSI requests, but as there is no reference
counting, nesting these calls can leave us in an undesired state.
Consider the following call sequence:
1. func1() calls scsi_block_requests(), then calls func2() before
calling scsi_unblock_requests()
2. func2() calls scsi_block_requests()
3. func2() calls scsi_unblock_requests()
4. func1() calls scsi_unblock_requests()
With no reference counting, SCSI requests get unblocked after step #3
instead of only after step #4, leaving func1() running with requests it
believes are still blocked. No failures have been observed from this so
far, but it is a latent bug; fix it by adding reference counting around
the block/unblock calls.
Change-Id: I1eaa39c0716cf41120269cf389ebb322e3006da1
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
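To make the sequence above concrete, here is a minimal userspace sketch of the counting semantics (an illustrative mock, not the kernel code: mock_block()/mock_unblock() and block_reqs_cnt are stand-ins for scsi_block_requests()/scsi_unblock_requests() and hba->scsi_block_reqs_cnt, and the real driver additionally serializes the counter with host_lock):

```c
#include <stdio.h>

static int block_reqs_cnt;	/* mirrors hba->scsi_block_reqs_cnt */

static void mock_block(const char *who)
{
	/* Only the 0 -> 1 transition actually blocks requests. */
	if (!block_reqs_cnt++)
		printf("%s: requests BLOCKED\n", who);
	else
		printf("%s: nested block, cnt=%d\n", who, block_reqs_cnt);
}

static void mock_unblock(const char *who)
{
	/* Only the last unblocker (1 -> 0) actually unblocks. */
	if (!--block_reqs_cnt)
		printf("%s: requests UNBLOCKED\n", who);
	else
		printf("%s: still blocked, cnt=%d\n", who, block_reqs_cnt);
}

static void func2(void)
{
	mock_block("func2");	/* step 2: cnt 1 -> 2 */
	mock_unblock("func2");	/* step 3: cnt 2 -> 1, stays blocked */
}

int main(void)
{
	mock_block("func1");	/* step 1: cnt 0 -> 1, blocks */
	func2();
	mock_unblock("func1");	/* step 4: cnt 1 -> 0, unblocks only here */
	return 0;
}
```

Run as-is, the mock unblocks at step 4; replacing the counter with unconditional block/unblock calls reproduces the premature unblock at step 3 that this patch fixes.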
Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r-- | drivers/scsi/ufs/ufshcd.c | 52
1 file changed, 41 insertions(+), 11 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b2571ac21f39..f7090863971d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -397,6 +397,36 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool unblock = false;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->scsi_block_reqs_cnt--;
+	unblock = !hba->scsi_block_reqs_cnt;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (unblock)
+		scsi_unblock_requests(hba->host);
+}
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
+
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (!hba->scsi_block_reqs_cnt++)
+		scsi_block_requests(hba->host);
+}
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_scsi_block_requests(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {
@@ -1156,7 +1186,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 unblock_reqs:
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -1197,7 +1227,7 @@ start:
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 			hba->clk_gating.state);
@@ -1472,7 +1502,7 @@ start:
 		 * work and exit hibern8.
 		 */
 	case HIBERN8_ENTERED:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
 		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
 			hba->hibern8_on_idle.state);
@@ -1635,7 +1665,7 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
 		}
 	}
 unblock_reqs:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
@@ -5115,7 +5145,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eeh_work);
 
 	pm_runtime_get_sync(hba->dev);
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	err = ufshcd_get_ee_status(hba, &status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5129,7 +5159,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		ufshcd_bkops_exception_event_handler(hba);
 
 out:
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 	return;
 }
@@ -5392,7 +5422,7 @@ skip_err_handling:
 	ufshcd_clear_eh_in_progress(hba);
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release_all(hba);
 	pm_runtime_put_sync(hba->dev);
 }
@@ -5498,7 +5528,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 			/* block commands from scsi mid-layer */
-			scsi_block_requests(hba->host);
+			__ufshcd_scsi_block_requests(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 			schedule_work(&hba->eh_work);
@@ -8232,12 +8262,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	scsi_block_requests(hba->host);
+	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
-		scsi_unblock_requests(hba->host);
+		ufshcd_scsi_unblock_requests(hba);
 	}
 
 	return ret;
@@ -8246,7 +8276,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
 	up_write(&hba->clk_scaling_lock);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
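A note on the design, as inferred from the diff rather than stated in the commit message: the patch exposes two block variants. ufshcd_scsi_block_requests() takes host_lock itself and is used from process context (the exception event handler, clock scaling prepare), while the underscore-prefixed __ufshcd_scsi_block_requests() updates scsi_block_reqs_cnt without locking and is used at call sites that already run under host_lock, such as the clock-gating and hibern8 state machines and ufshcd_check_errors(). Both paths are balanced by ufshcd_scsi_unblock_requests(), which decrements the counter under the lock and only calls scsi_unblock_requests() on the 1 -> 0 transition.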