From 0f7f11ff7fac6843310feafca2c4329a4bbf4870 Mon Sep 17 00:00:00 2001
From: hgchu
Date: Fri, 12 Jan 2018 10:49:15 +0900
Subject: [PATCH] scsi: ufs: Add support for hibernation control

Introduce ufshcd_link_hibern8_ctrl() as a wrapper around the UIC
hibern8 enter/exit calls so that the vendor hibern8_notify hook is
invoked before and after every link hibernation state change. In
addition, reject device management commands while the link is not
active, flush the pending error handler works before the clocks are
disabled during suspend, and set/clear the error handling state around
host reset and restore.

Change-Id: I40feacabe23011491cfb24119ba4d861dfbd738d
Signed-off-by: hgchu
---
 drivers/scsi/ufs/ufshcd.c | 50 +++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 7 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 193f67f5424b..53c69646937e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -232,6 +232,7 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
@@ -1431,7 +1432,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 	/* Prevent gating in this path */
 	hba->clk_gating.is_suspended = true;
 	if (ufshcd_is_link_hibern8(hba)) {
-		ret = ufshcd_uic_hibern8_exit(hba);
+		ret = ufshcd_link_hibern8_ctrl(hba, false);
 		if (ret)
 			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
 					__func__, ret);
@@ -1560,7 +1561,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
-		if (ufshcd_uic_hibern8_enter(hba)) {
+		if (ufshcd_link_hibern8_ctrl(hba, true)) {
 			hba->clk_gating.state = CLKS_ON;
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
@@ -1690,7 +1691,7 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
-	hba->clk_gating.delay_ms = 150;
+	hba->clk_gating.delay_ms = LINK_H8_DELAY;
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
@@ -1719,8 +1720,6 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 		return;
 	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
-	cancel_work_sync(&hba->clk_gating.ungate_work);
-	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
 
 /* Must be called with host lock acquired */
@@ -2594,6 +2593,10 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	struct completion wait;
 	unsigned long flags;
 
+	if (!ufshcd_is_link_active(hba)) {
+
+		return -EPERM;
+	}
 	down_read(&hba->clk_scaling_lock);
 
 	/*
@@ -3851,6 +3854,26 @@ static void ufshcd_init_pwr_info(struct ufs_hba *hba)
 	hba->pwr_info.hs_rate = 0;
 }
 
+static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
+{
+	int ret;
+
+	if (hba->vops && hba->vops->hibern8_notify)
+		hba->vops->hibern8_notify(hba, en, PRE_CHANGE);
+
+	if (en)
+		ret = ufshcd_uic_hibern8_enter(hba);
+	else
+		ret = ufshcd_uic_hibern8_exit(hba);
+	if (ret)
+		goto out;
+
+	if (hba->vops && hba->vops->hibern8_notify)
+		hba->vops->hibern8_notify(hba, en, POST_CHANGE);
+out:
+	return ret;
+}
+
 /**
  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  * @hba: per-adapter instance
@@ -5862,6 +5885,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 
 	/* Reset the host controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
 	ufshcd_hba_stop(hba, false);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -5875,6 +5900,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 		dev_err(hba->dev, "%s: failed\n", __func__);
 		err = -EIO;
 	}
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	return err;
 }
@@ -6661,6 +6689,7 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
 	.can_queue		= UFSHCD_CAN_QUEUE,
 	.max_host_blocked	= 1,
+	.skip_settle_delay	= 1,
 	.track_queue_depth	= 1,
 };
 
@@ -7227,7 +7256,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 		return 0;
 
 	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
-		ret = ufshcd_uic_hibern8_enter(hba);
+		ret = ufshcd_link_hibern8_ctrl(hba, true);
 		if (!ret)
 			ufshcd_set_link_hibern8(hba);
 		else
@@ -7435,6 +7464,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		goto set_dev_active;
 
 disable_clks:
+
+	/*
+	 * Flush pending works before clock is disabled
+	 */
+	cancel_work_sync(&hba->eh_work);
+	cancel_work_sync(&hba->eeh_work);
+
 	/*
 	 * Disable the host irq as host controller as there won't be any
 	 * host controller trasanction expected till resume.
@@ -7528,7 +7564,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		goto disable_vreg;
 
 	if (ufshcd_is_link_hibern8(hba)) {
-		ret = ufshcd_uic_hibern8_exit(hba);
+		ret = ufshcd_link_hibern8_ctrl(hba, false);
 		if (!ret)
 			ufshcd_set_link_active(hba);
 		else
-- 
2.20.1