static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
/* Prevent gating in this path */
hba->clk_gating.is_suspended = true;
if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
+ ret = ufshcd_link_hibern8_ctrl(hba, false);
if (ret)
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
- if (ufshcd_uic_hibern8_enter(hba)) {
+ if (ufshcd_link_hibern8_ctrl(hba, true)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
if (!ufshcd_is_clkgating_allowed(hba))
return;
- hba->clk_gating.delay_ms = 150;
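+	/* Idle period in ms before the gate work is scheduled */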
+ hba->clk_gating.delay_ms = LINK_H8_DELAY;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
return;
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/* Must be called with host lock acquired */
struct completion wait;
unsigned long flags;
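+	/* Bail out early if the link is not active */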
+	if (!ufshcd_is_link_active(hba))
+		return -EPERM;
down_read(&hba->clk_scaling_lock);
/*
hba->pwr_info.hs_rate = 0;
}
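+/**
+ * ufshcd_link_hibern8_ctrl - control the UniPro link hibern8 state
+ * @hba: per-adapter instance
+ * @en: true to enter hibern8, false to exit it
+ *
+ * Wraps hibern8 enter/exit so that vendor hooks, when implemented, are
+ * notified before and after the link state change.
+ */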
+static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en)
+{
+ int ret;
+
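+	/* Give the vendor driver a chance to prepare for the change */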
+ if (hba->vops && hba->vops->hibern8_notify)
+ hba->vops->hibern8_notify(hba, en, PRE_CHANGE);
+
+ if (en)
+ ret = ufshcd_uic_hibern8_enter(hba);
+ else
+ ret = ufshcd_uic_hibern8_exit(hba);
+	if (ret)
+		goto out;
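+	/* Transition succeeded, notify the vendor driver */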
+ if (hba->vops && hba->vops->hibern8_notify)
+ hba->vops->hibern8_notify(hba, en, POST_CHANGE);
+out:
+ return ret;
+}
+
/**
* ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
* @hba: per-adapter instance
/* Reset the host controller */
spin_lock_irqsave(hba->host->host_lock, flags);
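+	/* Reject new requests while the reset is in progress */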
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
dev_err(hba->dev, "%s: failed\n", __func__);
err = -EIO;
}
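+	/* Reset done, allow requests to be issued again */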
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return err;
}
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_host_blocked = 1,
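+	/* The driver performs its own reset-settle delay */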
+ .skip_settle_delay = 1,
.track_queue_depth = 1,
};
return 0;
if (req_link_state == UIC_LINK_HIBERN8_STATE) {
- ret = ufshcd_uic_hibern8_enter(hba);
+ ret = ufshcd_link_hibern8_ctrl(hba, true);
if (!ret)
ufshcd_set_link_hibern8(hba);
else
goto set_dev_active;
disable_clks:
+
+	/*
+	 * Flush pending error handling and exception event works before
+	 * the clocks are disabled.
+	 */
+ cancel_work_sync(&hba->eh_work);
+ cancel_work_sync(&hba->eeh_work);
+
/*
 * Disable the host irq as there won't be any host controller
 * transaction expected till resume.
goto disable_vreg;
if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
+ ret = ufshcd_link_hibern8_ctrl(hba, false);
if (!ret)
ufshcd_set_link_active(hba);
else