cfg->val = ufshcd_readl(hba, cfg->offset);
else if (sel_api == LOG_VS_HCI_SFR)
cfg->val = hci_readl(ufs, cfg->offset);
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
else if (sel_api == LOG_FMP_SFR)
cfg->val = exynos_smc(SMC_CMD_FMP_SMU_DUMP, 0, 0, cfg->offset);
+#endif
else if (sel_api == LOG_UNIPRO_SFR)
cfg->val = unipro_readl(ufs, cfg->offset);
else if (sel_api == LOG_PMA_SFR)
struct exynos_ufs *ufs = to_exynos_ufs(hba);
/* secure log */
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
+#endif
exynos_ufs_get_sfr(hba, ufs->debug.sfr);
exynos_ufs_get_attr(hba, ufs->debug.attr);
exynos_clki->freq = 0;
list_add_tail(&exynos_clki->list, &ufs->debug.misc.clk_list_head);
}
-
+#ifdef CONFIG_EXYNOS_SNAPSHOT
hba->secure_log.paddr = exynos_ss_get_spare_paddr(0);
hba->secure_log.vaddr = (u32 *)exynos_ss_get_spare_vaddr(0);
+#endif
return 0;
}
static int ufshcd_link_hibern8_ctrl(struct ufs_hba *hba, bool en);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+#if defined(CONFIG_PM_DEVFREQ)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+#endif
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
tag = cmd->request->tag;
/* secure log */
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
+#endif
/* Dump debugging information to system memory */
ufshcd_vops_dbg_register_dump(hba);
return ufshcd_eh_host_reset_handler(cmd);
/* secure log */
+#ifdef CONFIG_EXYNOS_SMC_LOGGING
exynos_smc(SMC_CMD_UFS_LOG, 1, 0, hba->secure_log.paddr);
+#endif
if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10) {
unsigned long lba = (unsigned long) ((cmd->cmnd[2] << 24) |
ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+#if defined(CONFIG_PM_DEVFREQ)
/* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true);
+#endif
/* Establish the link again and restore the device */
#ifdef CONFIG_SCSI_UFS_ASYNC_RELINK
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
if (!hba->devfreq) {
+#if defined(CONFIG_PM_DEVFREQ)
hba->devfreq = devm_devfreq_add_device(hba->dev,
&ufs_devfreq_profile,
"simple_ondemand",
NULL);
+#endif
if (IS_ERR(hba->devfreq)) {
ret = PTR_ERR(hba->devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n",
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_suspend_clkscaling(hba);
+#endif
if (ufshcd_is_clkscaling_supported(hba)) {
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
+#endif
destroy_workqueue(hba->clk_scaling.workq);
}
ufshcd_setup_clocks(hba, false);
if (hba->clk_scaling.is_allowed) {
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
+#if defined(CONFIG_PM_DEVFREQ)
ufshcd_suspend_clkscaling(hba);
+#endif
}
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
goto out;
set_link_active:
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
+#endif
if (ufshcd_is_shutdown_pm(pm_op))
goto out;
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
+#endif
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
hba->pm_op_in_progress = 0;
ufshcd_vops_suspend(hba, pm_op);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+#if defined(CONFIG_PM_DEVFREQ)
if (hba->clk_scaling.is_allowed)
ufshcd_suspend_clkscaling(hba);
+#endif
if (gating_allowed)
ufshcd_setup_clocks(hba, false);