#include "ufs-exynos.h"
#include "ufs-exynos-fmp.h"
#include "ufs-exynos-smu.h"
+#include <soc/samsung/exynos-fsys0-tcxo.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-soc.h>
+#include <linux/spinlock.h>
+
+
/*
#define IATOVAL_NSEC 20000 /* unit: ns */
+#if defined(CONFIG_SOC_EXYNOS9810)
+#define UFS_PHY_CONTROL_OFFSET 0x0724
+#endif
+
/* UFS CAL interface */
/*
*/
static struct exynos_ufs *ufs_host_backup[1];
static int ufs_host_index = 0;
+static spinlock_t fsys0_tcxo_lock;
static struct exynos_ufs_sfr_log ufs_log_std_sfr[] = {
{"CAPABILITIES" , REG_CONTROLLER_CAPABILITIES, 0},
return msecs_to_jiffies(ms);
}
+
static inline void exynos_ufs_ctrl_phy_pwr(struct exynos_ufs *ufs, bool en)
{
-	u32 reg;
-
-	reg = readl(ufs->phy.reg_pmu);
-	if (en)
-		writel(reg | BIT(0), ufs->phy.reg_pmu);
-	else
-		writel(reg & ~(BIT(0)), ufs->phy.reg_pmu);
+	/*
+	 * Set/clear bit 0 of the PMU UFS_PHY_CONTROL register through the
+	 * syscon regmap instead of a raw MMIO read-modify-write.  The two
+	 * branches differed only in the value written, so fold them into a
+	 * single regmap_update_bits() call.
+	 */
+	int ret = regmap_update_bits(ufs->pmureg, UFS_PHY_CONTROL_OFFSET,
+				     0x1, en ? 1 : 0);
+
+	if (ret)
+		dev_err(ufs->dev, "Unable to update PHY ISO control\n");
}
#ifndef __EXYNOS_UFS_VS_DEBUG__
hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
}
+/*
+ * Toggle the TCXO request bit (bit 16 of the PMU UFS_PHY_CONTROL register)
+ * via a regmap read-modify-write.  The modify/write steps are skipped when
+ * the initial read fails so an uninitialized value is never written back.
+ */
+static void exynos_ufs_tcxo_ctrl(struct exynos_ufs *ufs, bool tcxo_on)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(ufs->pmureg, UFS_PHY_CONTROL_OFFSET, &val);
+	if (!ret) {
+		if (tcxo_on)
+			val |= (1 << 16);
+		else
+			val &= ~(1 << 16);
+
+		ret = regmap_write(ufs->pmureg, UFS_PHY_CONTROL_OFFSET, val);
+	}
+
+	if (ret)
+		dev_err(ufs->dev, "Unable to access the pmureg using regmap\n");
+}
+
+
+/* Per-owner "is using the shared TCXO" flags (UFS host vs. the fsys0
+ * low-power path).  Callers must hold fsys0_tcxo_lock. */
+static bool tcxo_used_by[OWNER_MAX];
+
+/* Return the *other* owner's usage flag: non-zero means the peer still
+ * needs the TCXO. */
+static int exynos_check_shared_resource(int owner)
+{
+	if (owner == OWNER_FIRST)
+		return tcxo_used_by[OWNER_SECOND];
+	else
+		return tcxo_used_by[OWNER_FIRST];
+}
+
+
+/* Record this owner's use (or release) of the TCXO, then report whether
+ * the peer owner is still using it — i.e. whether the clock must stay on
+ * regardless of this owner's change. */
+static bool exynos_use_shared_resource(int owner, bool use)
+{
+	tcxo_used_by[owner] = use;
+
+	return exynos_check_shared_resource(owner);
+}
static int exynos_ufs_pre_setup_clocks(struct ufs_hba *hba, bool on)
{
struct exynos_ufs *ufs = to_exynos_ufs(hba);
int ret = 0;
+ unsigned long flags;
if (on) {
#ifdef CONFIG_CPU_IDLE
exynos_update_ip_idle_status(ufs->idle_ip_index, 0);
#endif
+
+ spin_lock_irqsave(&fsys0_tcxo_lock, flags);
+ if (exynos_use_shared_resource(OWNER_FIRST, on) == !on)
+ exynos_ufs_tcxo_ctrl(ufs, true);
+ spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);
+
/*
* Now all used blocks would not be turned off in a host.
*/
{
struct exynos_ufs *ufs = to_exynos_ufs(hba);
int ret = 0;
+ unsigned long flags;
if (on) {
/*
/* HWAGC enable */
exynos_ufs_set_hwacg_control(ufs, true);
+ spin_lock_irqsave(&fsys0_tcxo_lock, flags);
+ if (exynos_use_shared_resource(OWNER_FIRST, on) == on)
+ exynos_ufs_tcxo_ctrl(ufs, false);
+ spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);
+
+
#ifdef CONFIG_CPU_IDLE
exynos_update_ip_idle_status(ufs->idle_ip_index, 1);
#endif
static int exynos_ufs_populate_dt_phy(struct device *dev, struct exynos_ufs *ufs)
{
-	struct device_node *ufs_phy, *phy_sys;
+	struct device_node *ufs_phy;
	struct exynos_ufs_phy *phy = &ufs->phy;
	struct resource io_res;
+	/* NOTE(review): io_res looks unused now that the ioremap path below
+	 * is removed — confirm against the elided middle of this function,
+	 * otherwise delete it to avoid an unused-variable warning. */
+	struct device_node *phy_sys = dev->of_node;
	int ret;
	ufs_phy = of_get_child_by_name(dev->of_node, "ufs-phy");
		goto err_0;
	}
-	phy_sys = of_get_child_by_name(ufs_phy, "ufs-phy-sys");
-	if (!phy_sys) {
-		dev_err(dev, "failed to get ufs-phy-sys node\n");
-		ret = -ENODEV;
-		goto err_0;
-	}
-
-	ret = of_address_to_resource(phy_sys, 0, &io_res);
-	if (ret) {
-		dev_err(dev, "failed to get i/o address ufs-phy pmu\n");
-		goto err_1;
-	}
-
-	phy->reg_pmu = devm_ioremap_resource(dev, &io_res);
-	if (!phy->reg_pmu) {
-		dev_err(dev, "failed to ioremap for ufs-phy pmu\n");
-		ret = -ENOMEM;
-	}
+	/* Replace the direct ioremap of the PHY PMU region with a syscon
+	 * regmap looked up via the "samsung,pmu-phandle" DT property. */
+	ufs->pmureg = syscon_regmap_lookup_by_phandle(phy_sys,
+					"samsung,pmu-phandle");
+	if (IS_ERR(ufs->pmureg)) {
+		dev_err(dev, "syscon regmap lookup failed.\n");
+		return PTR_ERR(ufs->pmureg);
+	}
+	/* NOTE(review): on the success path ret may now reach the final
+	 * return uninitialized — the removed of_address_to_resource() call
+	 * used to set it.  Verify an unseen assignment exists, otherwise
+	 * add "ret = 0;" here. */
-err_1:
-	of_node_put(phy_sys);
err_0:
	of_node_put(ufs_phy);
	return ret;
}
+/*
+ * fsys0 low-power notifier callback: keeps the shared TCXO request in sync
+ * when the second owner enters or leaves sleep.  Serialized against the
+ * UFS clock setup paths by fsys0_tcxo_lock.
+ */
+static int exynos_ufs_lp_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct exynos_ufs *ufs =
+		container_of(nb, struct exynos_ufs, tcxo_nb);
+	int ret = NOTIFY_DONE;
+	bool on = true;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsys0_tcxo_lock, flags);
+	switch (event) {
+	case SLEEP_ENTER:
+		on = false;
+		/* Drop the TCXO request only if the first owner (UFS clock
+		 * path) is not using it either. */
+		if (exynos_use_shared_resource(OWNER_SECOND, on) == on)
+			exynos_ufs_tcxo_ctrl(ufs, false);
+		break;
+	case SLEEP_EXIT:
+		/* Re-assert the TCXO request unless the first owner already
+		 * holds it on. */
+		if (exynos_use_shared_resource(OWNER_SECOND, on) == !on)
+			exynos_ufs_tcxo_ctrl(ufs, true);
+		break;
+	}
+	spin_unlock_irqrestore(&fsys0_tcxo_lock, flags);
+
+	return ret;
+}
+
static u64 exynos_ufs_dma_mask = DMA_BIT_MASK(32);
static int exynos_ufs_probe(struct platform_device *pdev)
return ret;
}
+ ufs->tcxo_nb.notifier_call = exynos_ufs_lp_event;
+ ufs->tcxo_nb.next = NULL;
+ ufs->tcxo_nb.priority = 0;
+
+ ret = exynos_fsys0_tcxo_register_notifier(&ufs->tcxo_nb);
+ if (ret) {
+ dev_err(dev, "failed to register fsys0 txco notifier\n");
+ return ret;
+ }
#ifdef CONFIG_CPU_IDLE
ufs->idle_ip_index = exynos_get_idle_ip_index(dev_name(&pdev->dev));
exynos_update_ip_idle_status(ufs->idle_ip_index, 0);
dev->dma_mask = &exynos_ufs_dma_mask;
pm_qos_add_request(&ufs->pm_qos_fsys0, PM_QOS_FSYS0_THROUGHPUT, 0);
+ spin_lock_init(&fsys0_tcxo_lock);
ret = ufshcd_pltfrm_init(pdev, &exynos_ufs_ops);
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/suspend.h>
+
+#include <soc/samsung/exynos-fsys0-tcxo.h>
+
+#include <linux/notifier.h>
+
+static DEFINE_RWLOCK(exynos_fsys0_tcxo_notifier_lock);
+static RAW_NOTIFIER_HEAD(exynos_fsys0_tcxo_notifier_chain);
+
+
+/* Fan @event out to the registered fsys0 TCXO listeners.  @nr_to_call
+ * bounds how many callbacks run (-1 = all); when @nr_calls is non-NULL it
+ * receives the number actually invoked. */
+static int exynos_fsys0_tcxo_notify(enum exynos_fsys0_tcxo_event event, int nr_to_call, int *nr_calls)
+{
+	return notifier_to_errno(
+		__raw_notifier_call_chain(&exynos_fsys0_tcxo_notifier_chain,
+					  event, NULL, nr_to_call, nr_calls));
+}
+
+/* Add @nb to the chain notified on fsys0 TCXO sleep enter/exit events. */
+int exynos_fsys0_tcxo_register_notifier(struct notifier_block *nb)
+{
+	int ret;
+	unsigned long flags;
+
+	write_lock_irqsave(&exynos_fsys0_tcxo_notifier_lock, flags);
+	ret = raw_notifier_chain_register(&exynos_fsys0_tcxo_notifier_chain,
+					  nb);
+	write_unlock_irqrestore(&exynos_fsys0_tcxo_notifier_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_fsys0_tcxo_register_notifier);
+
+
+/* Remove @nb from the fsys0 TCXO notifier chain. */
+int exynos_fsys0_tcxo_unregister_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int ret;
+
+	write_lock_irqsave(&exynos_fsys0_tcxo_notifier_lock, flags);
+	ret = raw_notifier_chain_unregister(&exynos_fsys0_tcxo_notifier_chain, nb);
+	write_unlock_irqrestore(&exynos_fsys0_tcxo_notifier_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_fsys0_tcxo_unregister_notifier);
+
+
+
+/*
+ * Notify listeners that the fsys0 block is about to enter sleep.  If any
+ * listener vetoes, roll back by informing the (nr_calls - 1) listeners
+ * that already ran that entry failed.
+ */
+int exynos_fsys0_tcxo_sleep_enter(void)
+{
+	int ret = 0;
+	int nr_calls;
+
+	read_lock(&exynos_fsys0_tcxo_notifier_lock);
+	ret = exynos_fsys0_tcxo_notify(SLEEP_ENTER, -1, &nr_calls);
+	if (ret) {
+		exynos_fsys0_tcxo_notify(SLEEP_ENTER_FAIL, nr_calls - 1, NULL);
+	}
+	read_unlock(&exynos_fsys0_tcxo_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_fsys0_tcxo_sleep_enter);
+
+
+
+/* Notify listeners that the fsys0 block has left sleep. */
+int exynos_fsys0_tcxo_sleep_exit(void)
+{
+	int ret;
+
+	read_lock(&exynos_fsys0_tcxo_notifier_lock);
+	ret = exynos_fsys0_tcxo_notify(SLEEP_EXIT, -1, NULL);
+	read_unlock(&exynos_fsys0_tcxo_notifier_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_fsys0_tcxo_sleep_exit);