--- /dev/null
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/suspend.h>
+#include <linux/wakeup_reason.h>
+#include <linux/gpio.h>
+#include <linux/syscore_ops.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/psci.h>
+#include <linux/debugfs.h>
+#include <asm/cpuidle.h>
+#include <asm/smp_plat.h>
+
+#include <soc/samsung/exynos-pm.h>
+#include <soc/samsung/exynos-pmu.h>
+#include <soc/samsung/exynos-powermode.h>
+
+#define WAKEUP_STAT_EINT (1 << 0)
+#define WAKEUP_STAT_RTC_ALARM (1 << 1)
+/*
+ * PMU register offset
+ */
+#define EXYNOS_PMU_WAKEUP_STAT 0x0600
+#define EXYNOS_PMU_EINT_WAKEUP_MASK 0x060C
+#define BOOT_CPU 0
+
+extern u32 exynos_eint_to_pin_num(int eint);
+#define EXYNOS_EINT_PEND(b, x) ((b) + 0xA00 + (((x) >> 3) * 4))
+
+/* Runtime state and DT-provided resources for the suspend/resume path. */
+struct exynos_pm_info {
+	void __iomem *eint_base;		/* GPIO_ALIVE base to check wkup reason */
+	void __iomem *gic_base;			/* GICD_ISPENDRn base to check wkup reason */
+	unsigned int num_eint;			/* Total number of EINT sources */
+	unsigned int num_gic;			/* Total number of GIC sources */
+	bool is_early_wakeup;			/* true when the last suspend aborted early */
+	bool is_usbl2_suspend;			/* true when USB-L2 mode was chosen at suspend */
+	unsigned int suspend_mode_idx;		/* power mode to be used in suspend scenario */
+	unsigned int suspend_psci_idx;		/* psci index to be used in suspend scenario */
+	u8 num_extra_stat;			/* Total number of extra wakeup_stat */
+	unsigned int *extra_wakeup_stat;	/* Extra wakeup stat SFRs offset */
+
+	unsigned int usbl2_suspend_available;	/* DT flag: USB-L2 suspend supported */
+	unsigned int usbl2_suspend_mode_idx;	/* power mode to be used in suspend scenario */
+	bool (*usb_is_connect)(void);		/* set via register_usb_is_connect() */
+};
+/* Singleton, allocated in exynos_pm_drvinit(). */
+static struct exynos_pm_info *pm_info;
+
+/* Debugfs-controlled test knobs (see exynos_pm_debugfs_init()). */
+struct exynos_pm_dbg {
+	u32 test_early_wakeup;		/* force an IPI so suspend aborts early */
+	u32 test_usbl2_suspend;		/* force the USB-L2 suspend mode */
+};
+static struct exynos_pm_dbg *pm_dbg;
+
+/*
+ * Walk the pending EINT registers and log the wakeup IRQ(s) that woke
+ * the system.  A pending EINT that is also set in EINT_WAKEUP_MASK was
+ * not a wakeup source and is skipped.
+ */
+static void exynos_show_wakeup_reason_eint(void)
+{
+	int bit;
+	int i, size;
+	long unsigned int ext_int_pend;
+	u64 eint_wakeup_mask;
+	bool found = false;
+	unsigned int val;
+
+	exynos_pmu_read(EXYNOS_PMU_EINT_WAKEUP_MASK, &val);
+	eint_wakeup_mask = val;
+
+	/* Each EINT_PEND register holds 8 pending bits. */
+	for (i = 0, size = 8; i < pm_info->num_eint; i += size) {
+		ext_int_pend =
+			__raw_readl(EXYNOS_EINT_PEND(pm_info->eint_base, i));
+
+		for_each_set_bit(bit, &ext_int_pend, size) {
+			u32 gpio;
+			int irq;
+
+			/*
+			 * 64-bit shift: the mask is u64 and with more
+			 * than 32 EINTs "1 << (i + bit)" would be
+			 * undefined behavior (shift >= width of int).
+			 */
+			if (eint_wakeup_mask & (1ULL << (i + bit)))
+				continue;
+
+			gpio = exynos_eint_to_pin_num(i + bit);
+			irq = gpio_to_irq(gpio);
+
+#ifdef CONFIG_SUSPEND
+			log_wakeup_reason(irq);
+#endif
+			found = true;
+		}
+	}
+
+	if (!found)
+		pr_info("%s Resume caused by unknown EINT\n", EXYNOS_PM_PREFIX);
+}
+
+/*
+ * Dump WAKEUP_STAT (plus any extra wakeup-stat SFRs listed in DT) and
+ * the raw EINT pending registers for post-resume debugging.
+ */
+static void exynos_show_wakeup_registers(unsigned int wakeup_stat)
+{
+	int i, size;
+	/* unsigned to match the exynos_pmu_read() out-parameter type */
+	unsigned int extra_wakeup_stat;
+
+	pr_info("WAKEUP_STAT:\n");
+	pr_info("0x%08x\n", wakeup_stat);
+	for (i = 0; i < pm_info->num_extra_stat; i++) {
+		exynos_pmu_read(pm_info->extra_wakeup_stat[i], &extra_wakeup_stat);
+		pr_info("0x%08x\n", extra_wakeup_stat);
+	}
+
+	pr_info("EINT_PEND: ");
+	for (i = 0, size = 8; i < pm_info->num_eint; i += size)
+		pr_info("0x%02x ", __raw_readl(EXYNOS_EINT_PEND(pm_info->eint_base, i)));
+}
+
+/*
+ * Report why the system woke up.  On an aborted sleep, dump the raw
+ * EINT/GIC pending registers instead, since WAKEUP_STAT is not
+ * meaningful in that case.
+ */
+static void exynos_show_wakeup_reason(bool sleep_abort)
+{
+	unsigned int stat;
+	int idx;
+
+	if (sleep_abort) {
+		pr_info("%s early wakeup! Dumping pending registers...\n", EXYNOS_PM_PREFIX);
+
+		pr_info("EINT_PEND:\n");
+		for (idx = 0; idx < pm_info->num_eint; idx += 8)
+			pr_info("0x%x\n", __raw_readl(EXYNOS_EINT_PEND(pm_info->eint_base, idx)));
+
+		pr_info("GIC_PEND:\n");
+		for (idx = 0; idx < pm_info->num_gic; idx++)
+			pr_info("GICD_ISPENDR[%d] = 0x%x\n", idx, __raw_readl(pm_info->gic_base + idx*4));
+
+		pr_info("%s done.\n", EXYNOS_PM_PREFIX);
+		return;
+	}
+
+	exynos_pmu_read(EXYNOS_PMU_WAKEUP_STAT, &stat);
+	exynos_show_wakeup_registers(stat);
+
+	if (stat & WAKEUP_STAT_RTC_ALARM) {
+		pr_info("%s Resume caused by RTC alarm\n", EXYNOS_PM_PREFIX);
+	} else if (stat & WAKEUP_STAT_EINT) {
+		exynos_show_wakeup_reason_eint();
+	} else {
+		pr_info("%s Resume caused by wakeup_stat 0x%08x\n",
+			EXYNOS_PM_PREFIX, stat);
+	}
+}
+
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_RWLOCK(exynos_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(exynos_pm_notifier_chain);
+
+/*
+ * Register @nb on the exynos PM notifier chain.  The raw chain is
+ * protected by exynos_pm_notifier_lock.
+ */
+int exynos_pm_register_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int err;
+
+	write_lock_irqsave(&exynos_pm_notifier_lock, flags);
+	err = raw_notifier_chain_register(&exynos_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&exynos_pm_notifier_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(exynos_pm_register_notifier);
+EXPORT_SYMBOL_GPL(exynos_pm_register_notifier);
+
+/* Remove @nb from the exynos PM notifier chain under the write lock. */
+int exynos_pm_unregister_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int err;
+
+	write_lock_irqsave(&exynos_pm_notifier_lock, flags);
+	err = raw_notifier_chain_unregister(&exynos_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&exynos_pm_notifier_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(exynos_pm_unregister_notifier);
+
+/*
+ * Invoke the chain; caller must hold exynos_pm_notifier_lock (read).
+ * The notifier return code is converted to a standard errno.
+ */
+static int __exynos_pm_notify(enum exynos_pm_event event, int nr_to_call, int *nr_calls)
+{
+	int chain_ret = __raw_notifier_call_chain(&exynos_pm_notifier_chain,
+					event, NULL, nr_to_call, nr_calls);
+
+	return notifier_to_errno(chain_ret);
+}
+
+/* Notify all registered listeners of @event (e.g. SICD enter/exit). */
+int exynos_pm_notify(enum exynos_pm_event event)
+{
+	int nr_calls;
+	int err;
+
+	read_lock(&exynos_pm_notifier_lock);
+	err = __exynos_pm_notify(event, -1, &nr_calls);
+	read_unlock(&exynos_pm_notifier_lock);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(exynos_pm_notify);
+#endif /* CONFIG_CPU_IDLE */
+
+#if defined(CONFIG_SOC_EXYNOS8895)
+#define SLEEP_VTS_ON 9
+#define SLEEP_AUD_ON 10
+#endif
+
+/*
+ * Syscore suspend hook: abort if the CP is resetting, otherwise select
+ * the power-down mode (normal vs USB-L2) and program the PMU for it.
+ */
+static int exynos_pm_syscore_suspend(void)
+{
+	unsigned int mode;
+
+	if (!exynos_check_cp_status()) {
+		pr_info("%s %s: sleep canceled by CP reset \n",
+			EXYNOS_PM_PREFIX, __func__);
+		return -EINVAL;
+	}
+
+	/* USB-L2 applies only when available and a USB cable is present. */
+	pm_info->is_usbl2_suspend = false;
+	if (pm_info->usbl2_suspend_available &&
+	    !IS_ERR_OR_NULL(pm_info->usb_is_connect))
+		pm_info->is_usbl2_suspend = pm_info->usb_is_connect();
+
+	if (pm_info->is_usbl2_suspend || pm_dbg->test_usbl2_suspend) {
+		mode = pm_info->usbl2_suspend_mode_idx;
+		exynos_prepare_sys_powerdown(mode);
+		pr_info("%s %s: Enter Suspend scenario. usbl2_mode_idx = %d)\n",
+			EXYNOS_PM_PREFIX, __func__, mode);
+	} else {
+		mode = pm_info->suspend_mode_idx;
+		exynos_prepare_sys_powerdown(mode);
+		pr_info("%s %s: Enter Suspend scenario. suspend_mode_idx = %d)\n",
+			EXYNOS_PM_PREFIX, __func__, mode);
+	}
+
+	return 0;
+}
+
+/*
+ * Syscore resume hook: undo the power-down configuration chosen at
+ * suspend time and log the wakeup reason.
+ */
+static void exynos_pm_syscore_resume(void)
+{
+	unsigned int mode;
+
+	mode = (pm_info->is_usbl2_suspend || pm_dbg->test_usbl2_suspend) ?
+		pm_info->usbl2_suspend_mode_idx : pm_info->suspend_mode_idx;
+	exynos_wakeup_sys_powerdown(mode, pm_info->is_early_wakeup);
+
+	exynos_show_wakeup_reason(pm_info->is_early_wakeup);
+
+	if (!pm_info->is_early_wakeup)
+		pr_debug("%s %s: post sleep, preparing to return\n",
+			EXYNOS_PM_PREFIX, __func__);
+}
+
+/* Syscore hooks: run late in suspend, with one cpu and IRQs disabled. */
+static struct syscore_ops exynos_pm_syscore_ops = {
+	.suspend = exynos_pm_syscore_suspend,
+	.resume = exynos_pm_syscore_resume,
+};
+
+/*
+ * Suspend entry point.  Invokes PSCI system suspend through
+ * arm_cpuidle_suspend(); returns non-zero when the suspend was aborted
+ * before a full power-down.  MIF down counts are sampled around the
+ * suspend to detect whether MIF actually powered down, and the blocking
+ * requester is logged when it did not.
+ */
+static int exynos_pm_enter(suspend_state_t state)
+{
+	unsigned int psci_index;
+	unsigned int prev_mif = 0, post_mif = 0;
+	unsigned int prev_req;
+
+	psci_index = pm_info->suspend_psci_idx;
+
+	/* Send an IPI if test_early_wakeup flag is set */
+	if (pm_dbg->test_early_wakeup)
+		arch_send_call_function_single_ipi(0);
+
+	prev_mif = acpm_get_mifdn_count();
+	prev_req = acpm_get_mif_request();
+
+	pr_info("%s: prev mif_count %d\n", EXYNOS_PM_PREFIX, prev_mif);
+	/* This will also act as our return point when
+	 * we resume as it saves its own register state and restores it
+	 * during the resume. */
+	pm_info->is_early_wakeup = (bool)arm_cpuidle_suspend(psci_index);
+	if (pm_info->is_early_wakeup)
+		pr_info("%s %s: return to originator\n",
+			EXYNOS_PM_PREFIX, __func__);
+
+	post_mif = acpm_get_mifdn_count();
+	pr_info("%s: post mif_count %d\n", EXYNOS_PM_PREFIX, post_mif);
+
+	if (post_mif == prev_mif)
+		/* Typo fixed in message: was "Mster". */
+		pr_info("%s: MIF blocked. MIF request Master was 0x%x\n", EXYNOS_PM_PREFIX, prev_req);
+	else
+		pr_info("%s: MIF down. cur_count: %d, acc_count: %d\n",
+			EXYNOS_PM_PREFIX, post_mif - prev_mif, post_mif);
+
+	return pm_info->is_early_wakeup;
+}
+
+/* Platform suspend ops; only suspend-to-mem is valid. */
+static const struct platform_suspend_ops exynos_pm_ops = {
+	.enter = exynos_pm_enter,
+	.valid = suspend_valid_only_mem,
+};
+
+/*
+ * Register the callback used at suspend time to check whether USB is
+ * connected (selects the USB-L2 suspend mode).  Returns -ENXIO when
+ * @func is NULL.
+ */
+int register_usb_is_connect(bool (*func)(void))
+{
+	if (!func) {
+		pr_err("%s :function pointer is NULL \n", __func__);
+		return -ENXIO;
+	}
+
+	pm_info->usb_is_connect = func;
+	pr_info("Registered usb_is_connect func\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_usb_is_connect);
+
+/* True when the debugfs "test_usbl2_suspend" knob is set (and pm_dbg exists). */
+bool is_test_usbl2_suspend_set(void)
+{
+	return pm_dbg ? pm_dbg->test_usbl2_suspend : false;
+}
+EXPORT_SYMBOL_GPL(is_test_usbl2_suspend_set);
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Create the debugfs directory "exynos-pm" with the two u32 test knobs
+ * backing pm_dbg (test_early_wakeup, test_usbl2_suspend).
+ */
+static void __init exynos_pm_debugfs_init(void)
+{
+	struct dentry *root, *d;
+
+	root = debugfs_create_dir("exynos-pm", NULL);
+	if (!root) {
+		pr_err("%s %s: could't create debugfs dir\n", EXYNOS_PM_PREFIX, __func__);
+		return;
+	}
+
+	d = debugfs_create_u32("test_early_wakeup", 0644, root, &pm_dbg->test_early_wakeup);
+	if (!d) {
+		pr_err("%s %s: could't create debugfs test_early_wakeup\n",
+			EXYNOS_PM_PREFIX, __func__);
+		return;
+	}
+
+	d = debugfs_create_u32("test_usbl2_suspend", 0644, root, &pm_dbg->test_usbl2_suspend);
+	if (!d) {
+		pr_err("%s %s: could't create debugfs test_usbl2_suspend\n",
+			EXYNOS_PM_PREFIX, __func__);
+		return;
+	}
+}
+#endif
+
+/*
+ * Driver init: allocate state, parse the samsung,exynos-pm DT node
+ * (register bases, EINT/GIC counts, suspend mode/PSCI indices, optional
+ * USB-L2 mode, extra wakeup-stat offsets), then register suspend ops,
+ * syscore ops and the debugfs knobs.  Any missing mandatory resource is
+ * fatal (BUG), matching the original bring-up behavior.
+ */
+static __init int exynos_pm_drvinit(void)
+{
+	int ret;
+
+	pm_info = kzalloc(sizeof(struct exynos_pm_info), GFP_KERNEL);
+	if (pm_info == NULL) {
+		pr_err("%s %s: failed to allocate memory for exynos_pm_info\n",
+			EXYNOS_PM_PREFIX, __func__);
+		BUG();
+	}
+
+	pm_dbg = kzalloc(sizeof(struct exynos_pm_dbg), GFP_KERNEL);
+	if (pm_dbg == NULL) {
+		pr_err("%s %s: failed to allocate memory for exynos_pm_dbg\n",
+			EXYNOS_PM_PREFIX, __func__);
+		BUG();
+	}
+
+	if (of_have_populated_dt()) {
+		struct device_node *np;
+		np = of_find_compatible_node(NULL, NULL, "samsung,exynos-pm");
+		if (!np) {
+			pr_err("%s %s: unabled to find compatible node (%s)\n",
+				EXYNOS_PM_PREFIX, __func__, "samsung,exynos-pm");
+			BUG();
+		}
+
+		pm_info->eint_base = of_iomap(np, 0);
+		if (!pm_info->eint_base) {
+			pr_err("%s %s: unabled to ioremap EINT base address\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		pm_info->gic_base = of_iomap(np, 1);
+		if (!pm_info->gic_base) {
+			pr_err("%s %s: unbaled to ioremap GIC base address\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		ret = of_property_read_u32(np, "num-eint", &pm_info->num_eint);
+		if (ret) {
+			pr_err("%s %s: unabled to get the number of eint from DT\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		ret = of_property_read_u32(np, "num-gic", &pm_info->num_gic);
+		if (ret) {
+			pr_err("%s %s: unabled to get the number of gic from DT\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		ret = of_property_read_u32(np, "suspend_mode_idx", &pm_info->suspend_mode_idx);
+		if (ret) {
+			pr_err("%s %s: unabled to get suspend_mode_idx from DT\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		ret = of_property_read_u32(np, "suspend_psci_idx", &pm_info->suspend_psci_idx);
+		if (ret) {
+			pr_err("%s %s: unabled to get suspend_psci_idx from DT\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		}
+
+		ret = of_property_read_u32(np, "usbl2_suspend_available", &pm_info->usbl2_suspend_available);
+		if (ret) {
+			pr_info("%s %s: Not support usbl2_suspend mode\n",
+				EXYNOS_PM_PREFIX, __func__);
+		} else {
+			ret = of_property_read_u32(np, "usbl2_suspend_mode_idx", &pm_info->usbl2_suspend_mode_idx);
+			if (ret) {
+				pr_err("%s %s: unabled to get usbl2_suspend_mode_idx from DT\n",
+					EXYNOS_PM_PREFIX, __func__);
+				BUG();
+			}
+		}
+
+		ret = of_property_count_u32_elems(np, "extra_wakeup_stat");
+		/*
+		 * of_property_count_u32_elems() returns a negative errno
+		 * on failure; the old "!ret" check let errors fall
+		 * through to a negative-sized allocation below.
+		 */
+		if (ret <= 0) {
+			pr_err("%s %s: unabled to get wakeup_stat value from DT\n",
+				EXYNOS_PM_PREFIX, __func__);
+			BUG();
+		} else {
+			pm_info->num_extra_stat = ret;
+			pm_info->extra_wakeup_stat = kcalloc(ret, sizeof(unsigned int), GFP_KERNEL);
+			BUG_ON(!pm_info->extra_wakeup_stat);
+			of_property_read_u32_array(np, "extra_wakeup_stat", pm_info->extra_wakeup_stat, ret);
+		}
+	} else {
+		pr_err("%s %s: failed to have populated device tree\n",
+			EXYNOS_PM_PREFIX, __func__);
+		BUG();
+	}
+
+	suspend_set_ops(&exynos_pm_ops);
+	register_syscore_ops(&exynos_pm_syscore_ops);
+#ifdef CONFIG_DEBUG_FS
+	exynos_pm_debugfs_init();
+#endif
+
+	return 0;
+}
+arch_initcall(exynos_pm_drvinit);
--- /dev/null
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS Power mode
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/tick.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/psci.h>
+#include <linux/exynos-ss.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+
+#include <asm/smp_plat.h>
+
+#include <soc/samsung/exynos-pm.h>
+#include <soc/samsung/exynos-pmu.h>
+#include <soc/samsung/exynos-powermode.h>
+
+extern void big_reset_control(int en);
+
+/* Runtime state for CPD/SICD decisions and DT-provided wakeup masks. */
+struct exynos_powermode_info {
+	unsigned int	cpd_residency;		/* target residency of cpd */
+	unsigned int	sicd_residency;		/* target residency of sicd */
+
+	struct cpumask	c2_mask;		/* per cpu c2 status */
+
+	int		cpd_enabled;		/* CPD activation */
+	int		cpd_block_cpufreq;	/* blocking CPD by cpufreq */
+	int		cpd_block_boost;	/* blocking CPD by boost */
+
+	int		sicd_enabled;		/* SICD activation */
+	bool		sicd_entered;		/* set while the system is in SICD */
+
+	/*
+	 * While system boot, wakeup_mask and idle_ip_mask is intialized with
+	 * device tree. These are used by system power mode.
+	 */
+	unsigned int	num_wakeup_mask;
+	unsigned int	*wakeup_mask_offset;
+	unsigned int	*wakeup_mask[NUM_SYS_POWERDOWN];
+	int		idle_ip_mask[NUM_SYS_POWERDOWN][NUM_IDLE_IP];
+};
+
+/* Singleton, allocated in exynos_powermode_init(). */
+static struct exynos_powermode_info *pm_info;
+
+/******************************************************************************
+ * CAL interfaces *
+ ******************************************************************************/
+/* Map an MPIDR to a linear physical cpu number: (cluster << 2) | core. */
+static inline unsigned int linear_phycpu(unsigned int mpidr)
+{
+	unsigned int shift = (mpidr & MPIDR_MT_BITMASK) ? 1 : 0;
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, shift + 1);
+	unsigned int core = MPIDR_AFFINITY_LEVEL(mpidr, shift);
+
+	return (cluster << 2) | core;
+}
+/* Map an MPIDR to its physical cluster number. */
+static inline unsigned int linear_phycluster(unsigned int mpidr)
+{
+	unsigned int shift = (mpidr & MPIDR_MT_BITMASK) ? 1 : 0;
+
+	return MPIDR_AFFINITY_LEVEL(mpidr, shift + 1);
+}
+
+/* Power on @cpu's domain via CAL using its physical cpu number. */
+static void cpu_enable(unsigned int cpu)
+{
+	cal_cpu_enable(linear_phycpu(cpu_logical_map(cpu)));
+}
+
+/* Power off @cpu's domain via CAL using its physical cpu number. */
+static void cpu_disable(unsigned int cpu)
+{
+	cal_cpu_disable(linear_phycpu(cpu_logical_map(cpu)));
+}
+
+/* Power on @cpu's cluster, then release the big-core reset. */
+static void cluster_enable(unsigned int cpu)
+{
+	unsigned int mpidr = cpu_logical_map(cpu);
+	unsigned int phycluster = linear_phycluster(mpidr);
+
+	/* Order matters: enable the cluster before deasserting reset. */
+	cal_cluster_enable(phycluster);
+	big_reset_control(1);
+}
+
+/* Assert the big-core reset, then power off @cpu's cluster. */
+static void cluster_disable(unsigned int cpu)
+{
+	unsigned int mpidr = cpu_logical_map(cpu);
+	unsigned int phycluster = linear_phycluster(mpidr);
+
+	/* Mirror image of cluster_enable(): reset first, then disable. */
+	big_reset_control(0);
+	cal_cluster_disable(phycluster);
+}
+
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+
+/******************************************************************************
+ * IDLE_IP *
+ ******************************************************************************/
+#define PMU_IDLE_IP_BASE 0x03E0
+#define PMU_IDLE_IP_MASK_BASE 0x03F0
+#define PMU_IDLE_IP(x) (PMU_IDLE_IP_BASE + (x * 0x4))
+#define PMU_IDLE_IP_MASK(x) (PMU_IDLE_IP_MASK_BASE + (x * 0x4))
+
+extern void cpuidle_profile_collect_idle_ip(int mode, int index,
+ unsigned int idle_ip);
+
+/*
+ * Return 0 when every unmasked bit of IDLE_IP register @reg_index reads
+ * idle (1) for power mode @mode, -EBUSY otherwise.  Non-idle IPs are
+ * reported to the cpuidle profiler.
+ */
+static int exynos_check_idle_ip_stat(int mode, int reg_index)
+{
+	unsigned int val, mask;
+	int ret;
+
+	exynos_pmu_read(PMU_IDLE_IP(reg_index), &val);
+	mask = pm_info->idle_ip_mask[mode][reg_index];
+
+	/* Idle when all unmasked bits are 1. */
+	ret = (val & ~mask) == ~mask ? 0 : -EBUSY;
+
+	if (ret) {
+		/*
+		 * Profile non-idle IP using idle_ip.
+		 * A bit of idle-ip equals 0, it means non-idle. But then, if
+		 * same bit of idle-ip-mask is 1, PMU does not see this bit.
+		 * To know what IP blocks to enter system power mode, suppose
+		 * below example: (express only 8 bits)
+		 *
+		 * idle-ip  : 1 0 1 1 0 0 1 0
+		 * mask     : 1 1 0 0 1 0 0 1
+		 *
+		 * First, clear masked idle-ip bit.
+		 *
+		 * idle-ip  : 1 0 1 1 0 0 1 0
+		 * ~mask    : 0 0 1 1 0 1 1 0
+		 * -------------------------- (AND)
+		 * idle-ip' : 0 0 1 1 0 0 1 0
+		 *
+		 * In upper case, only idle-ip[2] is not in idle. Calculates
+		 * as follows, then we can get the non-idle IP easily.
+		 *
+		 * idle-ip' : 0 0 1 1 0 0 1 0
+		 * ~mask    : 0 0 1 1 0 1 1 0
+		 *--------------------------- (XOR)
+		 *            0 0 0 0 0 1 0 0
+		 */
+		cpuidle_profile_collect_idle_ip(mode, reg_index,
+				((val & ~mask) ^ ~mask));
+	}
+
+	return ret;
+}
+
+/* A system power mode is usable only if every IDLE_IP register reports idle. */
+static int syspwr_mode_available(unsigned int mode)
+{
+	int reg;
+
+	for_each_idle_ip(reg) {
+		if (exynos_check_idle_ip_stat(mode, reg))
+			return false;
+	}
+
+	return true;
+}
+
+static DEFINE_SPINLOCK(idle_ip_mask_lock);
+
+/* Program the per-mode IDLE_IP masks into the PMU, under the mask lock. */
+static void exynos_set_idle_ip_mask(enum sys_powerdown mode)
+{
+	unsigned long flags;
+	int reg;
+
+	spin_lock_irqsave(&idle_ip_mask_lock, flags);
+	for_each_idle_ip(reg)
+		exynos_pmu_write(PMU_IDLE_IP_MASK(reg),
+				pm_info->idle_ip_mask[mode][reg]);
+	spin_unlock_irqrestore(&idle_ip_mask_lock, flags);
+}
+
+/**
+ * There are 4 IDLE_IP registers in PMU, so IDLE_IP supports up to 128
+ * indices.  convert_idle_ip_index() splits a flat idle_ip index into a
+ * register index (returned) and a bit position within that register
+ * (written back through *ip_index).  E.g. index 33 -> IDLE_IP1[1].
+ */
+static int convert_idle_ip_index(int *ip_index)
+{
+	int reg = *ip_index / IDLE_IP_REG_SIZE;
+
+	*ip_index %= IDLE_IP_REG_SIZE;
+
+	return reg;
+}
+
+/* Clear the mask bit for @ip_index in @mode so the PMU watches that IP. */
+static void idle_ip_unmask(int mode, int ip_index)
+{
+	unsigned long flags;
+	int bit = ip_index;
+	int reg = convert_idle_ip_index(&bit);
+
+	spin_lock_irqsave(&idle_ip_mask_lock, flags);
+	pm_info->idle_ip_mask[mode][reg] &= ~(0x1 << bit);
+	spin_unlock_irqrestore(&idle_ip_mask_lock, flags);
+}
+
+/*
+ * Check whether @node's "ref-idle-ip" DT list references @ip_index.
+ * The property length is clamped to the local buffer size: a malformed
+ * DT with more than IDLE_IP_MAX_INDEX entries would otherwise overflow
+ * the on-stack ref_idle_ip[] array.
+ */
+static int is_idle_ip_index_used(struct device_node *node, int ip_index)
+{
+	int proplen;
+	int ref_idle_ip[IDLE_IP_MAX_INDEX];
+	int i;
+
+	proplen = of_property_count_u32_elems(node, "ref-idle-ip");
+
+	if (proplen <= 0 || proplen > IDLE_IP_MAX_INDEX)
+		return false;
+
+	if (!of_property_read_u32_array(node, "ref-idle-ip",
+					ref_idle_ip, proplen)) {
+		for (i = 0; i < proplen; i++)
+			if (ip_index == ref_idle_ip[i])
+				return true;
+	}
+
+	return false;
+}
+
+/*
+ * Unmask @ip_index in every system power mode whose idle_ip_mask DT
+ * child node references it.
+ */
+static void exynos_create_idle_ip_mask(int ip_index)
+{
+	struct device_node *root = of_find_node_by_path("/exynos-powermode/idle_ip_mask");
+	struct device_node *node;
+
+	for_each_child_of_node(root, node) {
+		int mode;
+
+		if (of_property_read_u32(node, "mode-index", &mode))
+			continue;
+
+		if (is_idle_ip_index_used(node, ip_index))
+			idle_ip_unmask(mode, ip_index);
+	}
+
+	/* Drop the reference taken by of_find_node_by_path(); NULL is a no-op. */
+	of_node_put(root);
+}
+
+/*
+ * Look up @ip_name in the DT "idle-ip" string list and return its
+ * index, after unmasking the corresponding bit in the per-mode idle-ip
+ * masks.  Returns a negative error when absent or out of range.
+ */
+int exynos_get_idle_ip_index(const char *ip_name)
+{
+	struct device_node *np = of_find_node_by_name(NULL, "exynos-powermode");
+	int ip_index;
+
+	ip_index = of_property_match_string(np, "idle-ip", ip_name);
+	/* Release the reference taken by of_find_node_by_name(). */
+	of_node_put(np);
+	if (ip_index < 0) {
+		pr_err("%s: Fail to find %s in idle-ip list with err %d\n",
+			__func__, ip_name, ip_index);
+		return ip_index;
+	}
+
+	if (ip_index > IDLE_IP_MAX_CONFIGURABLE_INDEX) {
+		pr_err("%s: %s index %d is out of range\n",
+			__func__, ip_name, ip_index);
+		return -EINVAL;
+	}
+
+	/**
+	 * If it successes to find IP in idle_ip list, we set
+	 * corresponding bit in idle_ip mask.
+	 */
+	exynos_create_idle_ip_mask(ip_index);
+
+	return ip_index;
+}
+
+static DEFINE_SPINLOCK(ip_idle_lock);
+
+/* Update one IP's bit in its IDLE_IP register (1 = idle, 0 = busy). */
+void exynos_update_ip_idle_status(int ip_index, int idle)
+{
+	unsigned long flags;
+	int reg;
+
+	/* Silently ignore indices outside the configurable range. */
+	if (ip_index < 0 || ip_index > IDLE_IP_MAX_CONFIGURABLE_INDEX)
+		return;
+
+	reg = convert_idle_ip_index(&ip_index);
+
+	spin_lock_irqsave(&ip_idle_lock, flags);
+	exynos_pmu_update(PMU_IDLE_IP(reg), 1 << ip_index, idle << ip_index);
+	spin_unlock_irqrestore(&ip_idle_lock, flags);
+}
+
+/*
+ * Fill @idle_ip_list with pointers to the DT "idle-ip" (and optional
+ * "fix-idle-ip") name strings, indexed by [register][bit] as produced
+ * by convert_idle_ip_index().
+ * NOTE(review): the strings point into DT property data and the node
+ * reference from of_find_node_by_name() is not released here — confirm
+ * this is acceptable for the callers' lifetimes.
+ */
+void exynos_get_idle_ip_list(char *(*idle_ip_list)[IDLE_IP_REG_SIZE])
+{
+	struct device_node *np = of_find_node_by_name(NULL, "exynos-powermode");
+	int size;
+	const char *list[IDLE_IP_MAX_CONFIGURABLE_INDEX];
+	int i, bit, reg_index;
+
+	size = of_property_count_strings(np, "idle-ip");
+	if (size < 0)
+		return;
+
+	of_property_read_string_array(np, "idle-ip", list, size);
+	/* bit re-tracks i each pass, then is split into reg/bit in place. */
+	for (i = 0, bit = 0; i < size; i++, bit = i) {
+		reg_index = convert_idle_ip_index(&bit);
+		idle_ip_list[reg_index][bit] = (char *)list[i];
+	}
+
+	size = of_property_count_strings(np, "fix-idle-ip");
+	if (size < 0)
+		return;
+
+	of_property_read_string_array(np, "fix-idle-ip", list, size);
+	for (i = 0; i < size; i++) {
+		if (!of_property_read_u32_index(np, "fix-idle-ip-index", i, &bit)) {
+			reg_index = convert_idle_ip_index(&bit);
+			idle_ip_list[reg_index][bit] = (char *)list[i];
+		}
+	}
+}
+
+/*
+ * Initialize every idle-ip mask to "fully masked" (0xFFFFFFFF), then
+ * unmask any fixed idle-ips listed in DT.  "fix-idle-ip" and
+ * "fix-idle-ip-index" must both exist and have the same length.
+ */
+static void __init init_idle_ip(void)
+{
+	struct device_node *np = of_find_node_by_name(NULL, "exynos-powermode");
+	int mode, index, size, i;
+
+	for_each_syspwr_mode(mode)
+		for_each_idle_ip(index)
+			pm_info->idle_ip_mask[mode][index] = 0xFFFFFFFF;
+
+	/*
+	 * To unmask fixed idle-ip, fix-idle-ip and fix-idle-ip-index,
+	 * both properties must be existed and size must be same.
+	 */
+	if (!of_find_property(np, "fix-idle-ip", NULL)
+			|| !of_find_property(np, "fix-idle-ip-index", NULL))
+		return;
+
+	size = of_property_count_strings(np, "fix-idle-ip");
+	if (size != of_property_count_u32_elems(np, "fix-idle-ip-index")) {
+		/* Typo fixed in message: was "fih-idle-ip". */
+		pr_err("Mismatch between fix-idle-ip and fix-idle-ip-index\n");
+		return;
+	}
+
+	for (i = 0; i < size; i++) {
+		of_property_read_u32_index(np, "fix-idle-ip-index", i, &index);
+		exynos_create_idle_ip_mask(index);
+	}
+}
+
+/******************************************************************************
+ * CPU power management *
+ ******************************************************************************/
+/**
+ * If cpu is powered down, c2_mask in struct exynos_powermode_info is set. On
+ * the contrary, cpu is powered on, c2_mask is cleard. To keep coherency of
+ * c2_mask, use the spinlock, c2_lock. In Exynos, it supports C2 subordinate
+ * power mode, CPD.
+ *
+ * - CPD (Cluster Power Down)
+ * All cpus in a cluster are set c2_mask, and these cpus have enough idle
+ * time which is longer than cpd_residency, cluster can be powered off.
+ *
+ * SICD (System Idle Clock Down) : All cpus are set c2_mask and these cpus
+ * have enough idle time which is longer than sicd_residency, and besides no
+ * device is operated, AP can be put into SICD.
+ */
+
+static DEFINE_SPINLOCK(c2_lock);
+
+/* Record @cpu's C2 (powered-down) state in pm_info->c2_mask. */
+static void update_c2_state(bool down, unsigned int cpu)
+{
+	if (!down)
+		cpumask_clear_cpu(cpu, &pm_info->c2_mask);
+	else
+		cpumask_set_cpu(cpu, &pm_info->c2_mask);
+}
+
+/* Time until this cpu's next tick event, in microseconds. */
+static s64 get_next_event_time_us(unsigned int cpu)
+{
+	ktime_t sleep_len = tick_nohz_get_sleep_length();
+
+	return ktime_to_us(sleep_len);
+}
+
+/*
+ * Return -EBUSY if any online cpu in @mask is not in C2 yet, or has
+ * less idle time ahead than @target_residency; 0 otherwise.
+ */
+static int is_cpus_busy(unsigned int target_residency,
+				const struct cpumask *mask)
+{
+	int i;
+
+	for_each_cpu_and(i, cpu_online_mask, mask) {
+		if (!cpumask_test_cpu(i, &pm_info->c2_mask))
+			return -EBUSY;
+
+		/* The next tick event bounds this cpu's idle time. */
+		if (get_next_event_time_us(i) < target_residency)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Cluster id (MPIDR affinity level 1) of @cpu. */
+static unsigned int get_cluster_id(unsigned int cpu)
+{
+	unsigned int mpidr = cpu_logical_map(cpu);
+
+	return MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
+/* The boot cluster is the one containing cpu0. */
+static bool is_cpu_boot_cluster(unsigned int cpu)
+{
+	return get_cluster_id(cpu) == get_cluster_id(0);
+}
+
+/*
+ * Cluster power down is possible when CPD is enabled and not blocked
+ * (by cpufreq or boost), @cpu is not in the boot cluster (powering the
+ * boot cluster down gains nothing), and every online sibling has
+ * enough idle time ahead.
+ */
+static int is_cpd_available(unsigned int cpu)
+{
+	struct cpumask online_siblings;
+
+	if (!pm_info->cpd_enabled ||
+	    pm_info->cpd_block_cpufreq ||
+	    pm_info->cpd_block_boost)
+		return false;
+
+	if (is_cpu_boot_cluster(cpu))
+		return false;
+
+	cpumask_and(&online_siblings, cpu_coregroup_mask(cpu), cpu_online_mask);
+	if (is_cpus_busy(pm_info->cpd_residency, &online_siblings))
+		return false;
+
+	return true;
+}
+
+/*
+ * SICD is possible only from the boot cluster (interrupts of the boot
+ * cluster are not blocked when a non-boot cpu enters SICD), when every
+ * online cpu has enough idle time and all idle-ips report idle.
+ */
+static int is_sicd_available(unsigned int cpu)
+{
+	if (!pm_info->sicd_enabled)
+		return false;
+
+	if (!is_cpu_boot_cluster(cpu))
+		return false;
+
+	if (is_cpus_busy(pm_info->sicd_residency, cpu_online_mask))
+		return false;
+
+	return syspwr_mode_available(SYS_SICD) ? true : false;
+}
+
+/**
+ * cluster_idle_state shows whether cluster is in idle or not.
+ *
+ * check_cluster_idle_state() : Show cluster idle state.
+ * If it returns true, cluster is in idle state.
+ * update_cluster_idle_state() : Update cluster idle state.
+ */
+/*
+ * Indexed by cluster id.  Written under c2_lock in the cpuidle paths;
+ * read without the lock by the cpufreq transition notifier.
+ */
+static int cluster_idle_state[MAX_CLUSTER];
+
+static int check_cluster_idle_state(unsigned int cpu)
+{
+	return cluster_idle_state[get_cluster_id(cpu)];
+}
+
+static void update_cluster_idle_state(int idle, unsigned int cpu)
+{
+	cluster_idle_state[get_cluster_id(cpu)] = idle;
+}
+
+/* Block or unblock cluster power down on behalf of the boost path. */
+void block_cpd(bool block)
+{
+	pm_info->cpd_block_boost = block;
+}
+
+/**
+ * Exynos cpuidle driver call exynos_cpu_pm_enter() and exynos_cpu_pm_exit() to
+ * handle platform specific configuration to power off the cpu power domain.
+ */
+/*
+ * Platform hook on cpuidle entry: powers this cpu's domain down and,
+ * when idle time permits, upgrades the requested PSCI index to cluster
+ * power down (CPD) or system idle clock down (SICD).  Returns the
+ * (possibly upgraded) PSCI index.
+ */
+int exynos_cpu_pm_enter(unsigned int cpu, int index)
+{
+	spin_lock(&c2_lock);
+	cpu_disable(cpu);
+
+	/* Mark this cpu powered-down; c2_mask feeds CPD/SICD decisions. */
+	update_c2_state(true, cpu);
+
+	/*
+	 * Below sequence determines whether to power down the cluster/enter SICD
+	 * or not. If idle time of cpu is not enough, go out of this function.
+	 */
+	if (get_next_event_time_us(cpu) <
+		min(pm_info->cpd_residency, pm_info->sicd_residency))
+		goto out;
+
+	if (is_cpd_available(cpu)) {
+		cluster_disable(cpu);
+		update_cluster_idle_state(true, cpu);
+
+		exynos_ss_cpuidle("CPD", 0, 0, ESS_FLAG_IN);
+
+		/* Deeper PSCI state: the whole cluster may power off. */
+		index = PSCI_CLUSTER_SLEEP;
+	}
+
+	/* CPD (non-boot cluster) and SICD (boot cluster) are exclusive. */
+	if (is_sicd_available(cpu)) {
+		if (exynos_prepare_sys_powerdown(SYS_SICD))
+			goto out;
+
+		/* Drain the UART FIFO before clocks go down. */
+		s3c24xx_serial_fifo_wait();
+		exynos_ss_cpuidle("SICD", 0, 0, ESS_FLAG_IN);
+
+		pm_info->sicd_entered = true;
+		index = PSCI_SYSTEM_IDLE;
+	}
+out:
+	spin_unlock(&c2_lock);
+
+	return index;
+}
+
+/*
+ * Platform hook on cpuidle exit: powers the cpu (and, if needed, its
+ * cluster) back on and unwinds any SICD configuration made on entry.
+ * @enter_failed selects the early-wakeup unwind path for SICD.
+ */
+void exynos_cpu_pm_exit(unsigned int cpu, int enter_failed)
+{
+	spin_lock(&c2_lock);
+	cpu_enable(cpu);
+
+	/* If this cluster was put into CPD on entry, bring it back up. */
+	if (check_cluster_idle_state(cpu)) {
+		cluster_enable(cpu);
+		update_cluster_idle_state(false, cpu);
+
+		exynos_ss_cpuidle("CPD", 0, 0, ESS_FLAG_OUT);
+	}
+
+	if (pm_info->sicd_entered) {
+		exynos_wakeup_sys_powerdown(SYS_SICD, enter_failed);
+		exynos_ss_cpuidle("SICD", 0, 0, ESS_FLAG_OUT);
+
+		pm_info->sicd_entered = false;
+	}
+
+	update_c2_state(false, cpu);
+
+	spin_unlock(&c2_lock);
+}
+
+/**
+ * powermode_attr_read() / show_##file_name() -
+ * print out power mode information
+ *
+ * powermode_attr_write() / store_##file_name() -
+ * sysfs write access
+ */
+/* Generates a sysfs show handler printing a 0/1 pm_info field. */
+#define show_one(file_name, object)			\
+static ssize_t show_##file_name(struct kobject *kobj,	\
+	struct kobj_attribute *attr, char *buf)		\
+{							\
+	return snprintf(buf, 3, "%d\n",			\
+				pm_info->object);	\
+}
+
+/* Generates a sysfs store handler normalizing input to 0/1. */
+#define store_one(file_name, object)			\
+static ssize_t store_##file_name(struct kobject *kobj,	\
+	struct kobj_attribute *attr, const char *buf,	\
+	size_t count)					\
+{							\
+	int input;					\
+							\
+	if (!sscanf(buf, "%1d", &input))		\
+		return -EINVAL;				\
+							\
+	pm_info->object = !!input;			\
+							\
+	return count;					\
+}
+
+/* Declares the kobj_attribute wiring show/store handlers for _name. */
+#define attr_rw(_name)					\
+static struct kobj_attribute _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+/* /sys/power/cpd and /sys/power/sicd enable knobs. */
+show_one(cpd, cpd_enabled);
+show_one(sicd, sicd_enabled);
+store_one(cpd, cpd_enabled);
+store_one(sicd, sicd_enabled);
+
+attr_rw(cpd);
+attr_rw(sicd);
+
+#endif /* __CONFIG_ARM64_EXYNOS_CPUIDLE__ */
+
+/******************************************************************************
+ * System power mode *
+ ******************************************************************************/
+#define PMU_EINT_WAKEUP_MASK	0x60C
+
+/* Program EINT and per-mode wakeup masks into the PMU before power down. */
+static void exynos_set_wakeupmask(enum sys_powerdown mode)
+{
+	u64 eintmask = exynos_get_eint_wake_mask();
+	int idx;
+
+	/* Set external interrupt mask */
+	exynos_pmu_write(PMU_EINT_WAKEUP_MASK, (u32)eintmask);
+
+	for (idx = 0; idx < pm_info->num_wakeup_mask; idx++)
+		exynos_pmu_write(pm_info->wakeup_mask_offset[idx],
+				pm_info->wakeup_mask[mode][idx]);
+}
+
+/*
+ * Configure the PMU for system power mode @mode: idle-ip masks, wakeup
+ * masks, then the CAL power-mode programming.  SICD listeners are
+ * notified on success.  Returns 0 on success or the CAL error code.
+ */
+int exynos_prepare_sys_powerdown(enum sys_powerdown mode)
+{
+	int ret;
+
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+	exynos_set_idle_ip_mask(mode);
+#endif
+	exynos_set_wakeupmask(mode);
+
+	ret = cal_pm_enter(mode);
+	if (ret) {
+		pr_err("CAL Fail to set powermode\n");
+		return ret;
+	}
+
+	/* Only SICD has registered listeners today. */
+	if (mode == SYS_SICD)
+		exynos_pm_notify(SICD_ENTER);
+
+	return ret;
+}
+
+/* Undo system power mode @mode after wakeup (early-abort or full exit). */
+void exynos_wakeup_sys_powerdown(enum sys_powerdown mode, bool early_wakeup)
+{
+	if (early_wakeup)
+		cal_pm_earlywakeup(mode);
+	else
+		cal_pm_exit(mode);
+
+	if (mode == SYS_SICD)
+		exynos_pm_notify(SICD_EXIT);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/******************************************************************************
+ * HOTPLUG FUNCTION *
+ ******************************************************************************/
+/* Hotplug-in: power the cluster on if this cpu is its first online core. */
+int exynos_hotplug_in_callback(unsigned int cpu)
+{
+	/* this function should be executed by this cpu. */
+	struct cpumask online_siblings;
+
+	cpumask_and(&online_siblings, cpu_coregroup_mask(cpu), cpu_online_mask);
+	if (cpumask_empty(&online_siblings))
+		cluster_enable(cpu);
+
+	cpu_enable(cpu);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_hotplug_in_callback);
+
+/* Hotplug-out: power the cluster off once no sibling remains online. */
+int exynos_hotplug_out_callback(unsigned int cpu)
+{
+	/* this function should be executed by this cpu. */
+	struct cpumask online_siblings;
+
+	cpu_disable(cpu);
+
+	cpumask_and(&online_siblings, cpu_coregroup_mask(cpu), cpu_online_mask);
+	if (cpumask_empty(&online_siblings))
+		cluster_disable(cpu);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_hotplug_out_callback);
+#endif
+
+/**
+ * powermode_cpufreq_transition() blocks cluster power down before a
+ * frequency change and releases the block once the change completes.
+ */
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+/* Empty IPI target used only to kick a cluster out of idle. */
+static void nop_func(void *info) {}
+#endif
+static int exynos_powermode_cpufreq_transition(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+
+	/*
+	 * Boot cluster does not support cluster power down.
+	 * Do nothing in this notify call.
+	 */
+	if (is_cpu_boot_cluster(cpu))
+		return NOTIFY_OK;
+
+	if (!pm_info->cpd_enabled)
+		return NOTIFY_OK;	/* stray ";;" removed */
+
+	switch (val) {
+	case CPUFREQ_PRECHANGE:
+		pm_info->cpd_block_cpufreq = true;
+		/* Wake the cluster if it is in CPD so the change can apply. */
+		if (check_cluster_idle_state(cpu))
+			smp_call_function_single(cpu, nop_func, NULL, 0);
+		break;
+	case CPUFREQ_POSTCHANGE:
+		pm_info->cpd_block_cpufreq = false;
+		break;
+	default:
+		break;
+	}
+#endif
+
+	return NOTIFY_OK;
+}
+
+/* Registered for CPUFREQ_TRANSITION_NOTIFIER in exynos_powermode_init(). */
+static struct notifier_block exynos_powermode_cpufreq_notifier = {
+	.notifier_call = exynos_powermode_cpufreq_transition,
+};
+
+/******************************************************************************
+ * Extern function *
+ ******************************************************************************/
+/*
+ * Report whether RTC wakeup is usable for SYS_SLEEP: returns 0 when at
+ * least one of the RTC alarm/tick bits is clear in the first SLEEP
+ * wakeup mask word, -ENXIO otherwise.
+ * NOTE(review): the meaning of a set bit (masked vs enabled) is defined
+ * by the DT "wakeup-masks" values — confirm against the PMU spec.
+ */
+int exynos_rtc_wakeup(void)
+{
+#define WAKEUP_MASK_RTC_TICK	BIT(2)
+#define WAKEUP_MASK_RTC_ALARM	BIT(1)
+	unsigned int sleep_mask = pm_info->wakeup_mask[SYS_SLEEP][0];
+
+	if (!(sleep_mask & WAKEUP_MASK_RTC_ALARM) ||
+			!(sleep_mask & WAKEUP_MASK_RTC_TICK))
+		return 0;
+
+	return -ENXIO;
+}
+
+/******************************************************************************
+ * Driver initialization *
+ ******************************************************************************/
+/*
+ * Allocate the wakeup-mask offset table and one mask array per system
+ * power mode.  On failure everything already allocated is freed and the
+ * stored pointers are reset, so no dangling pointers survive a failed
+ * call.  Returns 0 on success or -ENOMEM.
+ */
+static int alloc_wakeup_mask(int num_wakeup_mask)
+{
+	unsigned int mode;
+
+	pm_info->wakeup_mask_offset = kcalloc(num_wakeup_mask,
+			sizeof(unsigned int), GFP_KERNEL);
+	if (!pm_info->wakeup_mask_offset)
+		return -ENOMEM;
+
+	for_each_syspwr_mode(mode) {
+		pm_info->wakeup_mask[mode] = kcalloc(num_wakeup_mask,
+				sizeof(unsigned int), GFP_KERNEL);
+		if (!pm_info->wakeup_mask[mode])
+			goto free_reg_offset;
+	}
+
+	return 0;
+
+free_reg_offset:
+	for_each_syspwr_mode(mode) {
+		kfree(pm_info->wakeup_mask[mode]);
+		pm_info->wakeup_mask[mode] = NULL;	/* avoid dangling pointer */
+	}
+
+	kfree(pm_info->wakeup_mask_offset);
+	pm_info->wakeup_mask_offset = NULL;
+
+	return -ENOMEM;
+}
+
+/*
+ * Parse the "wakeup-masks" DT children: each child describes one PMU
+ * mask register, with per-mode "mask" values and a "reg-offset".
+ * Returns 0 on success or the DT read error.
+ * NOTE(review): node references are not of_node_put() here and the
+ * arrays from alloc_wakeup_mask() are not freed on the error paths —
+ * confirm whether that matters for this boot-time-only caller.
+ */
+static int parsing_dt_wakeup_mask(struct device_node *np)
+{
+	int ret;
+	struct device_node *root, *child;
+	unsigned int mode, mask_index = 0;
+
+	root = of_find_node_by_name(np, "wakeup-masks");
+	pm_info->num_wakeup_mask = of_get_child_count(root);
+
+	ret = alloc_wakeup_mask(pm_info->num_wakeup_mask);
+	if (ret)
+		return ret;
+
+	for_each_child_of_node(root, child) {
+		/* One "mask" cell per system power mode, in mode order. */
+		for_each_syspwr_mode(mode) {
+			ret = of_property_read_u32_index(child, "mask",
+				mode, &pm_info->wakeup_mask[mode][mask_index]);
+			if (ret)
+				return ret;
+		}
+
+		ret = of_property_read_u32(child, "reg-offset",
+				&pm_info->wakeup_mask_offset[mask_index]);
+		if (ret)
+			return ret;
+
+		mask_index++;
+	}
+
+	return 0;
+}
+
+/*
+ * Read powermode tunables from the "exynos-powermode" DT node.  Missing
+ * properties only warn and leave the zero-initialized default.  Always
+ * returns 0.
+ */
+static int __init dt_init(void)
+{
+	struct device_node *np = of_find_node_by_name(NULL, "exynos-powermode");
+	int ret;
+
+	ret = parsing_dt_wakeup_mask(np);
+	if (ret)
+		pr_warn("Fail to initialize the wakeup mask with err = %d\n", ret);
+
+	if (of_property_read_u32(np, "cpd_residency", &pm_info->cpd_residency))
+		pr_warn("No matching property: cpd_residency\n");
+
+	if (of_property_read_u32(np, "sicd_residency", &pm_info->sicd_residency))
+		pr_warn("No matching property: sicd_residency\n");
+
+	if (of_property_read_u32(np, "cpd_enabled", &pm_info->cpd_enabled))
+		pr_warn("No matching property: cpd_enabled\n");
+
+	if (of_property_read_u32(np, "sicd_enabled", &pm_info->sicd_enabled))
+		pr_warn("No matching property: sicd_enabled\n");
+
+	return 0;
+}
+
+/*
+ * Driver init: allocate state, parse DT, create the cpd/sicd sysfs
+ * knobs and hook cpufreq transitions.
+ */
+static int __init exynos_powermode_init(void)
+{
+	pm_info = kzalloc(sizeof(*pm_info), GFP_KERNEL);
+	if (!pm_info) {
+		pr_err("%s: failed to allocate exynos_powermode_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	dt_init();
+
+#ifdef CONFIG_ARM64_EXYNOS_CPUIDLE
+	init_idle_ip();
+
+	/* Sysfs creation failures are logged but not fatal. */
+	if (sysfs_create_file(power_kobj, &cpd.attr))
+		pr_err("%s: failed to create sysfs to control CPD\n", __func__);
+
+	if (sysfs_create_file(power_kobj, &sicd.attr))
+		pr_err("%s: failed to create sysfs to control SICD\n", __func__);
+#endif
+
+	cpufreq_register_notifier(&exynos_powermode_cpufreq_notifier,
+				CPUFREQ_TRANSITION_NOTIFIER);
+
+	return 0;
+}
+arch_initcall(exynos_powermode_init);
+
+/*
+ * Register cpuhp callbacks that power cpu/cluster domains up on online
+ * and down on offline.  Failures are logged (previously they were
+ * silently ignored); boot continues either way, matching the original
+ * behavior of always returning 0.
+ */
+static int __init exynos_powermode_cpu_hotplug_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state(CPUHP_AP_EXYNOS_CPU_UP_POWER_CONTROL,
+				"AP_EXYNOS_CPU_UP_POWER_CONTROL",
+				exynos_hotplug_in_callback,
+				NULL);
+	if (ret < 0)
+		pr_err("%s: failed to set up cpu-up hotplug state (%d)\n",
+			__func__, ret);
+
+	ret = cpuhp_setup_state(CPUHP_AP_EXYNOS_CPU_DOWN_POWER_CONTROL,
+				"AP_EXYNOS_CPU_DOWN_POWER_CONTROL",
+				NULL,
+				exynos_hotplug_out_callback);
+	if (ret < 0)
+		pr_err("%s: failed to set up cpu-down hotplug state (%d)\n",
+			__func__, ret);
+
+	return 0;
+}
+early_initcall(exynos_powermode_cpu_hotplug_init);