--- /dev/null
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * CPUIDLE driver for exynos 64bit
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpu.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/psci.h>
+
+#include <asm/tlbflush.h>
+#include <asm/cpuidle.h>
+#include <asm/topology.h>
+
+#include <soc/samsung/exynos-powermode.h>
+
+#include "dt_idle_states.h"
+#include "profiler.h"
+
+/*
+ * Exynos cpuidle driver supports the below idle states
+ *
+ * IDLE_C1 : WFI(Wait For Interrupt) low-power state
+ * IDLE_C2 : Local CPU power gating
+ */
+
+/***************************************************************************
+ * Cpuidle state handler *
+ ***************************************************************************/
+/*
+ * Notify the CPU_PM framework and the Exynos power-mode layer that
+ * @cpu is about to enter idle state @index, then start profiling.
+ * Returns the low-level entry state (0 for plain WFI).
+ */
+static unsigned int prepare_idle(unsigned int cpu, int index)
+{
+	unsigned int entry_state;
+
+	if (index <= 0) {
+		/* state 0 is simple WFI: no CPU_PM notification needed */
+		cpuidle_profile_start(cpu, index, 0);
+		return 0;
+	}
+
+	cpu_pm_enter();
+	entry_state = exynos_cpu_pm_enter(cpu, index);
+	cpuidle_profile_start(cpu, index, entry_state);
+
+	return entry_state;
+}
+
+/*
+ * Undo prepare_idle() after wakeup. @fail is nonzero when the CPU
+ * failed to actually enter the idle state.
+ */
+static void post_idle(unsigned int cpu, int index, int fail)
+{
+	cpuidle_profile_finish(cpu, fail);
+
+	if (index) {
+		exynos_cpu_pm_exit(cpu, fail);
+		cpu_pm_exit();
+	}
+}
+
+/*
+ * Enter the hardware idle state. Index 0 corresponds to WFI and must
+ * never go through the cpu_suspend path.
+ */
+static int enter_idle(unsigned int index)
+{
+	if (index)
+		return arm_cpuidle_suspend(index);
+
+	cpu_do_idle();
+	return 0;
+}
+
+/*
+ * Main cpuidle ->enter callback.
+ *
+ * Returns the entered state index on success, or a negative error
+ * code when the CPU failed to enter idle so cpuidle_enter_state()
+ * does not account a bogus usage/residency for the state.
+ */
+static int exynos_enter_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index)
+{
+	int entry_state, ret;
+
+	entry_state = prepare_idle(dev->cpu, index);
+	ret = enter_idle(entry_state);
+	post_idle(dev->cpu, index, ret);
+
+	return (ret < 0) ? ret : index;
+}
+
+/***************************************************************************
+ * Define notifier call *
+ ***************************************************************************/
+/*
+ * Pause cpuidle on the way down so no CPU is parked in a deep idle
+ * state while the system reboots or powers off.
+ *
+ * Reboot notifiers receive the SYS_* event codes from
+ * kernel_restart()/kernel_power_off(). The original code matched
+ * SYSTEM_POWER_OFF (an enum system_states value), which only worked
+ * by numeric coincidence; use SYS_POWER_OFF instead.
+ */
+static int exynos_cpuidle_reboot_notifier(struct notifier_block *this,
+				unsigned long event, void *_cmd)
+{
+	switch (event) {
+	case SYS_POWER_OFF:
+	case SYS_RESTART:
+		cpuidle_pause();
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/* Hooked into the reboot notifier chain at driver init time. */
+static struct notifier_block exynos_cpuidle_reboot_nb = {
+	.notifier_call = exynos_cpuidle_reboot_notifier,
+};
+
+/***************************************************************************
+ * Initialize cpuidle driver *
+ ***************************************************************************/
+/*
+ * Fill state[0] with the always-available WFI ("C1") state. The
+ * deeper states are populated from DT via dt_init_idle_driver().
+ */
+#define exynos_idle_wfi_state(state)					\
+	do {								\
+		state.enter = exynos_enter_idle;			\
+		state.exit_latency = 1;					\
+		state.target_residency = 1;				\
+		state.power_usage = UINT_MAX;				\
+		strncpy(state.name, "WFI", CPUIDLE_NAME_LEN - 1);	\
+		strncpy(state.desc, "c1", CPUIDLE_DESC_LEN - 1);	\
+	} while (0)
+
+/* One driver instance per CPU; each covers its sibling cpumask. */
+static struct cpuidle_driver exynos_idle_driver[NR_CPUS];
+
+/* DT-declared idle states bind exynos_enter_idle as their callback. */
+static const struct of_device_id exynos_idle_state_match[] __initconst = {
+	{ .compatible = "exynos,idle-state",
+	  .data = exynos_enter_idle },
+	{ },
+};
+
+/*
+ * Set up the per-CPU driver skeleton: a unique name, the owning
+ * cpumask and the built-in WFI state. Returns 0 or -ENOMEM.
+ *
+ * The original code allocated sizeof("exynos_idleX") bytes but
+ * printed with a hard-coded size of 12, which truncated the CPU
+ * number off the name (and could never represent cpu >= 10).
+ * kasprintf() sizes the buffer correctly for any CPU number.
+ */
+static int __init exynos_idle_driver_init(struct cpuidle_driver *drv,
+					struct cpumask *cpumask)
+{
+	int cpu = cpumask_first(cpumask);
+
+	drv->name = kasprintf(GFP_KERNEL, "exynos_idle%d", cpu);
+	if (!drv->name)
+		return -ENOMEM;
+
+	drv->owner = THIS_MODULE;
+	drv->cpumask = cpumask;
+	exynos_idle_wfi_state(drv->states[0]);
+
+	return 0;
+}
+
+/*
+ * Register one cpuidle driver per possible CPU: state 0 is the
+ * hard-coded WFI state, deeper states come from DT, and the arch
+ * suspend back-end is initialized via arm_cpuidle_init(). On any
+ * failure, everything registered so far is rolled back.
+ */
+static int __init exynos_idle_init(void)
+{
+	int ret, cpu, i;
+
+	for_each_possible_cpu(cpu) {
+		ret = exynos_idle_driver_init(&exynos_idle_driver[cpu],
+				topology_sibling_cpumask(cpu));
+
+		if (ret) {
+			pr_err("failed to initialize cpuidle driver for cpu%d",
+					cpu);
+			goto out_unregister;
+		}
+
+		/*
+		 * Initialize idle states data, starting at index 1.
+		 * This driver is DT only, if no DT idle states are detected
+		 * (ret == 0) let the driver initialization fail accordingly
+		 * since there is no reason to initialize the idle driver
+		 * if only wfi is supported.
+		 */
+		ret = dt_init_idle_driver(&exynos_idle_driver[cpu],
+				exynos_idle_state_match, 1);
+		if (ret < 0) {
+			pr_err("failed to initialize idle state for cpu%d\n", cpu);
+			goto out_unregister;
+		}
+
+		/*
+		 * Call arch CPU operations in order to initialize
+		 * idle states suspend back-end specific data
+		 */
+		ret = arm_cpuidle_init(cpu);
+		if (ret) {
+			pr_err("failed to initialize idle operation for cpu%d\n", cpu);
+			goto out_unregister;
+		}
+
+		ret = cpuidle_register(&exynos_idle_driver[cpu], NULL);
+		if (ret) {
+			pr_err("failed to register cpuidle for cpu%d\n", cpu);
+			goto out_unregister;
+		}
+	}
+
+	register_reboot_notifier(&exynos_cpuidle_reboot_nb);
+
+	cpuidle_profile_register(&exynos_idle_driver[0]);
+
+	pr_info("Exynos cpuidle driver Initialized\n");
+
+	return 0;
+
+out_unregister:
+	/*
+	 * NOTE(review): this rollback assumes possible CPU ids are
+	 * contiguous starting at 0 -- confirm that holds on all Exynos
+	 * targets using this driver.
+	 */
+	for (i = 0; i <= cpu; i++) {
+		kfree(exynos_idle_driver[i].name);
+
+		/*
+		 * Cpuidle driver of variable "cpu" is always not registered.
+		 * "cpu" should not call cpuidle_unregister().
+		 */
+		if (i < cpu)
+			cpuidle_unregister(&exynos_idle_driver[i]);
+	}
+
+	return ret;
+}
+device_initcall(exynos_idle_init);
--- /dev/null
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/cpuidle.h>
+#include <linux/slab.h>
+#include <linux/math64.h>
+
+#include <asm/page.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+#include <asm/topology.h>
+
+#include <soc/samsung/exynos-powermode.h>
+
+#include "profiler.h"
+
+/* True while a profiling run is active (set/cleared via sysfs). */
+static bool profile_started;
+
+/*
+ * "profile_info" contains profiling data for per cpu idle state which
+ * declared in cpuidle driver.
+ */
+static DEFINE_PER_CPU(struct cpuidle_profile_info, profile_info);
+
+/*
+ * "cpd_info" contains profiling data for CPD(Cluster Power Down) which
+ * is subordinate to C2 state idle. Each cluster has one element in
+ * cpd_info[].
+ */
+static struct cpuidle_profile_info cpd_info[MAX_CLUSTER];
+
+/*
+ * "sys_info" contains profiling data for system power mode
+ */
+static struct cpuidle_profile_info sys_info;
+
+/*
+ * "idle_ip_pending" counts, per system power mode, how often each
+ * IDLE_IP bit blocked entry into that mode.
+ */
+static int idle_ip_pending[NUM_SYS_POWERDOWN][NUM_IDLE_IP][IDLE_IP_REG_SIZE];
+
+/*
+ * "idle_ip_list" contains the human-readable IP name for each
+ * IDLE_IP bit, filled in by exynos_get_idle_ip_list().
+ */
+char *idle_ip_list[NUM_IDLE_IP][IDLE_IP_REG_SIZE];
+
+/************************************************************************
+ * Profiling *
+ ************************************************************************/
+/*
+ * If cpu does not enter idle state, cur_state has -EINVAL. By this,
+ * profiler can be aware of cpu state.
+ *
+ * The macro argument is parenthesized so compound expressions such as
+ * "a - b" are compared as a whole.
+ */
+#define state_entered(state)	(((state) < 0) ? 0 : 1)
+
+/*
+ * Record entry into @state at time @now, unless @info is already
+ * marked as being inside an idle state (re-entry must not be counted
+ * twice).
+ */
+static void enter_idle_state(struct cpuidle_profile_info *info,
+					int state, ktime_t now)
+{
+	if (state_entered(info->cur_state))
+		return;
+
+	info->usage[state].entry_count++;
+	info->last_entry_time = now;
+	info->cur_state = state;
+}
+
+/*
+ * Close the idle period opened by enter_idle_state(). An early
+ * wakeup means the power mode was never really entered, so only the
+ * early-wakeup counter is bumped and no residency is accumulated.
+ */
+static void exit_idle_state(struct cpuidle_profile_info *info,
+					int state, ktime_t now,
+					int earlywakeup)
+{
+	if (!state_entered(info->cur_state))
+		return;
+
+	info->cur_state = -EINVAL;
+
+	if (earlywakeup) {
+		info->usage[state].early_wakeup_count++;
+		return;
+	}
+
+	info->usage[state].time +=
+		ktime_to_us(ktime_sub(now, info->last_entry_time));
+}
+
+/*
+ * C2 subordinate states such as CPD and SICD can be entered by many
+ * CPUs, so the shared records (cpd_info/sys_info) must be updated
+ * under this lock.
+ */
+static DEFINE_SPINLOCK(substate_lock);
+
+/*
+ * Record idle entry for @cpu: always update the per-CPU record and,
+ * when a subordinate state is entered, additionally update the shared
+ * CPD or system-mode record under substate_lock.
+ */
+void __cpuidle_profile_start(int cpu, int state, int substate)
+{
+	struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
+	ktime_t now = ktime_get();
+
+	/*
+	 * Start to profile idle state. profile_info is per-CPU variable,
+	 * it does not need any synchronization.
+	 */
+	enter_idle_state(info, state, now);
+
+	/* Start to profile subordinate idle state. */
+	if (substate) {
+		spin_lock(&substate_lock);
+
+		if (state == PROFILE_C2) {
+			switch (substate) {
+			case C2_CPD:
+				/* cpd_info has a single state slot: index 0 */
+				info = &cpd_info[to_cluster(cpu)];
+				enter_idle_state(info, 0, now);
+				break;
+			case C2_SICD:
+				/*
+				 * SICD is a system power mode but it is also
+				 * C2 subordinate state. In case of SICD,
+				 * profiler updates sys_info although state is
+				 * PROFILE_C2.
+				 */
+				info = &sys_info;
+				enter_idle_state(info, SYS_SICD, now);
+				break;
+			}
+		} else if (state == PROFILE_SYS)
+			enter_idle_state(&sys_info, substate, now);
+
+		spin_unlock(&substate_lock);
+	}
+}
+
+/* Public entry hook: no-op while profiling is not active. */
+void cpuidle_profile_start(int cpu, int state, int substate)
+{
+	if (profile_started)
+		__cpuidle_profile_start(cpu, state, substate);
+}
+
+/*
+ * Record idle exit for @cpu and, when the state had subordinates,
+ * close the shared CPD/system records too. exit_idle_state() rejects
+ * duplicate exits (cur_state == -EINVAL), so every waking CPU may
+ * safely attempt the shared update.
+ */
+void __cpuidle_profile_finish(int cpu, int earlywakeup)
+{
+	struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
+	int state = info->cur_state;
+	ktime_t now = ktime_get();
+
+	exit_idle_state(info, state, now, earlywakeup);
+
+	spin_lock(&substate_lock);
+
+	/*
+	 * Subordinate state can be woken up by many cpus. We cannot predict
+	 * which cpu wakes up from idle state, so the profiler always tries
+	 * to update residency time of the subordinate state. To avoid
+	 * duplicate updating, exit_idle_state() checks validity.
+	 */
+	if (has_sub_state(state)) {
+		info = &cpd_info[to_cluster(cpu)];
+		exit_idle_state(info, info->cur_state, now, earlywakeup);
+
+		info = &sys_info;
+		exit_idle_state(info, info->cur_state, now, earlywakeup);
+	}
+
+	spin_unlock(&substate_lock);
+}
+
+/* Public exit hook: no-op while profiling is not active. */
+void cpuidle_profile_finish(int cpu, int earlywakeup)
+{
+	if (profile_started)
+		__cpuidle_profile_finish(cpu, earlywakeup);
+}
+
+/*
+ * Before system enters system power mode, it checks idle-ip status. Its
+ * status is conveyed to cpuidle_profile_collect_idle_ip().
+ */
+/*
+ * Before the system enters a system power mode it samples the idle-ip
+ * status. Each set bit in @idle_ip denotes an IP block that is still
+ * busy and therefore blocks @mode; count it as pending.
+ */
+void cpuidle_profile_collect_idle_ip(int mode, int index,
+				unsigned int idle_ip)
+{
+	int bit;
+
+	if (!profile_started)
+		return;
+
+	for (bit = 0; bit < IDLE_IP_REG_SIZE; bit++) {
+		if (idle_ip & (1 << bit))
+			idle_ip_pending[mode][index][bit]++;
+	}
+}
+
+/************************************************************************
+ * Show result *
+ ************************************************************************/
+/* Wall-clock bounds of the last profiling run and its length in us. */
+static ktime_t profile_start_time;
+static ktime_t profile_finish_time;
+static s64 profile_time;
+
+/*
+ * Express @residency as an integer percentage of the whole profiling
+ * window (profile_time, in microseconds).
+ *
+ * do_div() only takes a 32-bit divisor; profile_time is a 64-bit
+ * microsecond count that overflows 32 bits after roughly 71 minutes
+ * of profiling, so a full 64-bit division via div64_u64() is used.
+ */
+static int calculate_percent(s64 residency)
+{
+	if (!residency)
+		return 0;
+
+	return div64_u64((u64)residency * 100, (u64)profile_time);
+}
+
+/* Total residency (us) accumulated by @cpu across all profiled states. */
+static unsigned long long sum_idle_time(int cpu)
+{
+	struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
+	unsigned long long total = 0;
+	int state;
+
+	for (state = 0; state < info->state_count; state++)
+		total += info->usage[state].time;
+
+	return total;
+}
+
+/* Percentage of the profiling window that @cpu spent idle (any state). */
+static int total_idle_ratio(int cpu)
+{
+	unsigned long long idle_us = sum_idle_time(cpu);
+
+	return calculate_percent(idle_us);
+}
+
+/*
+ * Dump the complete profiling report to the kernel log: per-CPU idle
+ * ratios, per-state usage, cluster power down, system power modes and
+ * the idle-IP blockers that prevented low-power-mode entry.
+ */
+static void show_result(void)
+{
+	int i, idle_ip, bit, cpu;
+	struct cpuidle_profile_info *info;
+	int state_count;
+
+	pr_info("#############################################################\n");
+	/* NOTE(review): profile_time is s64; %lld would match better than %llu. */
+	pr_info("Profiling Time : %lluus\n", profile_time);
+
+	pr_info("\n");
+
+	pr_info("[total idle ratio]\n");
+	pr_info("#cpu      #time    #ratio\n");
+	for_each_possible_cpu(cpu)
+		pr_info("cpu%d %10lluus   %3u%%\n", cpu,
+			sum_idle_time(cpu), total_idle_ratio(cpu));
+
+	pr_info("\n");
+
+	/*
+	 * All profile_info has same state_count. As a representative,
+	 * cpu0's is used.
+	 */
+	state_count = per_cpu(profile_info, 0).state_count;
+
+	for (i = 0; i < state_count; i++) {
+		pr_info("[state%d]\n", i);
+		pr_info("#cpu   #entry   #early      #time    #ratio\n");
+		for_each_possible_cpu(cpu) {
+			info = &per_cpu(profile_info, cpu);
+			pr_info("cpu%d   %5u   %5u   %10lluus   %3u%%\n", cpu,
+				info->usage[i].entry_count,
+				info->usage[i].early_wakeup_count,
+				info->usage[i].time,
+				calculate_percent(info->usage[i].time));
+		}
+
+		pr_info("\n");
+	}
+
+	pr_info("[Cluster Power Down]\n");
+	pr_info("#cluster     #entry   #early      #time    #ratio\n");
+	for_each_cluster(i) {
+		/* cpd_info[i].usage points at the single CPD state slot */
+		pr_info("cl_%s   %5u   %5u   %10lluus   %3u%%\n",
+			i == to_cluster(0) ? "boot   " : "nonboot",
+			cpd_info[i].usage->entry_count,
+			cpd_info[i].usage->early_wakeup_count,
+			cpd_info[i].usage->time,
+			calculate_percent(cpd_info[i].usage->time));
+	}
+
+	pr_info("\n");
+
+	pr_info("[System Power Mode]\n");
+	pr_info("#mode        #entry   #early      #time    #ratio\n");
+	for_each_syspwr_mode(i) {
+		pr_info("%-13s %5u   %5u   %10lluus   %3u%%\n",
+			get_sys_powerdown_str(i),
+			sys_info.usage[i].entry_count,
+			sys_info.usage[i].early_wakeup_count,
+			sys_info.usage[i].time,
+			calculate_percent(sys_info.usage[i].time));
+	}
+
+	pr_info("\n");
+
+	pr_info("[LPM blockers]\n");
+	for_each_syspwr_mode(i) {
+		for_each_idle_ip(idle_ip) {
+			for (bit = 0; bit < IDLE_IP_REG_SIZE; bit++) {
+				if (idle_ip_pending[i][idle_ip][bit])
+					pr_info("%s block by IDLE_IP%d[%d](%s, count = %d)\n",
+						get_sys_powerdown_str(i),
+						idle_ip, bit, idle_ip_list[idle_ip][bit],
+						idle_ip_pending[i][idle_ip][bit]);
+			}
+		}
+	}
+
+	pr_info("\n");
+	pr_info("#############################################################\n");
+}
+
+/************************************************************************
+ * Profile control *
+ ************************************************************************/
+/* Reset a ktime_t timestamp back to zero. */
+static void clear_time(ktime_t *t)
+{
+	*t = 0;
+}
+
+/* Wipe all per-state counters and mark @info as "not in idle". */
+static void clear_profile_info(struct cpuidle_profile_info *info)
+{
+	size_t bytes = sizeof(struct cpuidle_profile_state_usage) *
+						info->state_count;
+
+	memset(info->usage, 0, bytes);
+	clear_time(&info->last_entry_time);
+	info->cur_state = -EINVAL;
+}
+
+/* Return every profiling record to a pristine state before a run. */
+static void reset_profile_record(void)
+{
+	int i;
+
+	clear_time(&profile_start_time);
+	clear_time(&profile_finish_time);
+
+	for_each_possible_cpu(i)
+		clear_profile_info(&per_cpu(profile_info, i));
+
+	for_each_cluster(i)
+		clear_profile_info(&cpd_info[i]);
+
+	clear_profile_info(&sys_info);
+
+	/*
+	 * sizeof on the array itself is simpler than multiplying the
+	 * dimensions by hand and cannot drift out of sync with them.
+	 */
+	memset(idle_ip_pending, 0, sizeof(idle_ip_pending));
+}
+
+/*
+ * Empty SMP cross-call targets: the IPI itself is the point -- it
+ * kicks every CPU out of idle so each re-enters with fresh profile
+ * state. The stray ';' after each body (an extra file-scope
+ * declaration that -Wpedantic flags) has been removed.
+ */
+static void call_cpu_start_profile(void *p) {}
+static void call_cpu_finish_profile(void *p) {}
+
+/*
+ * Arm the profiler: clear old records, stamp the start time, raise
+ * the started flag and then IPI every other CPU so none stays parked
+ * in idle with stale profiling state.
+ */
+static void cpuidle_profile_main_start(void)
+{
+	if (profile_started) {
+		pr_err("cpuidle profile is ongoing\n");
+		return;
+	}
+
+	reset_profile_record();
+	profile_start_time = ktime_get();
+
+	profile_started = 1;
+
+	/* Wakeup all cpus and clear own profile data to start profile */
+	preempt_disable();
+	smp_call_function(call_cpu_start_profile, NULL, 1);
+	preempt_enable();
+
+	pr_info("cpuidle profile start\n");
+}
+
+/*
+ * Disarm the profiler: IPI all CPUs so each leaves idle and closes
+ * its record, drop the started flag, compute the total profiling
+ * window and print the report to the kernel log.
+ */
+static void cpuidle_profile_main_finish(void)
+{
+	if (!profile_started) {
+		pr_err("CPUIDLE profile does not start yet\n");
+		return;
+	}
+
+	pr_info("cpuidle profile finish\n");
+
+	/* Wakeup all cpus to update own profile data to finish profile */
+	preempt_disable();
+	smp_call_function(call_cpu_finish_profile, NULL, 1);
+	preempt_enable();
+
+	profile_started = 0;
+
+	profile_finish_time = ktime_get();
+	profile_time = ktime_to_us(ktime_sub(profile_finish_time,
+						profile_start_time));
+
+	show_result();
+}
+
+/*********************************************************************
+ * Sysfs interface *
+ *********************************************************************/
+/*
+ * Render the full profiling report into @buf (a single PAGE_SIZE
+ * sysfs buffer).
+ *
+ * snprintf() returns the length that *would* have been written, so a
+ * report larger than PAGE_SIZE pushes "ret" past PAGE_SIZE and the
+ * subsequent "PAGE_SIZE - ret" underflows (size_t), overrunning the
+ * buffer. scnprintf() returns the number of bytes actually written,
+ * which keeps the arithmetic safe and simply truncates the report.
+ */
+static ssize_t show_sysfs_result(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				char *buf)
+{
+	int ret = 0;
+	int i, cpu, idle_ip, bit;
+	struct cpuidle_profile_info *info;
+	int state_count;
+
+	if (profile_started) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				"CPUIDLE profile is ongoing\n");
+		return ret;
+	}
+
+	if (profile_time == 0) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				"CPUIDLE profiler has not started yet\n");
+		return ret;
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"#############################################################\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"Profiling Time : %lluus\n", profile_time);
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"\n");
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"[total idle ratio]\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"#cpu      #time    #ratio\n");
+	for_each_possible_cpu(cpu)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"cpu%d %10lluus   %3u%%\n",
+			cpu, sum_idle_time(cpu), total_idle_ratio(cpu));
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"\n");
+
+	/*
+	 * All profile_info has same state_count. As a representative,
+	 * cpu0's is used.
+	 */
+	state_count = per_cpu(profile_info, 0).state_count;
+
+	for (i = 0; i < state_count; i++) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"[state%d]\n", i);
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"#cpu   #entry   #early      #time    #ratio\n");
+		for_each_possible_cpu(cpu) {
+			info = &per_cpu(profile_info, cpu);
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				"cpu%d   %5u   %5u   %10lluus   %3u%%\n",
+				cpu,
+				info->usage[i].entry_count,
+				info->usage[i].early_wakeup_count,
+				info->usage[i].time,
+				calculate_percent(info->usage[i].time));
+		}
+
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"\n");
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"[CPD] - Cluster Power Down\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"#cluster     #entry   #early      #time    #ratio\n");
+	for_each_cluster(i) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"cl_%s   %5u   %5u   %10lluus   %3u%%\n",
+			i == to_cluster(0) ? "boot   " : "nonboot",
+			cpd_info[i].usage->entry_count,
+			cpd_info[i].usage->early_wakeup_count,
+			cpd_info[i].usage->time,
+			calculate_percent(cpd_info[i].usage->time));
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"\n");
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"[LPM] - Low Power Mode\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"#mode        #entry   #early      #time    #ratio\n");
+	for_each_syspwr_mode(i) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"%-9s   %5u   %5u   %10lluus   %3u%%\n",
+			get_sys_powerdown_str(i),
+			sys_info.usage[i].entry_count,
+			sys_info.usage[i].early_wakeup_count,
+			sys_info.usage[i].time,
+			calculate_percent(sys_info.usage[i].time));
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"\n");
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"[LPM blockers]\n");
+	for_each_syspwr_mode(i) {
+		for_each_idle_ip(idle_ip) {
+			for (bit = 0; bit < IDLE_IP_REG_SIZE; bit++) {
+				if (idle_ip_pending[i][idle_ip][bit])
+					ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+						"%s block by IDLE_IP%d[%d](%s, count = %d)\n",
+						get_sys_powerdown_str(i),
+						idle_ip, bit, idle_ip_list[idle_ip][bit],
+						idle_ip_pending[i][idle_ip][bit]);
+			}
+		}
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+		"#############################################################\n");
+
+	return ret;
+}
+
+/* sysfs 'show': report in-progress status or dump the last results. */
+static ssize_t show_cpuidle_profile(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				char *buf)
+{
+	if (profile_started)
+		return snprintf(buf, PAGE_SIZE,
+				"CPUIDLE profile is ongoing\n");
+
+	return show_sysfs_result(kobj, attr, buf);
+}
+
+/*
+ * sysfs 'store': a nonzero digit starts profiling, zero stops it and
+ * prints the report.
+ *
+ * sscanf() returns EOF (-1) for empty input; the original "!sscanf"
+ * check only caught a 0 return, so an empty write went on to read the
+ * uninitialized "input". Require exactly one converted item instead.
+ */
+static ssize_t store_cpuidle_profile(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t count)
+{
+	int input;
+
+	if (sscanf(buf, "%1d", &input) != 1)
+		return -EINVAL;
+
+	if (input)
+		cpuidle_profile_main_start();
+	else
+		cpuidle_profile_main_finish();
+
+	return count;
+}
+
+/* /sys/.../cpuidle_profiler/profile: read = results, write = start/stop. */
+static struct kobj_attribute cpuidle_profile_attr =
+	__ATTR(profile, 0644, show_cpuidle_profile, store_cpuidle_profile);
+
+static struct attribute *cpuidle_profile_attrs[] = {
+	&cpuidle_profile_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group cpuidle_profile_group = {
+	.attrs = cpuidle_profile_attrs,
+};
+
+
+/*********************************************************************
+ * Initialize cpuidle profiler *
+ *********************************************************************/
+/*
+ * Allocate the per-state usage array for @info.
+ *
+ * On allocation failure the original code left state_count set while
+ * usage stayed NULL, so a later clear_profile_info()/report would
+ * dereference NULL. Keep state_count zero on failure so every
+ * "for (i = 0; i < state_count; i++)" consumer degrades gracefully,
+ * and start cur_state at -EINVAL ("not in idle").
+ */
+static void __init cpuidle_profile_info_init(struct cpuidle_profile_info *info,
+					int state_count)
+{
+	int size = sizeof(struct cpuidle_profile_state_usage) * state_count;
+
+	info->usage = kzalloc(size, GFP_KERNEL);
+	if (!info->usage) {
+		pr_err("%s:%d: Memory allocation failed\n", __func__, __LINE__);
+		info->state_count = 0;
+		return;
+	}
+
+	info->state_count = state_count;
+	info->cur_state = -EINVAL;
+}
+
+/*
+ * Allocate profiling storage once the cpuidle driver layout is known:
+ * per-CPU state usage sized by @drv's state count, one CPD slot per
+ * cluster and one slot per system power mode.
+ */
+void __init cpuidle_profile_register(struct cpuidle_driver *drv)
+{
+	int cpu, cluster;
+
+	/* Initialize each cpuidle state information */
+	for_each_possible_cpu(cpu)
+		cpuidle_profile_info_init(&per_cpu(profile_info, cpu),
+						drv->state_count);
+
+	/* Initialize CPD(Cluster Power Down) information */
+	for_each_cluster(cluster)
+		cpuidle_profile_info_init(&cpd_info[cluster], 1);
+
+	/* Initialize system power mode information */
+	cpuidle_profile_info_init(&sys_info, NUM_SYS_POWERDOWN);
+}
+
+/*
+ * Create /sys/class/cpuidle/cpuidle_profiler with the "profile"
+ * attribute and fetch the human-readable idle-IP name table.
+ *
+ * class_create() and device_create() return ERR_PTR() on failure; the
+ * original code dereferenced the device unconditionally, so check
+ * both before use and unwind the class on device failure.
+ */
+static int __init cpuidle_profile_init(void)
+{
+	struct class *class;
+	struct device *dev;
+
+	class = class_create(THIS_MODULE, "cpuidle");
+	if (IS_ERR(class))
+		return PTR_ERR(class);
+
+	dev = device_create(class, NULL, 0, NULL, "cpuidle_profiler");
+	if (IS_ERR(dev)) {
+		class_destroy(class);
+		return PTR_ERR(dev);
+	}
+
+	if (sysfs_create_group(&dev->kobj, &cpuidle_profile_group)) {
+		pr_err("CPUIDLE Profiler : error to create sysfs\n");
+		return -EINVAL;
+	}
+
+	exynos_get_idle_ip_list(idle_ip_list);
+
+	return 0;
+}
+late_initcall(cpuidle_profile_init);