/*
- * Copyright (c) 2015 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * Copyright (c) 2018 Park Bumgyu, Samsung Electronics Co., Ltd <bumgyu.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * CPUIDLE profiler for Exynos
*/
#include <linux/device.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
-#include <asm/page.h>
-#include <asm/cputype.h>
-#include <asm/smp_plat.h>
-#include <asm/topology.h>
-
-#include <soc/samsung/exynos-powermode.h>
-
-#include "profiler.h"
-
+/* whether profiling has started */
static bool profile_started;
/*
- * "profile_info" contains profiling data for per cpu idle state which
- * declared in cpuidle driver.
+ * Represents the statistics of an idle state.
+ * Each idle state is mapped 1:1 to a cpuidle_stats instance.
*/
-static DEFINE_PER_CPU(struct cpuidle_profile_info, profile_info);
+struct cpuidle_stats {
+ /* time at which the idle state was entered */
+ ktime_t idle_entry_time;
-/*
- * "cpd_info" contains profiling data for CPD(Cluster Power Down) which
- * is subordinate to C2 state idle. Each cluster has one element in
- * cpd_info[].
- */
-static struct cpuidle_profile_info cpd_info[MAX_CLUSTER];
+ /* number of times an idle state is entered */
+ unsigned int entry_count;
+
+ /* number of times the entry into idle state is canceled */
+ unsigned int cancel_count;
+
+ /* time in idle state */
+ unsigned long long time;
+};
+
+/* length of the idle state description */
+#define DESC_LEN 32
/*
- * "sys_info" contains profiling data for system power mode
+ * Manages the idle states that each cpu enters individually. One
+ * cpu_idle_state structure manages one such idle state for every cpu,
+ * and the number of structures is determined by the cpuidle driver.
*/
-static struct cpuidle_profile_info sys_info;
+struct cpu_idle_state {
+ /* description of idle state */
+ char desc[DESC_LEN];
+
+ /* idle state statistics for each cpu */
+ struct cpuidle_stats stats[NR_CPUS];
+};
+
+/* cpu idle state list and length of cpu idle state list */
+static struct cpu_idle_state *cpu_idle_state;
+static int cpu_idle_state_count;
/*
- * "idle_ip_pending" contains which blocks to enter system power mode
+ * Manages the idle states that multiple cpus enter together as a group.
+ * Each such idle state has one group_idle_state structure.
*/
-static int idle_ip_pending[NUM_SYS_POWERDOWN][NUM_IDLE_IP][IDLE_IP_REG_SIZE];
+struct group_idle_state {
+ /* idle state id; it must be unique */
+ int id;
+
+ /* description of idle state */
+ char desc[DESC_LEN];
+
+ /* idle state statistics */
+ struct cpuidle_stats stats;
+};
/*
- * "idle_ip_list" contains IP name in IDLE_IP
+ * To manage group_idle_state entries easily, keep them in a fixed-size
+ * array. Currently, at most 5 group idle states are supported;
+ * empirically, the number of states is unlikely to exceed this.
*/
-char *idle_ip_list[NUM_IDLE_IP][IDLE_IP_REG_SIZE];
+#define MAX_GROUP_IDLE_STATE 5
+
+/* group idle state list and length of group idle state list */
+static struct group_idle_state *group_idle_state[MAX_GROUP_IDLE_STATE];
+static int group_idle_state_count;
/************************************************************************
* Profiling *
************************************************************************/
-/*
- * If cpu does not enter idle state, cur_state has -EINVAL. By this,
- * profiler can be aware of cpu state.
- */
-#define state_entered(state) ((state < 0) ? 0 : 1)
-
-static void enter_idle_state(struct cpuidle_profile_info *info,
- int state, ktime_t now)
+static void idle_enter(struct cpuidle_stats *stats)
{
- if (state_entered(info->cur_state))
- return;
-
- info->cur_state = state;
- info->last_entry_time = now;
-
- info->usage[state].entry_count++;
+ stats->idle_entry_time = ktime_get();
+ stats->entry_count++;
}
-static void exit_idle_state(struct cpuidle_profile_info *info,
- int state, ktime_t now,
- int earlywakeup)
+static void idle_exit(struct cpuidle_stats *stats, int cancel)
{
s64 diff;
- if (!state_entered(info->cur_state))
+ /*
+ * If the profiler is started while the cpu is already in an idle
+ * state, idle_entry_time is 0 because the entry event was not
+ * recorded. The interval from the start of profiling until the cpu
+ * wakes up is idle time, but it is ignored because handling it is
+ * complex and the time is small.
+ */
+ if (!stats->idle_entry_time)
return;
- info->cur_state = -EINVAL;
-
- if (earlywakeup) {
- /*
- * If cpu cannot enter power mode, residency time
- * should not be updated.
- */
- info->usage[state].early_wakeup_count++;
+ if (cancel) {
+ stats->cancel_count++;
return;
}
- diff = ktime_to_us(ktime_sub(now, info->last_entry_time));
- info->usage[state].time += diff;
+ diff = ktime_to_us(ktime_sub(ktime_get(), stats->idle_entry_time));
+ stats->time += diff;
+
+ stats->idle_entry_time = 0;
}
/*
- * C2 subordinate state such as CPD and SICD can be entered by many cpus.
- * The variables which contains these idle states need to keep
- * synchronization.
+ * cpuidle_profile_cpu_idle_enter/cpuidle_profile_cpu_idle_exit
+ * : profile the cpu idle states
*/
-static DEFINE_SPINLOCK(substate_lock);
-
-void __cpuidle_profile_start(int cpu, int state, int substate)
+void cpuidle_profile_cpu_idle_enter(int cpu, int index)
{
- struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
- ktime_t now = ktime_get();
-
- /*
- * Start to profile idle state. profile_info is per-CPU variable,
- * it does not need to synchronization.
- */
- enter_idle_state(info, state, now);
-
- /* Start to profile subordinate idle state. */
- if (substate) {
- spin_lock(&substate_lock);
-
- if (state == PROFILE_C2) {
- switch (substate) {
- case C2_CPD:
- info = &cpd_info[to_cluster(cpu)];
- enter_idle_state(info, 0, now);
- break;
- case C2_SICD:
- /*
- * SICD is a system power mode but it is also
- * C2 subordinate state. In case of SICD,
- * profiler updates sys_info although state is
- * PROFILE_C2.
- */
- info = &sys_info;
- enter_idle_state(info, SYS_SICD, now);
- break;
- }
- } else if (state == PROFILE_SYS)
- enter_idle_state(&sys_info, substate, now);
-
- spin_unlock(&substate_lock);
- }
-}
-
-void cpuidle_profile_start(int cpu, int state, int substate)
-{
- /*
- * Return if profile is not started
- */
if (!profile_started)
return;
- __cpuidle_profile_start(cpu, state, substate);
+ idle_enter(&cpu_idle_state[index].stats[cpu]);
}
-void __cpuidle_profile_finish(int cpu, int earlywakeup)
+void cpuidle_profile_cpu_idle_exit(int cpu, int index, int cancel)
{
- struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
- int state = info->cur_state;
- ktime_t now = ktime_get();
-
- exit_idle_state(info, state, now, earlywakeup);
-
- spin_lock(&substate_lock);
-
- /*
- * Subordinate state can be wakeup by many cpus. We cannot predict
- * which cpu wakeup from idle state, profiler always try to update
- * residency time of subordinate state. To avoid duplicate updating,
- * exit_idle_state() checks validation.
- */
- if (has_sub_state(state)) {
- info = &cpd_info[to_cluster(cpu)];
- exit_idle_state(info, info->cur_state, now, earlywakeup);
-
- info = &sys_info;
- exit_idle_state(info, info->cur_state, now, earlywakeup);
- }
-
- spin_unlock(&substate_lock);
-}
-
-void cpuidle_profile_finish(int cpu, int earlywakeup)
-{
- /*
- * Return if profile is not started
- */
if (!profile_started)
return;
- __cpuidle_profile_finish(cpu, earlywakeup);
+ idle_exit(&cpu_idle_state[index].stats[cpu], cancel);
}
/*
- * Before system enters system power mode, it checks idle-ip status. Its
- * status is conveyed to cpuidle_profile_collect_idle_ip().
+ * cpuidle_profile_group_idle_enter/cpuidle_profile_group_idle_exit
+ * : profile the group idle states
*/
-void cpuidle_profile_collect_idle_ip(int mode, int index,
- unsigned int idle_ip)
+void cpuidle_profile_group_idle_enter(int id)
{
int i;
- /*
- * Return if profile is not started
- */
if (!profile_started)
return;
- for (i = 0; i < IDLE_IP_REG_SIZE; i++) {
- /*
- * If bit of idle_ip has 1, IP corresponding to its bit
- * is not idle.
- */
- if (idle_ip & (1 << i))
- idle_ip_pending[mode][index][i]++;
- }
-}
-
-/************************************************************************
- * Show result *
- ************************************************************************/
-static ktime_t profile_start_time;
-static ktime_t profile_finish_time;
-static s64 profile_time;
-
-static int calculate_percent(s64 residency)
-{
- if (!residency)
- return 0;
-
- residency *= 100;
- do_div(residency, profile_time);
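+ /* look up the group idle state registered with this unique id */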
+ for (i = 0; i < group_idle_state_count; i++)
+ if (group_idle_state[i]->id == id)
+ break;
- return residency;
+ idle_enter(&group_idle_state[i]->stats);
}
-static unsigned long long sum_idle_time(int cpu)
+void cpuidle_profile_group_idle_exit(int id, int cancel)
{
int i;
- unsigned long long idle_time = 0;
- struct cpuidle_profile_info *info = &per_cpu(profile_info, cpu);
-
- for (i = 0; i < info->state_count; i++)
- idle_time += info->usage[i].time;
-
- return idle_time;
-}
-
-static int total_idle_ratio(int cpu)
-{
- return calculate_percent(sum_idle_time(cpu));
-}
-
-static void show_result(void)
-{
- int i, idle_ip, bit, cpu;
- struct cpuidle_profile_info *info;
- int state_count;
-
- pr_info("#############################################################\n");
- pr_info("Profiling Time : %lluus\n", profile_time);
-
- pr_info("\n");
-
- pr_info("[total idle ratio]\n");
- pr_info("#cpu #time #ratio\n");
- for_each_possible_cpu(cpu)
- pr_info("cpu%d %10lluus %3u%%\n", cpu,
- sum_idle_time(cpu), total_idle_ratio(cpu));
-
- pr_info("\n");
-
- /*
- * All profile_info has same state_count. As a representative,
- * cpu0's is used.
- */
- state_count = per_cpu(profile_info, 0).state_count;
-
- for (i = 0; i < state_count; i++) {
- pr_info("[state%d]\n", i);
- pr_info("#cpu #entry #early #time #ratio\n");
- for_each_possible_cpu(cpu) {
- info = &per_cpu(profile_info, cpu);
- pr_info("cpu%d %5u %5u %10lluus %3u%%\n", cpu,
- info->usage[i].entry_count,
- info->usage[i].early_wakeup_count,
- info->usage[i].time,
- calculate_percent(info->usage[i].time));
- }
-
- pr_info("\n");
- }
-
- pr_info("[Cluster Power Down]\n");
- pr_info("#cluster #entry #early #time #ratio\n");
- for_each_cluster(i) {
- pr_info("cl_%s %5u %5u %10lluus %3u%%\n",
- i == to_cluster(0) ? "boot " : "nonboot",
- cpd_info[i].usage->entry_count,
- cpd_info[i].usage->early_wakeup_count,
- cpd_info[i].usage->time,
- calculate_percent(cpd_info[i].usage->time));
- }
- pr_info("\n");
-
- pr_info("[System Power Mode]\n");
- pr_info("#mode #entry #early #time #ratio\n");
- for_each_syspwr_mode(i) {
- pr_info("%-13s %5u %5u %10lluus %3u%%\n",
- get_sys_powerdown_str(i),
- sys_info.usage[i].entry_count,
- sys_info.usage[i].early_wakeup_count,
- sys_info.usage[i].time,
- calculate_percent(sys_info.usage[i].time));
- }
+ if (!profile_started)
+ return;
- pr_info("\n");
-
- pr_info("[LPM blockers]\n");
- for_each_syspwr_mode(i) {
- for_each_idle_ip(idle_ip) {
- for (bit = 0; bit < IDLE_IP_REG_SIZE; bit++) {
- if (idle_ip_pending[i][idle_ip][bit])
- pr_info("%s block by IDLE_IP%d[%d](%s, count = %d)\n",
- get_sys_powerdown_str(i),
- idle_ip, bit, idle_ip_list[idle_ip][bit],
- idle_ip_pending[i][idle_ip][bit]);
- }
- }
- }
+ for (i = 0; i < group_idle_state_count; i++)
+ if (group_idle_state[i]->id == id)
+ break;
- pr_info("\n");
- pr_info("#############################################################\n");
+ idle_exit(&group_idle_state[i]->stats, cancel);
}
/************************************************************************
- * Profile control *
+ * Profile start/stop *
************************************************************************/
-static void clear_time(ktime_t *time)
-{
- *time = 0;
-}
+/* total profiling time */
+static s64 profile_time;
+
+/* start time of profile */
+static ktime_t profile_start_time;
-static void clear_profile_info(struct cpuidle_profile_info *info)
+static void clear_stats(struct cpuidle_stats *stats)
{
- memset(info->usage, 0,
- sizeof(struct cpuidle_profile_state_usage) * info->state_count);
+ if (!stats)
+ return;
+
+ stats->idle_entry_time = 0;
- clear_time(&info->last_entry_time);
- info->cur_state = -EINVAL;
+ stats->entry_count = 0;
+ stats->cancel_count = 0;
+ stats->time = 0;
}
-static void reset_profile_record(void)
+static void reset_profile(void)
{
- int i;
+ int cpu, i;
- clear_time(&profile_start_time);
- clear_time(&profile_finish_time);
+ profile_start_time = 0;
- for_each_possible_cpu(i)
- clear_profile_info(&per_cpu(profile_info, i));
+ for (i = 0; i < cpu_idle_state_count; i++)
+ for_each_possible_cpu(cpu)
+ clear_stats(&cpu_idle_state[i].stats[cpu]);
- for_each_cluster(i)
- clear_profile_info(&cpd_info[i]);
-
- clear_profile_info(&sys_info);
-
- memset(idle_ip_pending, 0,
- NUM_SYS_POWERDOWN * NUM_IDLE_IP * IDLE_IP_REG_SIZE * sizeof(int));
+ for (i = 0; i < group_idle_state_count; i++)
+ clear_stats(&group_idle_state[i]->stats);
}
-static void call_cpu_start_profile(void *p) {};
-static void call_cpu_finish_profile(void *p) {};
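+/*
+ * Empty IPI callback. It is used with smp_call_function() only to kick
+ * every cpu out of idle when profiling starts or stops.
+ */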
+static void do_nothing(void *unused)
+{
+}
-static void cpuidle_profile_main_start(void)
+static void cpuidle_profile_start(void)
{
if (profile_started) {
pr_err("cpuidle profile is ongoing\n");
return;
}
- reset_profile_record();
+ reset_profile();
profile_start_time = ktime_get();
profile_started = 1;
- /* Wakeup all cpus and clear own profile data to start profile */
preempt_disable();
- smp_call_function(call_cpu_start_profile, NULL, 1);
+ /* wake up all cpus to start profiling */
+ smp_call_function(do_nothing, NULL, 1);
preempt_enable();
pr_info("cpuidle profile start\n");
}
-static void cpuidle_profile_main_finish(void)
+static void cpuidle_profile_stop(void)
{
if (!profile_started) {
pr_err("CPUIDLE profile does not start yet\n");
return;
}
- pr_info("cpuidle profile finish\n");
+ pr_info("cpuidle profile stop\n");
- /* Wakeup all cpus to update own profile data to finish profile */
preempt_disable();
- smp_call_function(call_cpu_finish_profile, NULL, 1);
+ /* wake up all cpus to stop profiling */
+ smp_call_function(do_nothing, NULL, 1);
preempt_enable();
profile_started = 0;
- profile_finish_time = ktime_get();
- profile_time = ktime_to_us(ktime_sub(profile_finish_time,
- profile_start_time));
+ profile_time = ktime_to_us(ktime_sub(ktime_get(), profile_start_time));
+}
+
+/************************************************************************
+ * IDLE IP *
+ ************************************************************************/
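+/*
+ * Per-bit busy counters for each idle-ip register. The dimensions
+ * assume 4 idle-ip registers of 32 bits each, matching the extern
+ * idle_ip_names table below.
+ */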
+static int idle_ip_stats[4][32];
+extern char *idle_ip_names[4][32];
+
+void cpuidle_profile_idle_ip(int index, unsigned int idle_ip)
+{
+ int i;
+
+ /*
+ * Return if profile is not started
+ */
+ if (!profile_started)
+ return;
- show_result();
+ for (i = 0; i < 32; i++) {
+ /*
+ * If a bit of idle_ip is set, the IP corresponding to that
+ * bit is not idle.
+ */
+ if (idle_ip & (1 << i))
+ idle_ip_stats[index][i]++;
+ }
}
-/*********************************************************************
- * Sysfs interface *
- *********************************************************************/
-static ssize_t show_sysfs_result(struct kobject *kobj,
+/************************************************************************
+ * Show result *
+ ************************************************************************/
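+/* convert an idle residency time to a percentage of the total profiling time */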
+static int calculate_percent(s64 residency)
+{
+ if (!residency)
+ return 0;
+
+ residency *= 100;
+ do_div(residency, profile_time);
+
+ return residency;
+}
+
+static unsigned long long cpu_idle_time(int cpu)
+{
+ unsigned long long idle_time = 0;
+ int i;
+
+ for (i = 0; i < cpu_idle_state_count; i++)
+ idle_time += cpu_idle_state[i].stats[cpu].time;
+
+ return idle_time;
+}
+
+static int cpu_idle_ratio(int cpu)
+{
+ return calculate_percent(cpu_idle_time(cpu));
+}
+
+static ssize_t show_result(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
int ret = 0;
- int i, cpu, idle_ip, bit;
- struct cpuidle_profile_info *info;
- int state_count;
+ int cpu, i, bit;
if (profile_started) {
ret += snprintf(buf + ret, PAGE_SIZE - ret,
return ret;
}
- if (profile_time == 0) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "CPUIDLE profiler has not started yet\n");
- return ret;
- }
-
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"#############################################################\n");
ret += snprintf(buf + ret, PAGE_SIZE - ret,
for_each_possible_cpu(cpu)
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"cpu%d %10lluus %3u%%\n",
- cpu, sum_idle_time(cpu), total_idle_ratio(cpu));
+ cpu, cpu_idle_time(cpu), cpu_idle_ratio(cpu));
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"\n");
/*
- * All profile_info has same state_count. As a representative,
- * cpu0's is used.
+ * Example of the cpu idle state profile result.
+ * Below is an example from a quad-core system. The number of
+ * rows depends on the number of cpus.
+ *
+ * [state : {desc}]
+ * #cpu #entry #cancel #time #ratio
+ * cpu0 985 8 8808916us 87%
+ * cpu1 340 2 8311318us 82%
+ * cpu2 270 7 8744801us 87%
+ * cpu3 330 2 9001329us 89%
*/
- state_count = per_cpu(profile_info, 0).state_count;
-
- for (i = 0; i < state_count; i++) {
+ for (i = 0; i < cpu_idle_state_count; i++) {
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "[state%d]\n", i);
+ "[state : %s]\n", cpu_idle_state[i].desc);
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "#cpu #entry #early #time #ratio\n");
+ "#cpu #entry #cancel #time #ratio\n");
for_each_possible_cpu(cpu) {
- info = &per_cpu(profile_info, cpu);
+ struct cpuidle_stats *stats = &cpu_idle_state[i].stats[cpu];
+
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "cpu%d %5u %5u %10lluus %3u%%\n",
+ "cpu%d %5u %5u %10lluus %3u%%\n",
cpu,
- info->usage[i].entry_count,
- info->usage[i].early_wakeup_count,
- info->usage[i].time,
- calculate_percent(info->usage[i].time));
+ stats->entry_count,
+ stats->cancel_count,
+ stats->time,
+ calculate_percent(stats->time));
}
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"\n");
}
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "[CPD] - Cluster Power Down\n");
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "#cluster #entry #early #time #ratio\n");
- for_each_cluster(i) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "cl_%s %5u %5u %10lluus %3u%%\n",
- i == to_cluster(0) ? "boot " : "nonboot",
- cpd_info[i].usage->entry_count,
- cpd_info[i].usage->early_wakeup_count,
- cpd_info[i].usage->time,
- calculate_percent(cpd_info[i].usage->time));
- }
+ /*
+ * Example of the group idle state profile result.
+ * The number of results depends on the number of group idle states.
+ *
+ * [state : {desc}]
+ * #entry #cancel #time #ratio
+ * 52 1 4296397us 42%
+ *
+ * [state : {desc}]
+ * #entry #cancel #time #ratio
+ * 20 0 2230528us 22%
+ */
+ for (i = 0; i < group_idle_state_count; i++) {
+ struct cpuidle_stats *stats = &group_idle_state[i]->stats;
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "\n");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "[state : %s]\n", group_idle_state[i]->desc);
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "#entry #cancel #time #ratio\n");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "%5u %5u %10lluus %3u%%\n",
+ stats->entry_count,
+ stats->cancel_count,
+ stats->time,
+ calculate_percent(stats->time));
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "[LPM] - Low Power Mode\n");
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "#mode #entry #early #time #ratio\n");
- for_each_syspwr_mode(i) {
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "%-9s %5u %5u %10lluus %3u%%\n",
- get_sys_powerdown_str(i),
- sys_info.usage[i].entry_count,
- sys_info.usage[i].early_wakeup_count,
- sys_info.usage[i].time,
- calculate_percent(sys_info.usage[i].time));
+ "\n");
}
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "\n");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "[IDLE-IP statistics]\n");
+ for (i = 0; i < 4; i++) {
+ for (bit = 0; bit < 32; bit++) {
+ if (!idle_ip_stats[i][bit])
+ continue;
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "[LPM blockers]\n");
- for_each_syspwr_mode(i) {
- for_each_idle_ip(idle_ip) {
- for (bit = 0; bit < IDLE_IP_REG_SIZE; bit++) {
- if (idle_ip_pending[i][idle_ip][bit])
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "%s block by IDLE_IP%d[%d](%s, count = %d)\n",
- get_sys_powerdown_str(i),
- idle_ip, bit, idle_ip_list[idle_ip][bit],
- idle_ip_pending[i][idle_ip][bit]);
- }
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "busy IP : %s(count = %d)\n",
+ idle_ip_names[i][bit], idle_ip_stats[i][bit]);
}
}
return ret;
}
+/*********************************************************************
+ * Sysfs interface *
+ *********************************************************************/
static ssize_t show_cpuidle_profile(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"CPUIDLE profile is ongoing\n");
else
- ret = show_sysfs_result(kobj, attr, buf);
-
+ ret = show_result(kobj, attr, buf);
return ret;
}
return -EINVAL;
if (!!input)
- cpuidle_profile_main_start();
+ cpuidle_profile_start();
else
- cpuidle_profile_main_finish();
+ cpuidle_profile_stop();
return count;
}
.attrs = cpuidle_profile_attrs,
};
-
/*********************************************************************
* Initialize cpuidle profiler *
*********************************************************************/
-static void __init cpuidle_profile_info_init(struct cpuidle_profile_info *info,
- int state_count)
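+/*
+ * Register the cpu idle states declared by the cpuidle driver. One
+ * cpu_idle_state entry, holding per-cpu statistics, is allocated for
+ * each state of the driver.
+ */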
+void __init
+cpuidle_profile_cpu_idle_register(struct cpuidle_driver *drv)
{
- int size = sizeof(struct cpuidle_profile_state_usage) * state_count;
+ struct cpu_idle_state *state;
+ int state_count = drv->state_count;
+ int i;
- info->state_count = state_count;
- info->usage = kmalloc(size, GFP_KERNEL);
- if (!info->usage) {
- pr_err("%s:%d: Memory allocation failed\n", __func__, __LINE__);
+ state = kzalloc(sizeof(struct cpu_idle_state) * state_count,
+ GFP_KERNEL);
+ if (!state) {
+ pr_err("%s: Failed to allocate memory\n", __func__);
return;
}
+
+ for (i = 0; i < state_count; i++)
+ strncpy(state[i].desc, drv->states[i].desc, DESC_LEN);
+
+ cpu_idle_state = state;
+ cpu_idle_state_count = state_count;
}
-void __init cpuidle_profile_register(struct cpuidle_driver *drv)
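+/*
+ * Register a group idle state identified by a unique id, together with
+ * a description printed in the profiling result.
+ */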
+void __init
+cpuidle_profile_group_idle_register(int id, const char *name)
{
- int idle_state_count = drv->state_count;
- int i;
+ struct group_idle_state *state;
- /* Initialize each cpuidle state information */
- for_each_possible_cpu(i)
- cpuidle_profile_info_init(&per_cpu(profile_info, i),
- idle_state_count);
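+ /*
+ * Guard against overflowing the fixed-size array; registering more
+ * than MAX_GROUP_IDLE_STATE states is assumed never to happen.
+ */
+ if (group_idle_state_count >= MAX_GROUP_IDLE_STATE) {
+ pr_err("%s: too many group idle states\n", __func__);
+ return;
+ }
+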
+ state = kzalloc(sizeof(struct group_idle_state), GFP_KERNEL);
+ if (!state) {
+ pr_err("%s: Failed to allocate memory\n", __func__);
+ return;
+ }
- /* Initiailize CPD(Cluster Power Down) information */
- for_each_cluster(i)
- cpuidle_profile_info_init(&cpd_info[i], 1);
+ state->id = id;
+ strncpy(state->desc, name, DESC_LEN);
- /* Initiailize System power mode information */
- cpuidle_profile_info_init(&sys_info, NUM_SYS_POWERDOWN);
+ group_idle_state[group_idle_state_count] = state;
+ group_idle_state_count++;
}
static int __init cpuidle_profile_init(void)
{
struct class *class;
struct device *dev;
+ int ret = 0;
class = class_create(THIS_MODULE, "cpuidle");
dev = device_create(class, NULL, 0, NULL, "cpuidle_profiler");
- if (sysfs_create_group(&dev->kobj, &cpuidle_profile_group)) {
- pr_err("CPUIDLE Profiler : error to create sysfs\n");
- return -EINVAL;
- }
-
- exynos_get_idle_ip_list(idle_ip_list);
+ ret = sysfs_create_group(&dev->kobj, &cpuidle_profile_group);
+ if (ret)
+ pr_err("%s: failed to create sysfs group", __func__);
- return 0;
+ return ret;
}
late_initcall(cpuidle_profile_init);