*/
hrtimer_peek_ahead_timers();
#endif
+
+ /*
+ * Call the device's prepare function before calling the
+ * governor's select function. ->prepare gives the device's
+ * cpuidle driver a chance to update any dynamic information
+ * of its cpuidle states for the current idle period, e.g.
+ * state availability, latencies, residencies, etc.
+ */
+ if (dev->prepare)
+ dev->prepare(dev);
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
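
For illustration only, a driver-side ->prepare hook might look like the
sketch below. Only the ->prepare signature, the per-state flags field and
CPUIDLE_FLAG_IGNORE come from this patch; the driver name and the
bus_is_busy() helper are invented.

/*
 * Hypothetical ->prepare implementation: mark the deepest C-state as
 * unusable for this idle period when an (imaginary) shared resource
 * is busy, and make it available again otherwise.
 */
static int example_cpuidle_prepare(struct cpuidle_device *dev)
{
	struct cpuidle_state *deepest = &dev->states[dev->state_count - 1];

	if (bus_is_busy())	/* hypothetical helper */
		deepest->flags |= CPUIDLE_FLAG_IGNORE;
	else
		deepest->flags &= ~CPUIDLE_FLAG_IGNORE;

	return 0;
}

The driver would then set dev->prepare = example_cpuidle_prepare before
calling cpuidle_register_device().
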
poll_idle_init(dev);
+ /*
+ * The cpuidle driver should set the dev->power_specified bit
+ * before registering the device if the driver provides
+ * power_usage numbers.
+ *
+ * For those devices whose ->power_specified is not set,
+ * we fill in power_usage with decreasing values as the
+ * cpuidle code has an implicit assumption that state Cn
+ * uses less power than C(n-1).
+ *
+ * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+ * a power value of -1. So we use -2, -3, etc. for the
+ * other C-states.
+ */
+ if (!dev->power_specified) {
+ int i;
+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
+ dev->states[i].power_usage = -1 - i;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
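
As a hedged counterpart to the default fill above, a driver that does know
its per-state power numbers could populate power_usage itself and set
power_specified before registering, so the core leaves the values alone.
The function name and the numbers below are purely illustrative.

/*
 * Hypothetical registration path for a driver that supplies its own
 * power numbers (arbitrary units).
 */
static int example_register(struct cpuidle_device *dev)
{
	dev->states[0].power_usage = 1000;	/* shallowest state */
	dev->states[1].power_usage = 500;
	dev->states[2].power_usage = 100;	/* deepest state */
	dev->state_count = 3;

	/* tell the core not to overwrite these with the -1 - i defaults */
	dev->power_specified = 1;

	return cpuidle_register_device(dev);
}
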
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ unsigned int power_usage = -1;
int i;
int multiplier;
if (data->expected_us > 5)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
-
- /* find the deepest idle state that satisfies our constraints */
+ /*
+ * Find the idle state with the lowest power while satisfying
+ * our constraints.
+ */
for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
+ if (s->flags & CPUIDLE_FLAG_IGNORE)
+ continue;
if (s->target_residency > data->predicted_us)
- break;
+ continue;
if (s->exit_latency > latency_req)
- break;
+ continue;
if (s->exit_latency * multiplier > data->predicted_us)
- break;
- data->exit_us = s->exit_latency;
- data->last_state_idx = i;
+ continue;
+
+ if (s->power_usage < power_usage) {
+ power_usage = s->power_usage;
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
+ }
}
return data->last_state_idx;
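
To make the behaviour change concrete, here is a stand-alone sketch of the
new rule with invented numbers: among the states that satisfy the latency
and residency constraints, the lowest-power one wins even when it is not
the deepest, and states flagged as ignored are skipped entirely.

#include <stdio.h>

struct toy_state {
	unsigned int exit_latency;	/* us */
	unsigned int target_residency;	/* us */
	unsigned int power_usage;
	int ignore;
};

int main(void)
{
	struct toy_state states[] = {
		{ 1,   2,   1000, 0 },	/* shallow, high power */
		{ 20,  80,  300,  0 },	/* mid, lowest power   */
		{ 200, 800, 500,  1 },	/* deep, but ignored   */
	};
	unsigned int latency_req = 100, predicted_us = 500, multiplier = 1;
	unsigned int best_power = -1;	/* UINT_MAX, as in the patch */
	int i, best = 0;

	for (i = 0; i < 3; i++) {
		struct toy_state *s = &states[i];

		if (s->ignore)
			continue;
		if (s->target_residency > predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > predicted_us)
			continue;
		if (s->power_usage < best_power) {
			best_power = s->power_usage;
			best = i;
		}
	}

	printf("selected state index: %d\n", best);	/* prints 1 */
	return 0;
}
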
#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */
#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
+#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
+ unsigned int power_specified:1;
unsigned int cpu;
int last_residency;
struct completion kobj_unregister;
void *governor_data;
struct cpuidle_state *safe_state;
+
+ int (*prepare) (struct cpuidle_device *dev);
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);