#include "sched.h"
-#define THROTTLE_NSEC 50000000 /* 50ms default */
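+/*
+ * Asymmetric defaults: raising the OPP is throttled for only 500us so
+ * rising demand is served quickly, while dropping the OPP is throttled
+ * for 50ms so ramping back down stays conservative.
+ */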
+#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */
+#define THROTTLE_UP_NSEC 500000 /* 500us default */
struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
static bool __read_mostly cpufreq_driver_slow;
/**
* gov_data - per-policy data internal to the governor
- * @throttle: next throttling period expiry. Derived from throttle_nsec
- * @throttle_nsec: throttle period length in nanoseconds
+ * @up_throttle: next throttling period expiry if increasing OPP
+ * @down_throttle: next throttling period expiry if decreasing OPP
+ * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
+ * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
* @task: worker thread for dvfs transition that may block/sleep
* @irq_work: callback used to wake up worker thread
* @requested_freq: last frequency requested by the sched governor
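+ * @max: highest frequency seen in policy->max, used to scale capacity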
* call down_write(policy->rwsem).
*/
struct gov_data {
- ktime_t throttle;
- unsigned int throttle_nsec;
+ ktime_t up_throttle;
+ ktime_t down_throttle;
+ unsigned int up_throttle_nsec;
+ unsigned int down_throttle_nsec;
struct task_struct *task;
struct irq_work irq_work;
unsigned int requested_freq;
+ unsigned int max;
};
static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
__cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
- gd->throttle = ktime_add_ns(ktime_get(), gd->throttle_nsec);
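+ /* rearm both throttle deadlines relative to this transition */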
+ gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
+ gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
up_write(&policy->rwsem);
}
-static bool finish_last_request(struct gov_data *gd)
+static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
{
ktime_t now = ktime_get();
- if (ktime_after(now, gd->throttle))
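+ /*
+ * Pick the throttle window matching the direction of the pending
+ * request: lowering the OPP waits on the down throttle.
+ */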
+ ktime_t throttle = gd->requested_freq < cur_freq ?
+ gd->down_throttle : gd->up_throttle;
+
+ if (ktime_after(now, throttle))
return false;
while (1) {
- int usec_left = ktime_to_ns(ktime_sub(gd->throttle, now));
+ int usec_left = ktime_to_ns(ktime_sub(throttle, now));
+ int usec_left = ktime_to_ns(ktime_sub(throttle, now));
usec_left /= NSEC_PER_USEC;
trace_cpufreq_sched_throttled(usec_left);
usleep_range(usec_left, usec_left + 100);
now = ktime_get();
- if (ktime_after(now, gd->throttle))
+ if (ktime_after(now, throttle))
return true;
}
}
* if the frequency thread sleeps while waiting to be
* unthrottled, start over to check for a newer request
*/
- if (finish_last_request(gd))
+ if (finish_last_request(gd, policy->cur))
continue;
last_request = new_request;
cpufreq_sched_try_driver_target(policy, new_request);
}
/* Convert the new maximum capacity request into a cpu frequency */
- freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
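+ /*
+ * Scale by the highest max frequency seen (gd->max) rather than
+ * policy->max, which a GOV_LIMITS event may have lowered.
+ */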
+ freq_new = capacity * gd->max >> SCHED_CAPACITY_SHIFT;
if (cpufreq_frequency_table_target(policy, policy->freq_table,
freq_new, CPUFREQ_RELATION_L,
&index_new))
goto out;
freq_new = policy->freq_table[index_new].frequency;
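+ /* clamp the chosen frequency to the current policy limits */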
+ if (freq_new > policy->max)
+ freq_new = policy->max;
+
+ if (freq_new < policy->min)
+ freq_new = policy->min;
+
trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
gd->requested_freq);
-
if (freq_new == gd->requested_freq)
goto out;
static_key_slow_dec(&__sched_freq);
}
+static struct attribute_group sched_attr_group_gov_pol;
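+/* the governor exposes a single, per-policy sysfs attribute group */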
+static struct attribute_group *get_sysfs_attr(void)
+{
+ return &sched_attr_group_gov_pol;
+}
+
static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
{
struct gov_data *gd;
int cpu;
+ int rc;
for_each_cpu(cpu, policy->cpus)
memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
if (!gd)
return -ENOMEM;
- gd->throttle_nsec = policy->cpuinfo.transition_latency ?
+ gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
policy->cpuinfo.transition_latency :
- THROTTLE_NSEC;
+ THROTTLE_UP_NSEC;
+ gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
pr_debug("%s: throttle threshold = %u [ns]\n",
- __func__, gd->throttle_nsec);
+ __func__, gd->up_throttle_nsec);
+
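+ /* seed the max frequency used for capacity-to-frequency scaling */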
+ gd->max = policy->max;
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr());
+ if (rc) {
+ pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
+ goto err;
+ }
if (cpufreq_driver_is_slow()) {
cpufreq_driver_slow = true;
put_task_struct(gd->task);
}
+ sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr());
+
policy->governor_data = NULL;
kfree(gd);
return 0;
}
+static void cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+ struct gov_data *gd;
+
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+ policy->cpu, policy->min, policy->max,
+ policy->cur);
+
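+ /* never block in the limits callback; skip the update if contended */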
+ if (!down_write_trylock(&policy->rwsem))
+ return;
+ /*
+ * Need to keep track of highest max frequency for
+ * capacity calculations
+ */
+ gd = policy->governor_data;
+ if (gd->max < policy->max)
+ gd->max = policy->max;
+
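+ /* bring the current frequency within the new [min, max] bounds */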
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+
+ up_write(&policy->rwsem);
+}
+
static int cpufreq_sched_stop(struct cpufreq_policy *policy)
{
int cpu;
case CPUFREQ_GOV_STOP:
return cpufreq_sched_stop(policy);
case CPUFREQ_GOV_LIMITS:
+ cpufreq_sched_limits(policy);
break;
}
return 0;
}
+/*
+ * Tunables: up_throttle_nsec and down_throttle_nsec set the minimum
+ * delay between OPP increases and decreases respectively. Both are
+ * exported per policy in the "sched" sysfs attribute group.
+ */
+static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
+{
+ return sprintf(buf, "%u\n", gd->up_throttle_nsec);
+}
+
+static ssize_t store_up_throttle_nsec(struct gov_data *gd,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ gd->up_throttle_nsec = val;
+ return count;
+}
+
+static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
+{
+ return sprintf(buf, "%u\n", gd->down_throttle_nsec);
+}
+
+static ssize_t store_down_throttle_nsec(struct gov_data *gd,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ gd->down_throttle_nsec = val;
+ return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name) \
+static ssize_t show_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ return show_##file_name(policy->governor_data, buf); \
+}
+
+#define store_gov_pol_sys(file_name) \
+static ssize_t store_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ return store_##file_name(policy->governor_data, buf, count); \
+}
+
+#define gov_pol_attr_rw(_name) \
+ static struct freq_attr _name##_gov_pol = \
+ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define show_store_gov_pol_sys(file_name) \
+ show_gov_pol_sys(file_name); \
+ store_gov_pol_sys(file_name)
+
+#define tunable_handlers(file_name) \
+ show_store_gov_pol_sys(file_name); \
+ gov_pol_attr_rw(file_name)
+
+tunable_handlers(down_throttle_nsec);
+tunable_handlers(up_throttle_nsec);
+
+/* Per policy governor instance */
+static struct attribute *sched_attributes_gov_pol[] = {
+ &up_throttle_nsec_gov_pol.attr,
+ &down_throttle_nsec_gov_pol.attr,
+ NULL,
+};
+
+static struct attribute_group sched_attr_group_gov_pol = {
+ .attrs = sched_attributes_gov_pol,
+ .name = "sched",
+};
+
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
static
#endif