[COMMON] sched: support freqvar tuner
authorPark Bumgyu <bumgyu.park@samsung.com>
Fri, 19 Jan 2018 07:42:10 +0000 (16:42 +0900)
committerChungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:26:35 +0000 (17:26 +0900)
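Add a frequency-variant tuner for the schedutil governor: per-frequency
tables, supplied via device tree and adjustable at runtime through the
governor sysfs attributes, drive three controls that are re-evaluated on
cpufreq transitions:

 - a PELT boost ratio applied as the scale_cpu factor in accumulate_sum()
 - up/down rate limits applied to the schedutil tunables
 - an up-scale ratio replacing the fixed 25% margin in get_next_freq()

An illustrative device tree node (the property names match what
freqvar_tune_init() parses; the values are hypothetical). Each table is
a "<value> [<freq> <value> ...]" list: the first value applies below the
first frequency (in kHz), each following value from its preceding
frequency upward:

    freqvar-tune {
            device_type = "freqvar-tune";
            shared-cpus = "0-3";
            boost_table = <10 1000000 5>;            /* percent */
            up_rate_limit_table = <2>;               /* ms */
            down_rate_limit_table = <10 1000000 20>; /* ms */
            upscale_ratio_table = <80>;              /* percent */
    };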
Change-Id: Ifd2fb13a6ab44f3137aca816c8537a85af0e7850
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
drivers/cpufreq/Kconfig
kernel/sched/Makefile
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/freqvar_tune.c [new file with mode: 0644]

index d8addbce40bcc4f9c6a29e32c98cd0c15bac15b4..8c409edac140bff80c08a125f2930cc4bd4bf3f1 100644 (file)
@@ -190,6 +190,7 @@ config CPU_FREQ_GOV_SCHEDUTIL
        depends on CPU_FREQ && SMP
        select CPU_FREQ_GOV_ATTR_SET
        select IRQ_WORK
+       select FREQVAR_TUNE
        help
          This governor makes decisions based on the utilization data provided
          by the scheduler.  It sets the CPU frequency to be proportional to
@@ -202,6 +203,15 @@ config CPU_FREQ_GOV_SCHEDUTIL
 
          If in doubt, say N.
 
+config FREQVAR_TUNE
+       bool "CPU frequency variant tuner"
+       depends on CPU_FREQ_GOV_SCHEDUTIL
+       help
+         This option provides a tuner that adjusts scheduler and governor
+         parameters (PELT boost, rate limits, frequency up-scale ratio)
+         as a function of the current CPU frequency.
+
+         Say N if unsure.
+
 comment "CPU frequency scaling drivers"
 
 config CPUFREQ_DT
index 00aba22d914b14b1cfdd0847273dbb45770ad406..7c7516a210206f6c0b41a1dc1815f82b21cb4765 100644 (file)
@@ -31,3 +31,4 @@ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
+obj-$(CONFIG_FREQVAR_TUNE) += freqvar_tune.o
index 1f525acba1fb49cf550a0da44a3bb300131ff2e2..8f91c8a92011e1f571b6f5377a92a377ac9642fd 100644 (file)
@@ -171,6 +171,15 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
        }
 }
 
+#ifdef CONFIG_FREQVAR_TUNE
+unsigned int freqvar_tipping_point(int cpu, unsigned int freq);
+#else
+static inline unsigned int freqvar_tipping_point(int cpu, unsigned int freq)
+{
+       return freq + (freq >> 2);
+}
+#endif
+
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
  * @sg_policy: schedutil policy object to compute the new frequency for.
@@ -200,7 +209,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;
 
-       freq = (freq + (freq >> 2)) * util / max;
+       freq = freqvar_tipping_point(policy->cpu, freq) * util / max;
 
        if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
@@ -433,6 +442,54 @@ static void sugov_irq_work(struct irq_work *irq_work)
        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
 }
 
+/************************ Governor externals ***********************/
+static void update_min_rate_limit_ns(struct sugov_policy *sg_policy);
+void sugov_update_rate_limit_us(struct cpufreq_policy *policy,
+                       int up_rate_limit_ms, int down_rate_limit_ms)
+{
+       struct sugov_policy *sg_policy;
+       struct sugov_tunables *tunables;
+
+       sg_policy = policy->governor_data;
+       if (!sg_policy)
+               return;
+
+       tunables = sg_policy->tunables;
+       if (!tunables)
+               return;
+
+       tunables->up_rate_limit_us = (unsigned int)(up_rate_limit_ms * USEC_PER_MSEC);
+       tunables->down_rate_limit_us = (unsigned int)(down_rate_limit_ms * USEC_PER_MSEC);
+
+       sg_policy->up_rate_delay_ns = up_rate_limit_ms * NSEC_PER_MSEC;
+       sg_policy->down_rate_delay_ns = down_rate_limit_ms * NSEC_PER_MSEC;
+
+       update_min_rate_limit_ns(sg_policy);
+}
+
+int sugov_sysfs_add_attr(struct cpufreq_policy *policy, const struct attribute *attr)
+{
+       struct sugov_policy *sg_policy;
+       struct sugov_tunables *tunables;
+
+       sg_policy = policy->governor_data;
+       if (!sg_policy)
+               return -ENODEV;
+
+       tunables = sg_policy->tunables;
+       if (!tunables)
+               return -ENODEV;
+
+       return sysfs_create_file(&tunables->attr_set.kobj, attr);
+}
+
+struct cpufreq_policy *sugov_get_attr_policy(struct gov_attr_set *attr_set)
+{
+       struct sugov_policy *sg_policy = list_first_entry(&attr_set->policy_list,
+                                               typeof(*sg_policy), tunables_hook);
+       return sg_policy->policy;
+}
+
 /************************** sysfs interface ************************/
 
 static struct sugov_tunables *global_tunables;
index 4ac11857ffcadf19401d89e33b75ace53a10ac0c..409946f224b70c19a4b620ff1076a2c4a1e97193 100644 (file)
@@ -2900,6 +2900,11 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
 
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
+#ifdef CONFIG_FREQVAR_TUNE
+extern unsigned long freqvar_boost_vector(int cpu, unsigned long util,
+                                               struct cfs_rq *cfs_rq);
+#endif
+
 /*
  * Accumulate the three separate parts of the sum; d1 the remainder
  * of the last (incomplete) period, d2 the span of full periods and d3
@@ -2930,7 +2935,11 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
        u64 periods;
 
        scale_freq = arch_scale_freq_capacity(NULL, cpu);
+#ifdef CONFIG_FREQVAR_TUNE
+       scale_cpu = freqvar_boost_vector(cpu, sa->util_avg, cfs_rq);
+#else
        scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+#endif
 
        delta += sa->period_contrib;
        periods = delta / 1024; /* A period is 1024us (~1ms) */
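(Note: with CONFIG_FREQVAR_TUNE the accumulation above runs against the
boosted capacity rather than the nominal one:

    scale_cpu = cap + (cap - util / nr_running) * ratio / 100

so with cap = 1024, util = 512, nr_running = 1 and a 50% boost ratio,
scale_cpu = 1024 + 512 * 50 / 100 = 1280, and utilization accumulates
about 25% faster than at nominal capacity. See freqvar_boost_vector()
in the new file below.)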
diff --git a/kernel/sched/freqvar_tune.c b/kernel/sched/freqvar_tune.c
new file mode 100644 (file)
index 0000000..d869549
--- /dev/null
@@ -0,0 +1,639 @@
+/*
+ * Frequency variant cpufreq driver
+ *
+ * Copyright (C) 2017 Samsung Electronics Co., Ltd
+ * Park Bumgyu <bumgyu.park@samsung.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "sched.h"
+
+/**********************************************************************
+ * common APIs                                                        *
+ **********************************************************************/
+struct freqvar_table {
+       int frequency;
+       int value;
+};
+
+static int freqvar_get_value(int freq, struct freqvar_table *table)
+{
+       struct freqvar_table *pos = table;
+       int value = -EINVAL;
+
+       for (; pos->frequency != CPUFREQ_TABLE_END; pos++)
+               if (freq == pos->frequency) {
+                       value = pos->value;
+                       break;
+               }
+
+       return value;
+}
+
+static int freqvar_get_table_size(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *cpufreq_table, *pos;
+       int size = 0;
+
+       cpufreq_table = policy->freq_table;
+       if (unlikely(!cpufreq_table)) {
+               pr_debug("%s: Unable to find frequency table\n", __func__);
+               return -ENOENT;
+       }
+
+       cpufreq_for_each_valid_entry(pos, cpufreq_table)
+               size++;
+
+       return size;
+}
+
+static int freqvar_fill_frequency_table(struct cpufreq_policy *policy,
+                                       struct freqvar_table *table)
+{
+       struct cpufreq_frequency_table *cpufreq_table, *pos;
+       int index;
+
+       cpufreq_table = policy->freq_table;
+       if (unlikely(!cpufreq_table)) {
+               pr_debug("%s: Unable to find frequency table\n", __func__);
+               return -ENOENT;
+       }
+
+       index = 0;
+       cpufreq_for_each_valid_entry(pos, cpufreq_table) {
+               table[index].frequency = pos->frequency;
+               index++;
+       }
+       table[index].frequency = CPUFREQ_TABLE_END;
+
+       return 0;
+}
+
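+/*
+ * Apply a "v0 f1 v1 f2 v2 ..." token stream (odd count, ascending
+ * boundary frequencies) to 'dst': v0 applies below f1, v1 to [f1, f2),
+ * and the last value from the last frequency upward. Relies on 'dst'
+ * being sorted in descending frequency order; the stray writes made on
+ * the odd loop steps are overwritten by later, lower-frequency passes.
+ */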
+static int freqvar_update_table(unsigned int *src, int src_size,
+                                       struct freqvar_table *dst)
+{
+       struct freqvar_table *pos, *last_pos = dst;
+       unsigned int value = 0, freq = 0;
+       int i;
+
+       for (i = src_size - 1; i >= 0; i--) {
+               value = src[i];
+               freq  = (i <= 0) ? 0 : src[i - 1];
+
+               for (pos = last_pos; pos->frequency != CPUFREQ_TABLE_END; pos++)
+                       if (pos->frequency >= freq) {
+                               pos->value = value;
+                       } else {
+                               last_pos = pos;
+                               break;
+                       }
+       }
+
+       return 0;
+}
+
+static int freqvar_parse_value_dt(struct device_node *dn, const char *table_name,
+                                               struct freqvar_table *table)
+{
+       int size, ret = 0;
+       unsigned int *temp;
+
+       /* get the table from device tree source */
+       size = of_property_count_u32_elems(dn, table_name);
+       if (size <= 0)
+               return size;
+
+       temp = kzalloc(sizeof(unsigned int) * size, GFP_KERNEL);
+       if (!temp)
+               return -ENOMEM;
+
+       ret = of_property_read_u32_array(dn, table_name, temp, size);
+       if (ret)
+               goto fail_parsing;
+
+       freqvar_update_table(temp, size, table);
+
+fail_parsing:
+       kfree(temp);
+       return ret;
+}
+
+static void freqvar_free(void *data)
+{
+       kfree(data);
+}
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+       const char *cp;
+       int i;
+       int ntokens = 1;
+       unsigned int *tokenized_data;
+       int err = -EINVAL;
+
+       cp = buf;
+       while ((cp = strpbrk(cp + 1, " :")))
+               ntokens++;
+
+       if (!(ntokens & 0x1))
+               goto err;
+
+       tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+       if (!tokenized_data) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       cp = buf;
+       i = 0;
+       while (i < ntokens) {
+               if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+                       goto err_kfree;
+
+               cp = strpbrk(cp, " :");
+               if (!cp)
+                       break;
+               cp++;
+       }
+
+       if (i != ntokens)
+               goto err_kfree;
+
+       *num_tokens = ntokens;
+       return tokenized_data;
+
+err_kfree:
+       kfree(tokenized_data);
+err:
+       return ERR_PTR(err);
+}
+
+#define attr_freqvar(type, name, table)                                                \
+static ssize_t freqvar_##name##_show(struct gov_attr_set *attr_set, char *buf) \
+{                                                                              \
+       struct cpufreq_policy *policy = sugov_get_attr_policy(attr_set);        \
+       struct freqvar_##type *data = per_cpu(freqvar_##type, policy->cpu);     \
+       struct freqvar_table *pos = data->table;                                \
+       int ret = 0;                                                            \
+                                                                               \
+       for (; pos->frequency != CPUFREQ_TABLE_END; pos++)                      \
+               ret += sprintf(buf + ret, "%8d ratio:%3d\n",                    \
+                                       pos->frequency, pos->value);            \
+                                                                               \
+       return ret;                                                             \
+}                                                                              \
+                                                                               \
+static ssize_t freqvar_##name##_store(struct gov_attr_set *attr_set,           \
+                                     const char *buf, size_t count)            \
+{                                                                              \
+       struct cpufreq_policy *policy = sugov_get_attr_policy(attr_set);        \
+       struct freqvar_##type *data = per_cpu(freqvar_##type, policy->cpu);     \
+       struct freqvar_table *old_table = data->table;                          \
+       unsigned int *new_table;                                                \
+       int ntokens;                                                            \
+                                                                               \
+       new_table = get_tokenized_data(buf, &ntokens);                          \
+       if (IS_ERR(new_table))                                                  \
+               return PTR_ERR(new_table);                                      \
+                                                                               \
+       freqvar_update_table(new_table, ntokens, old_table);                    \
+       kfree(new_table);                                                       \
+                                                                               \
+       return count;                                                           \
+}
+
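+/* Provided by the schedutil governor (kernel/sched/cpufreq_schedutil.c). */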
+int sugov_sysfs_add_attr(struct cpufreq_policy *policy, const struct attribute *attr);
+struct cpufreq_policy *sugov_get_attr_policy(struct gov_attr_set *attr_set);
+
+/**********************************************************************
+ * freqvar boost                                                      *
+ **********************************************************************/
+struct freqvar_boost {
+       struct freqvar_table *table;
+       unsigned int ratio;
+};
+DEFINE_PER_CPU(struct freqvar_boost *, freqvar_boost);
+
+attr_freqvar(boost, boost, table);
+static struct governor_attr freqvar_boost_attr = __ATTR_RW(freqvar_boost);
+
+unsigned long freqvar_boost_vector(int cpu, unsigned long util, struct cfs_rq *cfs_rq)
+{
+       struct freqvar_boost *boost = per_cpu(freqvar_boost, cpu);
+       unsigned long cap = arch_scale_cpu_capacity(NULL, cpu);
+       unsigned long vector;
+       int margin;
+
+       if (!boost || boost->ratio <= 0)
+               return cap;
+
+       /*
+        * Boost the task load (util_sum/avg); the cfs_rq load itself is not
+        * included. The boost ratio is re-read on every frequency transition,
+        * so the vector varies with the current frequency. A vector equal to
+        * the CPU capacity (1024) has no effect; 2048 accumulates load twice
+        * as fast as the original.
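+        *
+        * e.g. cap = 1024, util = 512, nr_running = 1, ratio = 50:
+        *   margin = 1024 - 512 = 512
+        *   vector = 1024 + 512 * 50 / 100 = 1280
+        */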
+       if (cfs_rq && cfs_rq->nr_running)
+               margin = cap - (util / cfs_rq->nr_running);
+       else
+               margin = cap - util;
+
+       if (margin <= 0)
+               return cap;
+
+       vector = cap + (margin * boost->ratio / 100);
+
+       return vector;
+}
+
+static void freqvar_boost_update(int cpu, int new_freq)
+{
+       struct freqvar_boost *boost;
+
+       boost = per_cpu(freqvar_boost, cpu);
+       if (!boost)
+               return;
+
+       boost->ratio = freqvar_get_value(new_freq, boost->table);
+}
+
+static void freqvar_boost_free(struct freqvar_boost *boost)
+{
+       if (boost)
+               freqvar_free(boost->table);
+
+       freqvar_free(boost);
+}
+
+static struct freqvar_boost *
+freqvar_boost_alloc(struct cpufreq_policy *policy)
+{
+       struct freqvar_boost *boost;
+       int size;
+
+       boost = kzalloc(sizeof(*boost), GFP_KERNEL);
+       if (!boost)
+               return NULL;
+
+       size = freqvar_get_table_size(policy);
+       if (size <= 0)
+               goto fail_alloc;
+
+       boost->table = kzalloc(sizeof(struct freqvar_table) * (size + 1), GFP_KERNEL);
+       if (!boost->table)
+               goto fail_alloc;
+
+       return boost;
+
+fail_alloc:
+       freqvar_boost_free(boost);
+       return NULL;
+}
+
+static int freqvar_boost_init(struct device_node *dn, const struct cpumask *mask)
+{
+       struct freqvar_boost *boost;
+       struct cpufreq_policy *policy;
+       int cpu, ret = 0;
+
+       policy = cpufreq_cpu_get(cpumask_first(mask));
+       if (!policy)
+               return -ENODEV;
+
+       boost = freqvar_boost_alloc(policy);
+       if (!boost) {
+               ret = -ENOMEM;
+               goto fail_init;
+       }
+
+       ret = freqvar_fill_frequency_table(policy, boost->table);
+       if (ret)
+               goto fail_init;
+
+       ret = freqvar_parse_value_dt(dn, "boost_table", boost->table);
+       if (ret)
+               goto fail_init;
+
+       ret = sugov_sysfs_add_attr(policy, &freqvar_boost_attr.attr);
+       if (ret)
+               goto fail_init;
+
+       for_each_cpu(cpu, mask)
+               per_cpu(freqvar_boost, cpu) = boost;
+
+       freqvar_boost_update(policy->cpu, policy->cur);
+
+       cpufreq_cpu_put(policy);
+
+       return 0;
+
+fail_init:
+       cpufreq_cpu_put(policy);
+       freqvar_boost_free(boost);
+
+       return ret;
+}
+
+/**********************************************************************
+ * freqvar rate limit                                                 *
+ **********************************************************************/
+struct freqvar_rate_limit {
+       struct freqvar_table *up_table;
+       struct freqvar_table *down_table;
+};
+DEFINE_PER_CPU(struct freqvar_rate_limit *, freqvar_rate_limit);
+
+attr_freqvar(rate_limit, up_rate_limit, up_table);
+attr_freqvar(rate_limit, down_rate_limit, down_table);
+static struct governor_attr freqvar_up_rate_limit = __ATTR_RW(freqvar_up_rate_limit);
+static struct governor_attr freqvar_down_rate_limit = __ATTR_RW(freqvar_down_rate_limit);
+
+void sugov_update_rate_limit_us(struct cpufreq_policy *policy,
+                       int up_rate_limit_ms, int down_rate_limit_ms);
+static void freqvar_rate_limit_update(int cpu, int new_freq)
+{
+       struct freqvar_rate_limit *rate_limit;
+       int up_rate_limit, down_rate_limit;
+       struct cpufreq_policy *policy;
+
+       rate_limit = per_cpu(freqvar_rate_limit, cpu);
+       if (!rate_limit)
+               return;
+
+       up_rate_limit = freqvar_get_value(new_freq, rate_limit->up_table);
+       down_rate_limit = freqvar_get_value(new_freq, rate_limit->down_table);
+       if (up_rate_limit < 0 || down_rate_limit < 0)
+               return;
+
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy)
+               return;
+
+       sugov_update_rate_limit_us(policy, up_rate_limit, down_rate_limit);
+
+       cpufreq_cpu_put(policy);
+}
+
+static void freqvar_rate_limit_free(struct freqvar_rate_limit *rate_limit)
+{
+       if (rate_limit) {
+               freqvar_free(rate_limit->up_table);
+               freqvar_free(rate_limit->down_table);
+       }
+
+       freqvar_free(rate_limit);
+}
+
+static struct freqvar_rate_limit *
+freqvar_rate_limit_alloc(struct cpufreq_policy *policy)
+{
+       struct freqvar_rate_limit *rate_limit;
+       int size;
+
+       rate_limit = kzalloc(sizeof(*rate_limit), GFP_KERNEL);
+       if (!rate_limit)
+               return NULL;
+
+       size = freqvar_get_table_size(policy);
+       if (size <= 0)
+               goto fail_alloc;
+
+       rate_limit->up_table = kzalloc(sizeof(struct freqvar_table)
+                                       * (size + 1), GFP_KERNEL);
+       if (!rate_limit->up_table)
+               goto fail_alloc;
+
+       rate_limit->down_table = kzalloc(sizeof(struct freqvar_table)
+                                       * (size + 1), GFP_KERNEL);
+       if (!rate_limit->down_table)
+               goto fail_alloc;
+
+       return rate_limit;
+
+fail_alloc:
+       freqvar_rate_limit_free(rate_limit);
+       return NULL;
+}
+
+static int freqvar_rate_limit_init(struct device_node *dn, const struct cpumask *mask)
+{
+       struct freqvar_rate_limit *rate_limit;
+       struct cpufreq_policy *policy;
+       int cpu, ret = 0;
+
+       policy = cpufreq_cpu_get(cpumask_first(mask));
+       if (!policy)
+               return -ENODEV;
+
+       rate_limit = freqvar_rate_limit_alloc(policy);
+       if (!rate_limit) {
+               ret = -ENOMEM;
+               goto fail_init;
+       }
+
+       ret = freqvar_fill_frequency_table(policy, rate_limit->up_table);
+       if (ret)
+               goto fail_init;
+
+       ret = freqvar_fill_frequency_table(policy, rate_limit->down_table);
+       if (ret)
+               goto fail_init;
+
+       ret = freqvar_parse_value_dt(dn, "up_rate_limit_table", rate_limit->up_table);
+       if (ret)
+               goto fail_init;
+
+       ret = freqvar_parse_value_dt(dn, "down_rate_limit_table", rate_limit->down_table);
+       if (ret)
+               goto fail_init;
+
+       ret = sugov_sysfs_add_attr(policy, &freqvar_up_rate_limit.attr);
+       if (ret)
+               goto fail_init;
+
+       ret = sugov_sysfs_add_attr(policy, &freqvar_down_rate_limit.attr);
+       if (ret)
+               goto fail_init;
+
+       for_each_cpu(cpu, mask)
+               per_cpu(freqvar_rate_limit, cpu) = rate_limit;
+
+       freqvar_rate_limit_update(policy->cpu, policy->cur);
+
+       cpufreq_cpu_put(policy);
+
+       return 0;
+
+fail_init:
+       freqvar_rate_limit_free(rate_limit);
+       cpufreq_cpu_put(policy);
+
+       return ret;
+}
+
+/**********************************************************************
+ * freqvar up-scale ratio                                             *
+ **********************************************************************/
+struct freqvar_upscale_ratio {
+       struct freqvar_table *table;
+       int ratio;
+};
+DEFINE_PER_CPU(struct freqvar_upscale_ratio *, freqvar_upscale_ratio);
+
+attr_freqvar(upscale_ratio, upscale_ratio, table);
+static struct governor_attr freqvar_upscale_ratio_attr = __ATTR_RW(freqvar_upscale_ratio);
+
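+/*
+ * Tipping point used by get_next_freq() in cpufreq_schedutil.c. An
+ * upscale ratio of 80 reproduces the default 25% margin, since
+ * freq * 100 / 80 == freq + (freq >> 2); smaller ratios ramp up earlier.
+ */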
+unsigned int freqvar_tipping_point(int cpu, unsigned int freq)
+{
+       struct freqvar_upscale_ratio *upscale = per_cpu(freqvar_upscale_ratio, cpu);
+
+       if (!upscale || upscale->ratio <= 0)
+               return freq + (freq >> 2);
+
+       return freq * 100 / upscale->ratio;
+}
+
+static void freqvar_upscale_ratio_update(int cpu, int new_freq)
+{
+       struct freqvar_upscale_ratio *upscale;
+
+       upscale = per_cpu(freqvar_upscale_ratio, cpu);
+       if (!upscale)
+               return;
+
+       upscale->ratio = freqvar_get_value(new_freq, upscale->table);
+}
+
+static void freqvar_upscale_ratio_free(struct freqvar_upscale_ratio *upscale)
+{
+       if (upscale)
+               freqvar_free(upscale->table);
+
+       freqvar_free(upscale);
+}
+
+static struct freqvar_upscale_ratio *
+freqvar_upscale_ratio_alloc(struct cpufreq_policy *policy)
+{
+       struct freqvar_upscale_ratio *upscale;
+       int size;
+
+       upscale = kzalloc(sizeof(*upscale), GFP_KERNEL);
+       if (!upscale)
+               return NULL;
+
+       size = freqvar_get_table_size(policy);
+       if (size <= 0)
+               goto fail_alloc;
+
+       upscale->table = kzalloc(sizeof(struct freqvar_table) * (size + 1), GFP_KERNEL);
+       if (!upscale->table)
+               goto fail_alloc;
+
+       return upscale;
+
+fail_alloc:
+       freqvar_upscale_ratio_free(upscale);
+       return NULL;
+}
+
+static int freqvar_upscale_ratio_init(struct device_node *dn, const struct cpumask *mask)
+{
+       struct freqvar_upscale_ratio *upscale;
+       struct cpufreq_policy *policy;
+       int cpu, ret = 0;
+
+       policy = cpufreq_cpu_get(cpumask_first(mask));
+       if (!policy)
+               return -ENODEV;
+
+       upscale = freqvar_upscale_ratio_alloc(policy);
+       if (!upscale) {
+               ret = -ENOMEM;
+               goto fail_init;
+       }
+
+       ret = freqvar_fill_frequency_table(policy, upscale->table);
+       if (ret)
+               goto fail_init;
+
+       ret = freqvar_parse_value_dt(dn, "upscale_ratio_table", upscale->table);
+       if (ret)
+               goto fail_init;
+
+       ret = sugov_sysfs_add_attr(policy, &freqvar_upscale_ratio_attr.attr);
+       if (ret)
+               goto fail_init;
+
+       for_each_cpu(cpu, mask)
+               per_cpu(freqvar_upscale_ratio, cpu) = upscale;
+
+       freqvar_upscale_ratio_update(policy->cpu, policy->cur);
+
+       cpufreq_cpu_put(policy);
+
+       return 0;
+
+fail_init:
+       cpufreq_cpu_put(policy);
+       freqvar_upscale_ratio_free(upscale);
+
+       return ret;
+}
+
+/**********************************************************************
+ * cpufreq notifier callback                                          *
+ **********************************************************************/
+static int freqvar_cpufreq_callback(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+
+       if (freq->flags & CPUFREQ_CONST_LOOPS)
+               return NOTIFY_OK;
+
+       if (val != CPUFREQ_POSTCHANGE)
+               return NOTIFY_OK;
+
+       freqvar_boost_update(freq->cpu, freq->new);
+       freqvar_rate_limit_update(freq->cpu, freq->new);
+       freqvar_upscale_ratio_update(freq->cpu, freq->new);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block freqvar_cpufreq_notifier = {
+       .notifier_call  = freqvar_cpufreq_callback,
+};
+
+/**********************************************************************
+ * initialization                                                     *
+ **********************************************************************/
+static int __init freqvar_tune_init(void)
+{
+       struct device_node *dn = NULL;
+       struct cpumask shared_mask;
+       const char *buf;
+
+       while ((dn = of_find_node_by_type(dn, "freqvar-tune"))) {
+               /*
+                * "shared-cpus" lists the CPUs that scale at the same time;
+                * CPUFreq calls these sibling CPUs and masks them into the
+                * policy's related_cpus.
+                */
+               if (of_property_read_string(dn, "shared-cpus", &buf))
+                       continue;
+
+               cpumask_clear(&shared_mask);
+               if (cpulist_parse(buf, &shared_mask))
+                       continue;
+
+               cpumask_and(&shared_mask, &shared_mask, cpu_possible_mask);
+               if (cpumask_empty(&shared_mask))
+                       continue;
+
+               freqvar_boost_init(dn, &shared_mask);
+               freqvar_rate_limit_init(dn, &shared_mask);
+               freqvar_upscale_ratio_init(dn, &shared_mask);
+       }
+
+       cpufreq_register_notifier(&freqvar_cpufreq_notifier,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+
+       return 0;
+}
+late_initcall(freqvar_tune_init);
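At runtime the tables appear as governor attributes, typically under
/sys/devices/system/cpu/cpufreq/policy*/schedutil/ (the exact path
depends on where the schedutil tunables are instantiated):

    cat /sys/devices/system/cpu/cpufreq/policy0/schedutil/freqvar_boost
    echo "5 1000000:10" > /sys/devices/system/cpu/cpufreq/policy0/schedutil/freqvar_boost

A store accepts the same token layout as the device tree tables, with
' ' or ':' as separators.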