cpufreq: Add fast_online/offline for fast hotplug
author Youngtae Lee <yt0729.lee@samsung.com>
Mon, 23 Apr 2018 04:00:19 +0000 (13:00 +0900)
committer lakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:59:01 +0000 (14:59 +0900)
Add cpufreq_fast_online/offline() to process hotplug of the
fast-hotplug CPUs in one pass. Even when a whole cluster goes
down, only a governor stop & start is executed for the
fast-hotplug CPUs instead of the full online/offline path.

Change-Id: I288ebd19ee0fbb91f596234eac0fb11fd71573c5
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
drivers/cpufreq/cpufreq.c
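
The intended call pattern, as far as this patch shows it, is roughly the
sketch below: the hotplug path first marks the CPUs it is about to toggle
in cpu_fastoff_mask/cpu_faston_mask (both are referenced but not declared
by this patch, so they come from the tree's fast-hotplug support), then
runs the ordinary hotplug calls, and cpufreq_offline()/cpufreq_online()
detect the mask and branch into the fast paths added below. The
fast_hotplug_down() helper and its use of cpu_down() are a hypothetical
illustration, not part of the patch.

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Referenced by this patch, declared elsewhere in the tree (assumption). */
extern struct cpumask cpu_fastoff_mask;

/*
 * Hypothetical caller: take a set of CPUs offline via the fast path.
 * Assumes CONFIG_HOTPLUG_CPU so that cpu_down() is available.
 */
static int fast_hotplug_down(const struct cpumask *cpus)
{
	int cpu, ret = 0;

	/*
	 * Mark the CPUs so that cpufreq_offline() short-circuits into
	 * cpufreq_fast_offline() and handles the whole set at once.
	 */
	cpumask_or(&cpu_fastoff_mask, &cpu_fastoff_mask, cpus);

	for_each_cpu(cpu, cpus) {
		ret = cpu_down(cpu);
		if (ret)
			break;
	}

	/* Drop the marks once the CPUs are down. */
	cpumask_andnot(&cpu_fastoff_mask, &cpu_fastoff_mask, cpus);

	return ret;
}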

index 9d204ca049365a053dd1257a1f5d2ba0c457b6cc..55fa42807221ad30216590878bd378407fff7e00 100644
@@ -1172,6 +1172,43 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
        kfree(policy);
 }
 
+/*
+ * cpufreq_fast_online - re-attach the CPUs in cpu_faston_mask to their
+ * policies and restart their governors, without running the full
+ * cpufreq_online() path.
+ */
+int cpufreq_fast_online(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+       struct cpumask cl_online_mask;
+
+       for_each_cpu(cpu, &cpu_faston_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               if (!policy)
+                       panic("%s: failed to get policy\n", __func__);
+
+               WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+               down_write(&policy->rwsem);
+
+               if (cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               if (!policy_is_inactive(policy))
+                       cpufreq_stop_governor(policy);
+
+               cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+               cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+               policy->cpu = cpumask_first(policy->cpus);
+               ret = cpufreq_start_governor(policy);
+               if (ret)
+                       panic("%s: Failed to start governor\n", __func__);
+
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_online(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
@@ -1180,6 +1217,11 @@ static int cpufreq_online(unsigned int cpu)
        unsigned int j;
        int ret;
 
+       if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+               cpufreq_fast_online();
+               return 0;
+       }
+
        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
        /* Check if this CPU already has a policy to manage it */
@@ -1361,11 +1403,45 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        return 0;
 }
 
+/*
+ * cpufreq_fast_offline - detach the CPUs in cpu_fastoff_mask from their
+ * policies: stop the governor, drop the CPUs from policy->cpus and, if the
+ * policy still has online CPUs, restart the governor on a new policy->cpu.
+ * The full cpufreq_offline() teardown is skipped.
+ */
+int cpufreq_fast_offline(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+
+       for_each_cpu(cpu, &cpu_fastoff_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               down_write(&policy->rwsem);
+               if (!cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               cpufreq_stop_governor(policy);
+
+               cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+               if (!policy_is_inactive(policy)) {
+                       policy->cpu = cpumask_first(policy->cpus);
+                       ret = cpufreq_start_governor(policy);
+                       if (ret)
+                               panic("%s: Failed to start governor\n", __func__);
+               }
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_offline(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
        int ret;
 
+       if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+               cpufreq_fast_offline();
+               return 0;
+       }
+
        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
        policy = cpufreq_cpu_get_raw(cpu);