From: Youngtae Lee
Date: Mon, 23 Apr 2018 04:00:19 +0000 (+0900)
Subject: cpufreq: Add fast_on/offline for fast hotplug
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=6f186d8aeed1152248ba01e40ff8a735faa60167;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

cpufreq: Add fast_on/offline for fast hotplug

This change performs fast hotplug processing on all fast-hotplug CPUs at
once. Even if the cluster goes off, it executes only a governor stop &
start for fast hotplug.

Change-Id: I288ebd19ee0fbb91f596234eac0fb11fd71573c5
Signed-off-by: Youngtae Lee
---

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9d204ca04936..55fa42807221 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1172,6 +1172,43 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 	kfree(policy);
 }
 
+int cpufreq_fast_online(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy *policy;
+	struct cpumask cl_online_mask;
+
+	for_each_cpu(cpu, &cpu_faston_mask) {
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		if (!policy)
+			panic("%s: can't get policy\n", __func__);
+
+		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+		down_write(&policy->rwsem);
+
+		if (cpumask_test_cpu(cpu, policy->cpus)) {
+			up_write(&policy->rwsem);
+			continue;
+		}
+
+
+		if (!policy_is_inactive(policy))
+			cpufreq_stop_governor(policy);
+
+		cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+		cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+		policy->cpu = cpumask_first(policy->cpus);
+		ret = cpufreq_start_governor(policy);
+		if (ret)
+			panic("%s: Failed to start governor\n", __func__);
+
+		up_write(&policy->rwsem);
+	}
+
+	return 0;
+}
+
 static int cpufreq_online(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
@@ -1180,6 +1217,11 @@ static int cpufreq_online(unsigned int cpu)
 	unsigned int j;
 	int ret;
 
+	if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+		cpufreq_fast_online();
+		return 0;
+	}
+
 	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
 	/* Check if this CPU already has a policy to manage it */
@@ -1361,11 +1403,45 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return 0;
 }
 
+int cpufreq_fast_offline(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy *policy;
+
+	for_each_cpu(cpu, &cpu_fastoff_mask) {
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		down_write(&policy->rwsem);
+		if (!cpumask_test_cpu(cpu, policy->cpus)) {
+			up_write(&policy->rwsem);
+			continue;
+		}
+
+		cpufreq_stop_governor(policy);
+
+		cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+		if (!policy_is_inactive(policy)) {
+			policy->cpu = cpumask_first(policy->cpus);
+			ret = cpufreq_start_governor(policy);
+			if (ret)
+				panic("%s: Failed to start governor\n", __func__);
+		}
+		up_write(&policy->rwsem);
+	}
+
+	return 0;
+}
+
 static int cpufreq_offline(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
 	int ret;
 
+	if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+		cpufreq_fast_offline();
+		return 0;
+	}
+
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
 	policy = cpufreq_cpu_get_raw(cpu);
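
Note: the fast paths above only trigger when the hotplugged CPU is already set
in cpu_faston_mask or cpu_fastoff_mask; those masks are declared elsewhere in
this tree and are expected to be populated by the platform hotplug driver
before the CPUs are actually brought up or down. Below is a minimal,
hypothetical caller sketch (not part of this commit) showing that ordering for
the offline side; the helper name example_fast_hotplug_out() and the use of
cpu_down() are assumptions for illustration only.

/*
 * Hypothetical caller sketch, not part of this commit: offline a group of
 * CPUs through the fast path added above.  Assumes cpu_fastoff_mask is a
 * globally visible struct cpumask (as referenced by cpufreq_fast_offline())
 * and that cpu_down() may be called from this context.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>

int example_fast_hotplug_out(const struct cpumask *cpus)
{
	int cpu, ret = 0;

	/* Mark every CPU first so cpufreq_offline() takes the fast path. */
	cpumask_or(&cpu_fastoff_mask, &cpu_fastoff_mask, cpus);

	for_each_cpu(cpu, cpus) {
		ret = cpu_down(cpu);	/* ends up in cpufreq_fast_offline() */
		if (ret)
			break;
	}

	/* Clear the bits so later, ordinary hotplug is not short-circuited. */
	cpumask_andnot(&cpu_fastoff_mask, &cpu_fastoff_mask, cpus);

	return ret;
}

Setting the whole mask before the first cpu_down() call is what lets
cpufreq_fast_offline() drop every sibling from policy->cpus in a single
governor stop/start cycle instead of once per CPU.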