Merge branch 'android-4.14-p' into android-exynos-4.14-ww-9610-minor_up-dev
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] drivers/cpufreq/cpufreq.c
index 927399454d625aa9465f87f1c44fa1bfd1221175..88f77e6bb931d2acda86cf803f43a14a0f30a4a2 100644
@@ -556,13 +556,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-                                struct attribute *attr, char *buf)
+                         struct kobj_attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
-                                 const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+                          const char *buf, size_t count)
 {
        int ret, enable;
 
@@ -1170,6 +1170,49 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
        kfree(policy);
 }
 
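+/*
+ * cpufreq_fast_online - fast-path online for the CPUs in cpu_faston_mask.
+ *
+ * Instead of running the full cpufreq_online() path, merge each CPU back
+ * into its existing policy's ->cpus mask and restart the governor, all
+ * under policy->rwsem.
+ */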
+int cpufreq_fast_online(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+       struct cpumask cl_online_mask;
+
+       for_each_cpu(cpu, &cpu_faston_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               if (!policy)
+                       panic("%s: failed to get policy\n", __func__);
+
+               WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+               down_write(&policy->rwsem);
+
+               if (cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               if (!policy_is_inactive(policy))
+                       cpufreq_stop_governor(policy);
+
+               cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+               cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+               policy->cpu = cpumask_first(policy->cpus);
+               ret = cpufreq_start_governor(policy);
+               if (ret)
+                       panic("%s: Failed to start governor\n", __func__);
+
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_online(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
@@ -1178,6 +1221,12 @@ static int cpufreq_online(unsigned int cpu)
        unsigned int j;
        int ret;
 
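+       /* CPUs on the fast-hotplug path keep their policy; skip full init. */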
+       if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+               cpufreq_fast_online();
+               return 0;
+       }
+
        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
        /* Check if this CPU already has a policy to manage it */
@@ -1360,11 +1409,55 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        return 0;
 }
 
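+/*
+ * cpufreq_fast_offline - fast-path offline for the CPUs in cpu_fastoff_mask.
+ *
+ * Drop the CPUs from their policy's ->cpus mask without tearing the policy
+ * down, restarting the governor if the policy remains active.
+ */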
+int cpufreq_fast_offline(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+
+       for_each_cpu(cpu, &cpu_fastoff_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               if (!policy)
+                       panic("%s: failed to get policy\n", __func__);
+
+               down_write(&policy->rwsem);
+               if (!cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               cpufreq_stop_governor(policy);
+
+               cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+               if (!policy_is_inactive(policy)) {
+                       policy->cpu = cpumask_first(policy->cpus);
+                       ret = cpufreq_start_governor(policy);
+                       if (ret)
+                               panic("%s: Failed to start governor\n", __func__);
+               }
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_offline(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
        int ret;
 
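+       /* Fast-hotplug CPUs detach from their policy without a teardown. */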
+       if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+               cpufreq_fast_offline();
+               return 0;
+       }
+
        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
        policy = cpufreq_cpu_get_raw(cpu);
@@ -1526,17 +1619,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
        unsigned int ret_freq = 0;
 
-       if (!cpufreq_driver->get)
+       if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
                return ret_freq;
 
        ret_freq = cpufreq_driver->get(policy->cpu);
 
        /*
-        * Updating inactive policies is invalid, so avoid doing that.  Also
-        * if fast frequency switching is used with the given policy, the check
+        * If fast frequency switching is used with the given policy, the check
         * against policy->cur is pointless, so skip it in that case too.
         */
-       if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+       if (policy->fast_switch_enabled)
                return ret_freq;
 
        if (ret_freq && policy->cur &&
@@ -1565,10 +1657,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 
        if (policy) {
                down_read(&policy->rwsem);
-
-               if (!policy_is_inactive(policy))
-                       ret_freq = __cpufreq_get(policy);
-
+               ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);
 
                cpufreq_cpu_put(policy);
@@ -1945,7 +2034,8 @@ static int __target_index(struct cpufreq_policy *policy, int index)
        return retval;
 }
 
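+/* Weak default: platform code may override the target implementation. */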
-int __cpufreq_driver_target(struct cpufreq_policy *policy,
+int __weak __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
 {