Merge branch 'android-4.14-p' into android-exynos-4.14-ww-9610-minor_up-dev
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / cpufreq / cpufreq.c
index 8e9c2c0f8576f56ccc571cfcef7d5f90c74e1daf..88f77e6bb931d2acda86cf803f43a14a0f30a4a2 100644 (file)
@@ -556,13 +556,21 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-                                struct attribute *attr, char *buf)
+                                struct kobj_attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
                                  const char *buf, size_t count)
 {
        int ret, enable;
 
@@ -695,6 +703,8 @@ static ssize_t store_##file_name                                    \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        memcpy(&new_policy, policy, sizeof(*policy));                   \
+       new_policy.min = policy->user_policy.min;                       \
+       new_policy.max = policy->user_policy.max;                       \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
@@ -1168,6 +1178,43 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
        kfree(policy);
 }
 
+/*
+ * cpufreq_fast_online - re-attach CPUs flagged in cpu_faston_mask.
+ *
+ * Fast-path counterpart of cpufreq_online(): instead of running the full
+ * policy bring-up, fold the flagged CPUs back into their policy's active
+ * ->cpus mask and restart the governor.  Always returns 0; a missing
+ * policy or a governor start failure panics, since there is no sane
+ * recovery in this hotplug-critical path.
+ */
+int cpufreq_fast_online(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+       struct cpumask cl_online_mask;
+
+       for_each_cpu(cpu, &cpu_faston_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               if (!policy)
+                       panic("%s: failed to get policy\n", __func__);
+
+               WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+               down_write(&policy->rwsem);
+
+               /* Already active in this policy; nothing to do. */
+               if (cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               /* Stop the governor before editing the active cpus mask. */
+               if (!policy_is_inactive(policy))
+                       cpufreq_stop_governor(policy);
+
+               /* Fold in every fast-onlining sibling of this policy at once. */
+               cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+               cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+               policy->cpu = cpumask_first(policy->cpus);
+               ret = cpufreq_start_governor(policy);
+               if (ret)
+                       panic("%s: Failed to start governor\n", __func__);
+
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_online(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
@@ -1176,6 +1223,11 @@ static int cpufreq_online(unsigned int cpu)
        unsigned int j;
        int ret;
 
+       if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+               cpufreq_fast_online();
+               return 0;
+       }
+
        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
        /* Check if this CPU already has a policy to manage it */
@@ -1318,14 +1370,14 @@ static int cpufreq_online(unsigned int cpu)
        return 0;
 
 out_exit_policy:
+       for_each_cpu(j, policy->real_cpus)
+               remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
        up_write(&policy->rwsem);
 
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 
-       for_each_cpu(j, policy->real_cpus)
-               remove_cpu_dev_symlink(policy, get_cpu_device(j));
-
 out_free_policy:
        cpufreq_policy_free(policy);
        return ret;
@@ -1358,11 +1410,45 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        return 0;
 }
 
+/*
+ * cpufreq_fast_offline - detach CPUs flagged in cpu_fastoff_mask.
+ *
+ * Fast-path counterpart of cpufreq_offline(): remove the flagged CPUs
+ * from their policy's active ->cpus mask and, if the policy still has
+ * active CPUs, restart the governor on one of the remaining CPUs.
+ * Always returns 0; failures panic, mirroring cpufreq_fast_online().
+ */
+int cpufreq_fast_offline(void)
+{
+       int cpu, ret;
+       struct cpufreq_policy *policy;
+
+       for_each_cpu(cpu, &cpu_fastoff_mask) {
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               /* Guard against a missing policy, as cpufreq_fast_online() does. */
+               if (!policy)
+                       panic("%s: failed to get policy\n", __func__);
+
+               down_write(&policy->rwsem);
+
+               /* Already inactive in this policy; nothing to do. */
+               if (!cpumask_test_cpu(cpu, policy->cpus)) {
+                       up_write(&policy->rwsem);
+                       continue;
+               }
+
+               cpufreq_stop_governor(policy);
+
+               /* Drop every fast-offlining sibling of this policy at once. */
+               cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+               /* Restart the governor only if the policy still has CPUs. */
+               if (!policy_is_inactive(policy)) {
+                       policy->cpu = cpumask_first(policy->cpus);
+                       ret = cpufreq_start_governor(policy);
+                       if (ret)
+                               panic("%s: Failed to start governor\n", __func__);
+               }
+               up_write(&policy->rwsem);
+       }
+
+       return 0;
+}
+
 static int cpufreq_offline(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
        int ret;
 
+       if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+               cpufreq_fast_offline();
+               return 0;
+       }
+
        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
        policy = cpufreq_cpu_get_raw(cpu);
@@ -1524,17 +1610,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
        unsigned int ret_freq = 0;
 
-       if (!cpufreq_driver->get)
+       if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
                return ret_freq;
 
        ret_freq = cpufreq_driver->get(policy->cpu);
 
        /*
-        * Updating inactive policies is invalid, so avoid doing that.  Also
-        * if fast frequency switching is used with the given policy, the check
+        * If fast frequency switching is used with the given policy, the check
         * against policy->cur is pointless, so skip it in that case too.
         */
-       if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+       if (policy->fast_switch_enabled)
                return ret_freq;
 
        if (ret_freq && policy->cur &&
@@ -1563,10 +1648,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 
        if (policy) {
                down_read(&policy->rwsem);
-
-               if (!policy_is_inactive(policy))
-                       ret_freq = __cpufreq_get(policy);
-
+               ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);
 
                cpufreq_cpu_put(policy);
@@ -1943,7 +2025,7 @@ static int __target_index(struct cpufreq_policy *policy, int index)
        return retval;
 }
 
-int __cpufreq_driver_target(struct cpufreq_policy *policy,
+int __weak __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
 {
@@ -2235,6 +2317,9 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
        policy->min = new_policy->min;
        policy->max = new_policy->max;
+
+       arch_set_max_freq_scale(policy->cpus, policy->max);
+
        trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
 
        policy->cached_target_freq = UINT_MAX;
@@ -2445,6 +2530,12 @@ __weak void arch_set_freq_scale(struct cpumask *cpus,
 }
 EXPORT_SYMBOL_GPL(arch_set_freq_scale);
 
+__weak void arch_set_max_freq_scale(struct cpumask *cpus,
+                                   unsigned long policy_max_freq)
+{
+}
+EXPORT_SYMBOL_GPL(arch_set_max_freq_scale);
+
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
  *********************************************************************/