* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
 			   const char *buf, size_t count)
{
int ret, enable;
kfree(policy);
}
+/*
+ * cpufreq_fast_online - fast path for re-attaching CPUs in cpu_faston_mask
+ * to their existing cpufreq policies, bypassing the full cpufreq_online()
+ * sequence (policy allocation, sysfs setup, etc.).
+ *
+ * For each CPU in cpu_faston_mask: quiesce the governor, merge the CPU and
+ * any sibling CPUs also in the mask back into policy->cpus, re-elect
+ * policy->cpu, and restart the governor.
+ *
+ * Returns 0 on success; panics if a per-CPU policy is missing or the
+ * governor fails to restart (this path has no error-recovery story).
+ */
+int cpufreq_fast_online(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy *policy;
+	struct cpumask cl_online_mask;
+
+	for_each_cpu(cpu, &cpu_faston_mask) {
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		/* NOTE(review): a missing policy is treated as fatal here;
+		 * confirm panic (vs. skipping the CPU) is intentional. */
+		if (!policy)
+			panic("%s: can't to get policy\n", __func__);
+
+		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+		down_write(&policy->rwsem);
+
+		/* CPU already managed by this policy - nothing to do. */
+		if (cpumask_test_cpu(cpu, policy->cpus)) {
+			up_write(&policy->rwsem);
+			continue;
+		}
+
+
+		/* Policy still has active CPUs: stop the governor before
+		 * mutating policy->cpus under it. */
+		if (!policy_is_inactive(policy))
+			cpufreq_stop_governor(policy);
+
+		/* Fold in every fast-onlining sibling of this policy in one
+		 * pass, not just the current 'cpu'. */
+		cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+		cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+		/* Re-elect the leader CPU and bring the governor back up. */
+		policy->cpu = cpumask_first(policy->cpus);
+		ret = cpufreq_start_governor(policy);
+		if (ret)
+			panic("%s: Failed to start governor\n", __func__);
+
+		up_write(&policy->rwsem);
+	}
+
+	return 0;
+}
+
static int cpufreq_online(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int j;
int ret;
+ if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+ cpufreq_fast_online();
+ return 0;
+ }
+
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
/* Check if this CPU already has a policy to manage it */
return 0;
}
+/*
+ * cpufreq_fast_offline - fast path for detaching CPUs in cpu_fastoff_mask
+ * from their policies, bypassing the full cpufreq_offline() teardown.
+ *
+ * For each CPU in cpu_fastoff_mask: stop the governor, strip every
+ * fast-offlining CPU from policy->cpus in one pass, and, if the policy
+ * still has active CPUs, re-elect policy->cpu and restart the governor.
+ *
+ * Returns 0 on success; panics if the governor fails to restart.
+ */
+int cpufreq_fast_offline(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy *policy;
+
+	for_each_cpu(cpu, &cpu_fastoff_mask) {
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		/* NOTE(review): no NULL check on 'policy' here, unlike
+		 * cpufreq_fast_online() - confirm every CPU in
+		 * cpu_fastoff_mask is guaranteed to have a policy. */
+		down_write(&policy->rwsem);
+		/* CPU already removed from this policy - nothing to do. */
+		if (!cpumask_test_cpu(cpu, policy->cpus)) {
+			up_write(&policy->rwsem);
+			continue;
+		}
+
+		cpufreq_stop_governor(policy);
+
+		/* Drop all fast-offlining siblings of this policy at once. */
+		cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+		/* Restart the governor only if active CPUs remain. */
+		if (!policy_is_inactive(policy)) {
+			policy->cpu = cpumask_first(policy->cpus);
+			ret = cpufreq_start_governor(policy);
+			if (ret)
+				panic("%s: Failed to start governor\n", __func__);
+		}
+		up_write(&policy->rwsem);
+	}
+
+	return 0;
+}
+
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
int ret;
+ if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+ cpufreq_fast_offline();
+ return 0;
+ }
+
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
return retval;
}
-int __cpufreq_driver_target(struct cpufreq_policy *policy,
+int __weak __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{