* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret, enable;
struct cpufreq_policy new_policy; \
\
memcpy(&new_policy, policy, sizeof(*policy)); \
+ new_policy.min = policy->user_policy.min; \
+ new_policy.max = policy->user_policy.max; \
\
ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
kfree(policy);
}
+/*
+ * cpufreq_fast_online - fast-hotplug path that re-attaches CPUs listed in
+ * cpu_faston_mask to their existing cpufreq policies without going through
+ * the full cpufreq_online() bring-up.
+ *
+ * For each CPU in cpu_faston_mask: look up its per-CPU policy, skip it if
+ * it is already present in policy->cpus, otherwise stop the governor (if
+ * the policy is active), merge the fast-online CPUs of this cluster into
+ * policy->cpus, elect a new policy->cpu and restart the governor.
+ *
+ * Runs under policy->rwsem (write) per policy. Always returns 0.
+ *
+ * NOTE(review): panic() on a missing policy or governor-start failure is
+ * very severe for a hotplug path — confirm this is intentional. The panic
+ * message "can't to get policy" also has a typo (behavioral string, left
+ * untouched here).
+ */
+int cpufreq_fast_online(void)
+{
+ int cpu, ret;
+ struct cpufreq_policy *policy;
+ struct cpumask cl_online_mask;
+
+ for_each_cpu(cpu, &cpu_faston_mask) {
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (!policy)
+ panic("%s: can't to get policy\n", __func__);
+
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ down_write(&policy->rwsem);
+
+ /* CPU already managed by this policy: nothing to do. */
+ if (cpumask_test_cpu(cpu, policy->cpus)) {
+ up_write(&policy->rwsem);
+ continue;
+ }
+
+
+ /* Governor must be stopped before policy->cpus is rewritten. */
+ if (!policy_is_inactive(policy))
+ cpufreq_stop_governor(policy);
+
+ /* Add every fast-online CPU of this cluster in one shot. */
+ cpumask_and(&cl_online_mask, &cpu_faston_mask, policy->related_cpus);
+ cpumask_or(policy->cpus, &cl_online_mask, policy->cpus);
+
+ policy->cpu = cpumask_first(policy->cpus);
+ ret = cpufreq_start_governor(policy);
+ if (ret)
+ panic("%s: Failed to start governor\n", __func__);
+
+ up_write(&policy->rwsem);
+ }
+
+ return 0;
+}
+
static int cpufreq_online(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int j;
int ret;
+ if (cpumask_test_cpu(cpu, &cpu_faston_mask)) {
+ cpufreq_fast_online();
+ return 0;
+ }
+
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
/* Check if this CPU already has a policy to manage it */
return 0;
out_exit_policy:
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
up_write(&policy->rwsem);
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
- for_each_cpu(j, policy->real_cpus)
- remove_cpu_dev_symlink(policy, get_cpu_device(j));
-
out_free_policy:
cpufreq_policy_free(policy);
return ret;
return 0;
}
+/*
+ * cpufreq_fast_offline - fast-hotplug counterpart of cpufreq_fast_online():
+ * detach every CPU in cpu_fastoff_mask from its policy without running the
+ * full cpufreq_offline() teardown.
+ *
+ * For each CPU in cpu_fastoff_mask: skip it if it is not in policy->cpus,
+ * otherwise stop the governor, strip all fast-offline CPUs from
+ * policy->cpus, and — if the policy still has CPUs left — elect a new
+ * policy->cpu and restart the governor.
+ *
+ * Runs under policy->rwsem (write) per policy. Always returns 0.
+ *
+ * NOTE(review): unlike cpufreq_fast_online(), the per-CPU policy pointer is
+ * dereferenced without a NULL check — confirm cpu_fastoff_mask can never
+ * contain a CPU whose cpufreq_cpu_data entry is NULL.
+ */
+int cpufreq_fast_offline(void)
+{
+ int cpu, ret;
+ struct cpufreq_policy *policy;
+
+ for_each_cpu(cpu, &cpu_fastoff_mask) {
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ down_write(&policy->rwsem);
+ /* CPU already detached from this policy: nothing to do. */
+ if (!cpumask_test_cpu(cpu, policy->cpus)) {
+ up_write(&policy->rwsem);
+ continue;
+ }
+
+ cpufreq_stop_governor(policy);
+
+ /* Remove every fast-offline CPU of this policy in one shot. */
+ cpumask_andnot(policy->cpus, policy->cpus, &cpu_fastoff_mask);
+
+ /* Restart the governor only if the policy still owns CPUs. */
+ if (!policy_is_inactive(policy)) {
+ policy->cpu = cpumask_first(policy->cpus);
+ ret = cpufreq_start_governor(policy);
+ if (ret)
+ panic("%s: Failed to start governor\n", __func__);
+ }
+ up_write(&policy->rwsem);
+ }
+
+ return 0;
+}
+
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
int ret;
+ if (cpumask_test_cpu(cpu, &cpu_fastoff_mask)) {
+ cpufreq_fast_offline();
+ return 0;
+ }
+
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
return retval;
}
-int __cpufreq_driver_target(struct cpufreq_policy *policy,
+int __weak __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
policy->min = new_policy->min;
policy->max = new_policy->max;
+
+ arch_set_max_freq_scale(policy->cpus, policy->max);
+
trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
policy->cached_target_freq = UINT_MAX;
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);
+/*
+ * Weak default for arch_set_max_freq_scale(): a no-op. Architectures that
+ * track a max-frequency scale factor (called from cpufreq_set_policy when
+ * policy->max changes, see above) override this symbol.
+ */
+__weak void arch_set_max_freq_scale(struct cpumask *cpus,
+ unsigned long policy_max_freq)
+{
+}
+EXPORT_SYMBOL_GPL(arch_set_max_freq_scale);
+
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/