static void cs_check_cpu(int cpu, unsigned int load)
{
struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct cpufreq_policy *policy = dbs_info->cdbs.policy;
struct dbs_data *dbs_data = policy->governor_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
static void cs_dbs_timer(struct work_struct *work)
{
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.dwork.work);
- unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ unsigned int cpu = dbs_info->cdbs.policy->cpu;
struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
cpu);
- struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct dbs_data *dbs_data = dbs_info->cdbs.policy->governor_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
bool modify_all = true;
	mutex_lock(&core_dbs_info->cdbs.timer_mutex);

	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
		modify_all = false;
	else
dbs_check_cpu(dbs_data, cpu);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
+ gov_queue_work(dbs_data, dbs_info->cdbs.policy, delay, modify_all);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
if (!dbs_info->enable)
return 0;
- policy = dbs_info->cdbs.cur_policy;
+ policy = dbs_info->cdbs.policy;
/*
 * we only care if our internally tracked freq moves outside the 'valid'
 * ranges of frequency available to us otherwise we do not change it
 */
ignore_nice = cs_tuners->ignore_nice_load;
}
- policy = cdbs->cur_policy;
+ policy = cdbs->policy;
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
/* Will return if we need to evaluate cpu load again or not */
bool need_load_eval(struct cpu_dbs_info *cdbs, unsigned int sampling_rate)
{
- if (policy_is_shared(cdbs->cur_policy)) {
+ if (policy_is_shared(cdbs->policy)) {
ktime_t time_now = ktime_get();
s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
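		/*
		 * Sketch, reconstructed from the same series (not part of this
		 * hunk): a sample is skipped when less than half a sampling
		 * period has elapsed on a shared policy.
		 */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}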
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
unsigned int prev_load;
- j_cdbs->cur_policy = policy;
+ j_cdbs->policy = policy;
j_cdbs->prev_cpu_idle =
get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
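		/*
		 * Sketch, from the surrounding start path in this series: seed
		 * prev_load from the initial wall/idle counters so the first
		 * sample has a sane baseline.
		 */
		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;
	}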
gov_cancel_work(dbs_data, policy);
mutex_destroy(&cdbs->timer_mutex);
- cdbs->cur_policy = NULL;
+ cdbs->policy = NULL;
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy,
				    struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int cpu = policy->cpu;
struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
- if (!cdbs->cur_policy)
+ if (!cdbs->policy)
return;
mutex_lock(&cdbs->timer_mutex);
- if (policy->max < cdbs->cur_policy->cur)
- __cpufreq_driver_target(cdbs->cur_policy, policy->max,
+ if (policy->max < cdbs->policy->cur)
+ __cpufreq_driver_target(cdbs->policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > cdbs->cur_policy->cur)
- __cpufreq_driver_target(cdbs->cur_policy, policy->min,
+ else if (policy->min > cdbs->policy->cur)
+ __cpufreq_driver_target(cdbs->policy, policy->min,
CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->timer_mutex);
}
/*
 * Used to keep track of load in the previous interval. However, when
 * explicitly set to zero, it is used as a flag to ensure that we copy
 * the previous load to the current interval only once, upon the first
 * wake-up from idle.
 */
unsigned int prev_load;
- struct cpufreq_policy *cur_policy;
+ struct cpufreq_policy *policy;
struct delayed_work dwork;
/*
 * percpu mutex that serializes governor limit change with gov_dbs_timer
 * invocation. We do not want gov_dbs_timer to run when user is changing
 * the governor or limits.
 */
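	/*
	 * Sketch, for context: per cpufreq_governor.h in this series the
	 * struct closes with the serializing mutex and the timestamp that
	 * need_load_eval() compares against.
	 */
	struct mutex timer_mutex;
	ktime_t time_stamp;
};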
static void od_check_cpu(int cpu, unsigned int load)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct cpufreq_policy *policy = dbs_info->cdbs.policy;
struct dbs_data *dbs_data = policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
static void od_dbs_timer(struct work_struct *work)
{
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.dwork.work);
- unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ unsigned int cpu = dbs_info->cdbs.policy->cpu;
struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
cpu);
- struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct dbs_data *dbs_data = dbs_info->cdbs.policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
int delay = 0, sample_type = core_dbs_info->sample_type;
bool modify_all = true;
	mutex_lock(&core_dbs_info->cdbs.timer_mutex);

	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
delay = core_dbs_info->freq_lo_jiffies;
- __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
- core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(core_dbs_info->cdbs.policy,
+ core_dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
} else {
dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
+ gov_queue_work(dbs_data, dbs_info->cdbs.policy, delay, modify_all);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
mutex_lock(&dbs_info->cdbs.timer_mutex);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
- usecs_to_jiffies(new_rate), true);
+ gov_queue_work(dbs_data, dbs_info->cdbs.policy,
+ usecs_to_jiffies(new_rate), true);
}
mutex_unlock(&dbs_info->cdbs.timer_mutex);
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &done))
continue;
- policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
+ policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.policy;
if (!policy)
continue;
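		/*
		 * Sketch of the remainder of this loop in the same series:
		 * mark the policy's CPUs as done and update the bias only
		 * where ondemand is actually in charge.
		 */
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}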