static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
- int i;
+ struct cpufreq_frequency_table *pos;
struct acpi_processor_performance *perf;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
perf = data->acpi_data;
- for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (msr == perf->states[data->freq_table[i].driver_data].status)
- return data->freq_table[i].frequency;
- }
+ cpufreq_for_each_entry(pos, data->freq_table)
+ if (msr == perf->states[pos->driver_data].status)
+ return pos->frequency;
return data->freq_table[0].frequency;
}
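/*
 * For reference: the iteration helpers used throughout this patch are
 * expected to be defined in include/linux/cpufreq.h roughly as sketched
 * below (a sketch for the reader, not part of the patch). The trailing
 * 'else' in the valid-entry variant lets the macro absorb the statement
 * or block that follows it while skipping CPUFREQ_ENTRY_INVALID slots.
 */
#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

#define cpufreq_for_each_valid_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else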
/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
- int i;
+ struct cpufreq_frequency_table *pos;
uint32_t min_freq = ~0;
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
- if (table[i].frequency < min_freq)
- min_freq = table[i].frequency;
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
return min_freq;
}
/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
- int i;
+ struct cpufreq_frequency_table *pos;
uint32_t max_freq = 0;
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
- if (table[i].frequency > max_freq)
- max_freq = table[i].frequency;
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
return max_freq;
}
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
- unsigned int i, j, count = 0, ret = 0;
+ unsigned int i, count = 0, ret = 0;
struct cpufreq_stats *stat;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *pos, *table;
table = cpufreq_frequency_get_table(cpu);
if (unlikely(!table))
stat->cpu = cpu;
per_cpu(cpufreq_stats_table, cpu) = stat;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
+ cpufreq_for_each_valid_entry(pos, table)
count++;
- }
alloc_size = count * sizeof(int) + count * sizeof(u64);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stat->trans_table = stat->freq_table + count;
#endif
- j = 0;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
- if (freq_table_get_index(stat, freq) == -1)
- stat->freq_table[j++] = freq;
- }
- stat->state_num = j;
+ i = 0;
+ cpufreq_for_each_valid_entry(pos, table)
+ if (freq_table_get_index(stat, pos->frequency) == -1)
+ stat->freq_table[i++] = pos->frequency;
+ stat->state_num = i;
spin_lock(&cpufreq_stats_lock);
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
- int i = 0;
+ struct cpufreq_frequency_table *pos;
freq_table = dev_get_platdata(&pdev->dev);
if (!freq_table) {
}
pr_info("dbx500-cpufreq: Available frequencies:\n");
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
- i++;
- }
+ cpufreq_for_each_entry(pos, freq_table)
+ pr_info(" %d Mhz\n", pos->frequency / 1000);
return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
- unsigned int i;
+ struct cpufreq_frequency_table *pos;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
max_freq = elanfreq_get_cpu_frequency(0);
/* table init */
- for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
- if (elanfreq_table[i].frequency > max_freq)
- elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
- }
+ cpufreq_for_each_entry(pos, elanfreq_table)
+ if (pos->frequency > max_freq)
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
static int exynos_cpufreq_get_index(unsigned int freq)
{
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- int index;
+ struct cpufreq_frequency_table *pos;
- for (index = 0;
- freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
- if (freq_table[index].frequency == freq)
+ cpufreq_for_each_entry(pos, freq_table)
+ if (pos->frequency == freq)
break;
- if (freq_table[index].frequency == CPUFREQ_TABLE_END)
+ if (pos->frequency == CPUFREQ_TABLE_END)
return -EINVAL;
- return index;
+ return pos - freq_table;
}
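/*
 * Minimal sketch (not part of the patch; the helper name is made up for
 * illustration) of the index-recovery idiom the conversions rely on: the
 * cursor always points into the table passed to the macro, so
 * 'pos - table' reproduces the loop counter the old code kept by hand.
 * The subtraction yields a ptrdiff_t, which is why later hunks cast it
 * to (int) before handing it to "%d"/"%u" printk-style formats.
 */
static void example_dump_table(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;

	cpufreq_for_each_valid_entry(pos, table)
		pr_debug("entry %d: %u kHz\n",
			 (int)(pos - table), pos->frequency);
}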
static int exynos_cpufreq_scale(unsigned int target_freq)
static int init_div_table(void)
{
- struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+ struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
unsigned int tmp, clk_div, ema_div, freq, volt_id;
- int i = 0;
struct dev_pm_opp *opp;
rcu_read_lock();
- for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
-
+ cpufreq_for_each_entry(pos, freq_tbl) {
opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
- freq_tbl[i].frequency * 1000, true);
+ pos->frequency * 1000, true);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dvfs_info->dev,
"failed to find valid OPP for %u KHZ\n",
- freq_tbl[i].frequency);
+ pos->frequency);
return PTR_ERR(opp);
}
- freq = freq_tbl[i].frequency / 1000; /* In MHZ */
+ freq = pos->frequency / 1000; /* In MHZ */
clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
<< P0_7_CPUCLKDEV_SHIFT;
clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
- __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
+ __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
+ (pos - freq_tbl));
}
rcu_read_unlock();
static void exynos_enable_dvfs(unsigned int cur_frequency)
{
- unsigned int tmp, i, cpu;
+ unsigned int tmp, cpu;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+ struct cpufreq_frequency_table *pos;
/* Disable DVFS */
__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
/* Set initial performance index */
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- if (freq_table[i].frequency == cur_frequency)
+ cpufreq_for_each_entry(pos, freq_table)
+ if (pos->frequency == cur_frequency)
break;
- if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+ if (pos->frequency == CPUFREQ_TABLE_END) {
dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
/* Assign the highest frequency */
- i = 0;
- cur_frequency = freq_table[i].frequency;
+ pos = freq_table;
+ cur_frequency = pos->frequency;
}
dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
- tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
+ tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
}
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
+ struct cpufreq_frequency_table *pos;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int i;
+ unsigned int freq;
-	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
-		unsigned int freq = table[i].frequency;
-		if (freq == CPUFREQ_ENTRY_INVALID) {
-			pr_debug("table entry %u is invalid, skipping\n", i);
-			continue;
-		}
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
if (!cpufreq_boost_enabled()
- && (table[i].flags & CPUFREQ_BOOST_FREQ))
+ && (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", i, freq);
+ pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- unsigned int next_larger = ~0, freq, i = 0;
+ struct cpufreq_frequency_table *pos;
+ unsigned int freq, next_larger = ~0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
cpufreq_verify_within_cpu_limits(policy);
- for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
if ((freq >= policy->min) && (freq <= policy->max)) {
found = true;
break;
.driver_data = ~0,
.frequency = 0,
};
- unsigned int i;
+ struct cpufreq_frequency_table *pos;
+ unsigned int freq, i = 0;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
break;
}
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- unsigned int freq = table[i].frequency;
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ i = pos - table;
if ((freq < policy->min) || (freq > policy->max))
continue;
switch (relation) {
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
- struct cpufreq_frequency_table *table;
- int i;
+ struct cpufreq_frequency_table *pos, *table;
table = cpufreq_frequency_get_table(policy->cpu);
if (unlikely(!table)) {
return -ENOENT;
}
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (table[i].frequency == freq)
- return i;
- }
+ cpufreq_for_each_valid_entry(pos, table)
+ if (pos->frequency == freq)
+ return pos - table;
return -EINVAL;
}
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
bool show_boost)
{
- unsigned int i = 0;
ssize_t count = 0;
- struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
if (!table)
return -ENODEV;
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
- continue;
+ cpufreq_for_each_valid_entry(pos, table) {
/*
* show_boost = true and driver_data = BOOST freq
* display BOOST freqs
* show_boost = false and driver_data != BOOST freq
* display NON BOOST freqs
*/
- if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
+ if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", table[i].frequency);
+ count += sprintf(&buf[count], "%d ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
static void longhaul_setup_voltagescaling(void)
{
+ struct cpufreq_frequency_table *freq_pos;
union msr_longhaul longhaul;
struct mV_pos minvid, maxvid, vid;
unsigned int j, speed, pos, kHz_step, numvscales;
/* Calculate kHz for one voltage step */
kHz_step = (highest_speed - min_vid_speed) / numvscales;
- j = 0;
- while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
- speed = longhaul_table[j].frequency;
+ cpufreq_for_each_entry(freq_pos, longhaul_table) {
+ speed = freq_pos->frequency;
if (speed > min_vid_speed)
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
else
pos = minvid.pos;
- longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8;
+ freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
- speed, j, vid.mV);
- j++;
+ speed, (int)(freq_pos - longhaul_table), vid.mV);
}
can_scale_voltage = 1;
static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
- int i, cur_astate;
+ int cur_astate;
struct resource res;
struct device_node *cpu, *dn;
int err = -ENODEV;
pr_debug("initializing frequency table\n");
/* initialize frequency table */
- for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
- pas_freqs[i].frequency =
- get_astate_freq(pas_freqs[i].driver_data) * 100000;
- pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
+ cpufreq_for_each_entry(pos, pas_freqs) {
+ pos->frequency = get_astate_freq(pos->driver_data) * 100000;
+ pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency);
}
cur_astate = get_cur_astate(policy->cpu);
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
+ struct cpufreq_frequency_table *pos;
unsigned int i, f;
unsigned khz;
}
}
if (param_max_multiplier) {
- for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
- if (clock_ratio[i].driver_data == param_max_multiplier) {
+ cpufreq_for_each_entry(pos, clock_ratio)
+ if (pos->driver_data == param_max_multiplier) {
max_multiplier = param_max_multiplier;
goto have_max_multiplier;
}
- }
printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
return -EINVAL;
}
param_busfreq = busfreq * 10;
/* table init */
- for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
- f = clock_ratio[i].driver_data;
+ cpufreq_for_each_entry(pos, clock_ratio) {
+ f = pos->driver_data;
if (f > max_multiplier)
- clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
else
- clock_ratio[i].frequency = busfreq * f;
+ pos->frequency = busfreq * f;
}
/* cpuinfo and default policy values */
static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
- int i, cur_pmode;
+ int cur_pmode;
struct device_node *cpu;
cpu = of_get_cpu_node(policy->cpu, NULL);
pr_debug("initializing frequency table\n");
/* initialize frequency table */
- for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
- cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data;
- pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+ cpufreq_for_each_entry(pos, cbe_freqs) {
+ pos->frequency = max_freq / pos->driver_data;
+ pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
}
/* if DEBUG is enabled set_pmode() measures the latency
static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
{
int count, v, i, found;
- struct cpufreq_frequency_table *freq;
+ struct cpufreq_frequency_table *pos;
struct s3c2416_dvfs *dvfs;
count = regulator_count_voltages(s3c_freq->vddarm);
return;
}
-	freq = s3c_freq->freq_table;
-	while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
-		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
-		dvfs = &s3c2416_dvfs_table[freq->driver_data];
+	if (!count)
+		goto out;
+	cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
+ dvfs = &s3c2416_dvfs_table[pos->driver_data];
found = 0;
/* Check only the min-voltage, more is always ok on S3C2416 */
if (!found) {
pr_debug("cpufreq: %dkHz unsupported by regulator\n",
- freq->frequency);
- freq->frequency = CPUFREQ_ENTRY_INVALID;
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
}
-
- freq++;
}
+out:
/* Guessed */
s3c_freq->regulator_latency = 1 * 1000 * 1000;
}
static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_frequency_table *freq;
+ struct cpufreq_frequency_table *pos;
struct clk *msysclk;
unsigned long rate;
int ret;
s3c_freq->regulator_latency = 0;
#endif
- freq = s3c_freq->freq_table;
- while (freq->frequency != CPUFREQ_TABLE_END) {
+ cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
/* special handling for dvs mode */
- if (freq->driver_data == 0) {
+ if (pos->driver_data == 0) {
if (!s3c_freq->hclk) {
pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
- freq->frequency);
- freq->frequency = CPUFREQ_ENTRY_INVALID;
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
} else {
- freq++;
continue;
}
}
/* Check for frequencies we can generate */
rate = clk_round_rate(s3c_freq->armdiv,
- freq->frequency * 1000);
+ pos->frequency * 1000);
rate /= 1000;
- if (rate != freq->frequency) {
+ if (rate != pos->frequency) {
pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
- freq->frequency, rate);
- freq->frequency = CPUFREQ_ENTRY_INVALID;
+ pos->frequency, rate);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
}
-
- freq++;
}
/* Datasheet says PLL stabalisation time must be at least 300us,
pr_err("Unable to check supported voltages\n");
}
- freq = s3c64xx_freq_table;
- while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
- if (freq->frequency == CPUFREQ_ENTRY_INVALID)
- continue;
+ if (!count)
+ goto out;
+ cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
dvfs = &s3c64xx_dvfs_table[freq->driver_data];
found = 0;
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
-
- freq++;
}
+out:
/* Guess based on having to do an I2C/SPI write; in future we
* will be able to query the regulator performance here. */
regulator_latency = 1 * 1000 * 1000;
}
#endif
- freq = s3c64xx_freq_table;
- while (freq->frequency != CPUFREQ_TABLE_END) {
+ cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
unsigned long r;
/* Check for frequencies we can generate */
* frequency is the maximum we can support. */
if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
freq->frequency = CPUFREQ_ENTRY_INVALID;
-
- freq++;
}
/* Datasheet says PLL stabalisation time (if we were to use