--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
 			continue;
}
- if (parse_cpu_capacity(cn, cpu)) {
+ if (topology_parse_cpu_capacity(cn, cpu)) {
of_node_put(cn);
continue;
}
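
	/* middle_capacity is the divisor that brings an "average" CPU's raw
	 * capacity as close as possible to SCHED_CAPACITY_SCALE. */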
	middle_capacity = ((max_capacity / 3)
			>> (SCHED_CAPACITY_SHIFT-1)) + 1;
if (cap_from_dt)
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
}

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems,
 * but the function returns directly for SMP systems.
 */
static void update_cpu_capacity(unsigned int cpu)
{
if (!cpu_capacity(cpu) || cap_from_dt)
return;
- set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+ topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
pr_info("CPU%u: update cpu_capacity %lu\n",
- cpu, arch_scale_cpu_capacity(NULL, cpu));
+ cpu, topology_get_cpu_scale(NULL, cpu));
}
#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
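/* Scheduler-facing getter; this default implementation ignores the
 * sched_domain argument. */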
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
-void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
}
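
/* sysfs handlers backing /sys/devices/system/cpu/cpuX/cpu_capacity. */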
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%lu\n",
- arch_scale_cpu_capacity(NULL, cpu->dev.id));
+ topology_get_cpu_scale(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
mutex_lock(&cpu_scale_mutex);
for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
- set_capacity_scale(i, new_capacity);
+ topology_set_cpu_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex);
return count;
static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;
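
/* Rescale raw DT values so the largest CPU lands exactly at
 * SCHED_CAPACITY_SCALE. */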
-void normalize_cpu_capacity(void)
+void topology_normalize_cpu_scale(void)
{
u64 capacity;
int cpu;

	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
/ capacity_scale;
- set_capacity_scale(cpu, capacity);
+ topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, arch_scale_cpu_capacity(NULL, cpu));
+ cpu, topology_get_cpu_scale(NULL, cpu));
}
mutex_unlock(&cpu_scale_mutex);
}
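
/* Reads the "capacity-dmips-mhz" DT property of a CPU node; returns
 * nonzero when a value was found. */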
-int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
int ret = 1;
u32 cpu_capacity;
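
	/* Below, in the cpufreq notifier (init_cpu_capacity_callback):
	 * remove this policy's CPUs from the set still waiting for a
	 * maximum frequency. */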
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
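		/* Weight each DT capacity by the CPU's maximum frequency
		 * before normalizing. */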
for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+ raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
if (cpumask_empty(cpus_to_visit)) {
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
kfree(raw_capacity);
pr_debug("cpu_capacity: parsing done\n");
cap_parsing_done = true;
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
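
/* Default implementations live in drivers/base/arch_topology.c. */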
-void normalize_cpu_capacity(void);
+void topology_normalize_cpu_scale(void);
struct device_node;
-int parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
struct sched_domain;
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
-void set_capacity_scale(unsigned int cpu, unsigned long capacity);
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */