/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balance. A per cpu structure
 * is preferred because each CPU updates its own cpu_power field during
 * load balance, except for idle cores. One idle core is selected to run
 * rebalance_domains for all idle cores, and the cpu_power can be updated
 * during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}
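
/*
 * Note: init_cpu_topology() below initializes every cpu_scale entry to the
 * default SCHED_POWER_SCALE; update_cpu_power() only moves a CPU away from
 * that default when the DT describes a heterogeneous system.
 */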

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of relative efficiency of each processor
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_POWER_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a7", 2048},
	{NULL, },
};
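
/*
 * Worked example (hypothetical DT values, for illustration only): with a
 * "clock-frequency" of 1 GHz, parse_dt_topology() below computes
 *   capacity = (1000000000 >> 20) * efficiency = 953 * efficiency,
 * i.e. 3708123 for a Cortex-A15 and 1951744 for a Cortex-A7. The 20 bit
 * efficiency bound above keeps this product well within an unsigned long.
 */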

struct cpu_capacity {
	unsigned long hwid;
	unsigned long capacity;
};

static struct cpu_capacity *cpu_capacity;

static unsigned long middle_capacity = 1;

/*
 * Iterate over all CPU nodes in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency,
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu = 0;

	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *rate, *reg;
		int len;

		if (cpu >= num_possible_cpus())
			break;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
				cn->full_name);
			continue;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity[cpu].capacity = capacity;
		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
	}

	if (cpu < num_possible_cpus())
		cpu_capacity[cpu].hwid = (unsigned long)(-1);

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_POWER_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (min_capacity == max_capacity)
		cpu_capacity[0].hwid = (unsigned long)(-1);
	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT+1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT-1)) + 1;
}
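
/*
 * Continuing the hypothetical 1 GHz A15/A7 example above: min_capacity =
 * 1951744 and max_capacity = 3708123, so 4*max (14832492) is less than
 * 3*(max + min) (16979601) and the first branch applies:
 *   middle_capacity = (1951744 + 3708123) >> 11 = 2763.
 * update_cpu_power() then sets cpu_power = 3708123 / 2763 = 1342 for an
 * A15 and 1951744 / 2763 = 706 for an A7, whose average is the default
 * SCHED_POWER_SCALE of 1024.
 */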

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during the
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but
 * the function returns directly for SMP systems.
 */
void update_cpu_power(unsigned int cpu, unsigned long hwid)
{
	unsigned int idx = 0;

	/* look for the cpu's hwid in the cpu capacity table */
	for (idx = 0; idx < num_possible_cpus(); idx++) {
		if (cpu_capacity[idx].hwid == hwid)
			break;

		if (cpu_capacity[idx].hwid == -1)
			return;
	}

	if (idx == num_possible_cpus())
		return;

	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);

	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid, unsigned long mpidr) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}
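
/*
 * Note: with CONFIG_SCHED_MC, the scheduler uses cpu_coregroup_mask() when
 * building the MC sched_domain level, so CPUs sharing a core_sibling mask
 * (i.e. the same cluster) end up in a common MC domain.
 */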

void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
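
/*
 * For instance, on a hypothetical two-cluster system where CPUs 0-1 report
 * socket_id 0 and CPUs 2-3 report socket_id 1, with distinct core_ids (no
 * SMT), the masks end up as cpu_coregroup_mask(0) == {0,1} and
 * cpu_coregroup_mask(2) == {2,3} once every CPU has passed through
 * store_cpu_topology(), while each thread_sibling mask contains only the
 * CPU itself.
 */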

/*
 * store_cpu_topology is called at boot when only one cpu is running,
 * and with the mutex cpu_hotplug.lock locked when several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has already been set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * we are in multiprocessor format but a uniprocessor system,
		 * or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	update_siblings_masks(cpuid);

	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
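
/*
 * Example decoding (hypothetical MPIDR value): mpidr = 0x80000100 has the
 * multiprocessor format bit set and the MT bit clear, so the "largely
 * independent cores" branch above applies: Aff0 = 0x00 gives core_id 0 and
 * Aff1 = 0x01 gives socket_id 1, i.e. the first core of the second cluster.
 */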

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}