/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balance. A per cpu structure
 * is preferred because each CPU updates its own cpu_power field during the
 * load balance except for idle cores. One idle core is selected to run the
 * rebalance_domains for all idle cores and the cpu_power can be updated
 * during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}
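
/*
 * Illustrative note: cpu_scale is expressed relative to SCHED_POWER_SCALE
 * (1024), the value of a reference CPU. With the hypothetical 1 GHz
 * big.LITTLE numbers worked through below, a Cortex-A15 ends up around
 * 1342 and a Cortex-A7 around 706, so the load balancer sees a big core
 * as roughly twice as capable as a LITTLE one.
 */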

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of relative efficiency of each processor
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_POWER_SCALE value for cpu_scale.
 */
struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a7", 2048},
	{NULL, },
};
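
/*
 * Illustrative note: the table values give a Cortex-A15 : Cortex-A7
 * efficiency ratio of 3891/2048, roughly 1.9:1 at equal clock frequency,
 * which matches the commonly quoted per-MHz performance gap between the
 * two cores. Both values fit comfortably in 20 bits, as required above.
 */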

struct cpu_capacity {
	unsigned long hwid;
	unsigned long capacity;
};

struct cpu_capacity *cpu_capacity;

unsigned long middle_capacity = 1;

/*
 * Iterate all CPUs' descriptors in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu = 0;

	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *rate, *reg;
		int len;

		if (cpu >= num_possible_cpus())
			break;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
				cn->full_name);
			continue;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
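
		/*
		 * Worked example (illustrative, assuming a 1 GHz clock):
		 * for a Cortex-A15, rate >> 20 gives
		 * 1000000000 / 2^20 = 953, so
		 * capacity = 953 * 3891 = 3708123.
		 * A 1 GHz Cortex-A7 would get 953 * 2048 = 1951744.
		 */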

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity[cpu].capacity = capacity;
		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
	}

	if (cpu < num_possible_cpus())
		cpu_capacity[cpu].hwid = (unsigned long)(-1);

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_POWER_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
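	/*
	 * Worked example (illustrative): with the 1 GHz A15/A7 capacities
	 * above, max_capacity = 3708123 and min_capacity = 1951744. Since
	 * 4*max (14832492) is less than 3*(max + min) (16979601), we get
	 * middle_capacity = (min + max) >> (SCHED_POWER_SHIFT + 1)
	 *                 = 5659867 >> 11 = 2763,
	 * which later yields cpu_scale of about 1342 for the A15 and 706
	 * for the A7, both inside (0, 3*SCHED_POWER_SCALE/2) = (0, 1536).
	 */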
	if (min_capacity == max_capacity)
		cpu_capacity[0].hwid = (unsigned long)(-1);
	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT+1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT-1)) + 1;

}

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but
 * the function returns directly for SMP systems.
 */
void update_cpu_power(unsigned int cpu, unsigned long hwid)
{
	unsigned int idx = 0;

	/* look for the cpu's hwid in the cpu capacity table */
	for (idx = 0; idx < num_possible_cpus(); idx++) {
		if (cpu_capacity[idx].hwid == hwid)
			break;

		if (cpu_capacity[idx].hwid == -1)
			return;
	}

	if (idx == num_possible_cpus())
		return;

	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);

	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}
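
/*
 * Illustrative note: with the hypothetical 1 GHz big.LITTLE values worked
 * through above, this would log "update cpu_power 1342" on an A15 and
 * "update cpu_power 706" on an A7; the actual figures depend on the
 * device tree.
 */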

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid, unsigned long mpidr) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}
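
/*
 * Illustrative note: the scheduler calls cpu_coregroup_mask() when building
 * the MC (multi-core) scheduling domain, so each CPU's coregroup is the set
 * of CPUs that share its socket_id, i.e. its cluster on a big.LITTLE system.
 */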

void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
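
/*
 * Worked example (illustrative): on a hypothetical 2+2 big.LITTLE system
 * where CPUs 0-1 have socket_id 0 and CPUs 2-3 have socket_id 1, the loop
 * above leaves core_sibling = {0,1} for CPUs 0-1 and {2,3} for CPUs 2-3.
 * With the MT bit clear each CPU has a distinct core_id, so every
 * thread_sibling mask contains only the CPU itself.
 */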

/*
 * store_cpu_topology is called at boot when only one CPU is running, and,
 * once several CPUs have booted, with the cpu_hotplug.lock mutex held,
 * which prevents simultaneous write access to the cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has already been set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system
		 * multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * either we are in multiprocessor format but on a
		 * uniprocessor system, or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	update_siblings_masks(cpuid);

	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
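
/*
 * Worked example (illustrative): on a big.LITTLE part with the MT bit
 * clear, MPIDR 0x80000101 decodes to Aff0 = 1 and Aff1 = 1, so the CPU is
 * stored as thread -1, core 1, socket 1: the second core of the second
 * cluster.
 */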

/*
 * init_cpu_topology is called at boot when only one CPU is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}