/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/smp_plat.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balance. A per cpu structure
 * is preferred because each CPU updates its own cpu_power field during the
 * load balance except for idle cores. One idle core is selected to run the
 * rebalance_domains for all idle cores and the cpu_power can be updated
 * during this sequence.
 */

/* When CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY is in use, a new measure of
 * compute capacity is available. This is limited to a maximum of 1024 and
 * scaled between 0 and 1023 according to frequency.
 * Cores with different base CPU powers are scaled in line with this.
 * The CPU capacity of each core represents a comparable ratio to the maximum
 * achievable compute capacity of any core in this system.
 *
 * e.g. 1: If all cores in the system have a base CPU power of 1024 according
 * to efficiency calculations and are DVFS scalable between 500MHz and 1GHz,
 * the cores currently at 1GHz will have a CPU power of 1024 whilst the cores
 * currently at 500MHz will have a CPU power of 512.
 *
 * e.g. 2: If core 0 has a base CPU power of 2048 and runs at 500MHz & 1GHz
 * whilst core 1 has a base CPU power of 1024 and runs at 100MHz and 200MHz,
 * then the following possibilities are available:
 *
 * cpu power | 1GHz:100MHz | 1GHz:200MHz | 500MHz:100MHz | 500MHz:200MHz |
 * ----------|-------------|-------------|---------------|---------------|
 * core 0    |        1024 |        1024 |           512 |           512 |
 * core 1    |         256 |         512 |           256 |           512 |
 *
 * This information may be useful to the scheduler when load balancing,
 * so that the compute capacity of the core a task ran on can be baked into
 * task load histories.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);
#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
static DEFINE_PER_CPU(unsigned long, base_cpu_capacity);
static DEFINE_PER_CPU(unsigned long, invariant_cpu_capacity);
static DEFINE_PER_CPU(unsigned long, prescaled_cpu_capacity);
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */

static int frequency_invariant_power_enabled = 1;

/* val > 0 enables frequency-invariant power, val <= 0 disables it */
void arch_set_invariant_power_enabled(int val)
{
	if (val > 0)
		frequency_invariant_power_enabled = 1;
	else
		frequency_invariant_power_enabled = 0;
}

int arch_get_invariant_power_enabled(void)
{
	return frequency_invariant_power_enabled;
}

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
unsigned long arch_get_cpu_capacity(int cpu)
{
	return per_cpu(invariant_cpu_capacity, cpu);
}

unsigned long arch_get_max_cpu_capacity(int cpu)
{
	return per_cpu(base_cpu_capacity, cpu);
}
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
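
/*
 * Illustrative sketch (not part of the original file): one way a debug or
 * trace path could read the values exposed above. The function name
 * example_dump_cpu_capacities is hypothetical; it only calls helpers that
 * are defined in this file.
 */
#if 0
static void example_dump_cpu_capacities(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("CPU%d: invariant capacity %lu, base capacity %lu\n",
			cpu, arch_get_cpu_capacity(cpu),
			arch_get_max_cpu_capacity(cpu));
}
#endif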

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_POWER_SCALE value for cpu_scale.
 */
struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a17", 3276},
	{"arm,cortex-a12", 3276},
	{"arm,cortex-a53", 2520},
	{"arm,cortex-a7",  2048},
	{NULL, },
};

struct cpu_capacity {
	unsigned long hwid;
	unsigned long capacity;
};

struct cpu_capacity *cpu_capacity;

unsigned long middle_capacity = 1;
/*
 * Iterate over all CPU nodes in the device tree and compute the efficiency
 * of each (as per table_efficiency). Also calculate a middle efficiency,
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu = 0;

	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *rate, *reg;
		int len;

		if (cpu >= num_possible_cpus())
			break;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
				cn->full_name);
			continue;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity[cpu].capacity = capacity;
		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
	}

	if (cpu < num_possible_cpus())
		cpu_capacity[cpu].hwid = (unsigned long)(-1);

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_POWER_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (min_capacity == max_capacity)
		cpu_capacity[0].hwid = (unsigned long)(-1);
	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT+1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT-1)) + 1;
}
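
/*
 * Worked example (illustrative, not from the original source): assume a
 * hypothetical device tree with one Cortex-A15 at 1.7GHz and one Cortex-A7
 * at 1.3GHz. parse_dt_topology() would then compute:
 *
 *   A15: capacity = (1700000000 >> 20) * 3891 = 1621 * 3891 = 6307311
 *   A7:  capacity = (1300000000 >> 20) * 2048 = 1239 * 2048 = 2537472
 *
 * Since 4*max < 3*(max + min), and with SCHED_POWER_SHIFT == 10:
 *
 *   middle_capacity = (6307311 + 2537472) >> 11 = 4318
 *
 * so update_cpu_power() later sets cpu_scale to 6307311/4318 = 1460 for the
 * A15 and 2537472/4318 = 587 for the A7, keeping both below
 * 3*SCHED_POWER_SCALE/2 as required by the comment near table_efficiency[].
 */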

/*
 * Look up a CPU's custom capacity in the cpu_capacity table during boot.
 * Updating all CPUs is O(n^2) on a heterogeneous system, but the function
 * returns immediately on an SMP (homogeneous) system.
 */
void update_cpu_power(unsigned int cpu, unsigned long hwid)
{
	unsigned int idx = 0;

	/* look for the cpu's hwid in the cpu capacity table */
	for (idx = 0; idx < num_possible_cpus(); idx++) {
		if (cpu_capacity[idx].hwid == hwid)
			break;

		if (cpu_capacity[idx].hwid == -1)
			return;
	}

	if (idx == num_possible_cpus())
		return;

	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);

	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
	       cpu, arch_scale_freq_power(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

#if defined(CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined(CONFIG_HMP_PACK_SMALL_TASK)
int arch_sd_share_power_line(void)
{
	return 0*SD_SHARE_POWERLINE;
}
#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
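
/*
 * Illustrative example (not from the original source): on a hypothetical
 * 2+2 big.LITTLE system where CPUs 0-1 share socket_id 0 and CPUs 2-3 share
 * socket_id 1, calling update_siblings_masks() for each CPU as it boots
 * leaves core_sibling(0) == core_sibling(1) == {0,1} and
 * core_sibling(2) == core_sibling(3) == {2,3}; each thread_sibling stays a
 * single-CPU mask because the core_ids differ within each cluster.
 */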

#ifdef CONFIG_MTK_CPU_TOPOLOGY

enum {
	ARCH_UNKNOWN = 0,
	ARCH_SINGLE_CLUSTER,
	ARCH_MULTI_CLUSTER,
	ARCH_BIG_LITTLE,
};

struct cpu_cluster {
	int cluster_id;
	cpumask_t siblings;
	void *next;
};

struct cpu_compatible {
	const char *name;
	const unsigned int cpuidr;
	struct cpu_cluster *cluster;
	int clscnt;
};

struct cpu_arch_info {
	struct cpu_compatible *compat_big;
	struct cpu_compatible *compat_ltt;
	bool arch_ready;
	int arch_type;
	int nr_clusters;
};

/* NOTE: ordered by strictly descending cpu capacity */
struct cpu_compatible cpu_compat_table[] = {
	{ "arm,cortex-a15", ARM_CPU_PART_CORTEX_A15, NULL, 0 },
	{ "arm,cortex-a17", ARM_CPU_PART_CORTEX_A17, NULL, 0 },
	{ "arm,cortex-a12", ARM_CPU_PART_CORTEX_A12, NULL, 0 },
	{ "arm,cortex-a53", ARM_CPU_PART_CORTEX_A53, NULL, 0 },
	{ "arm,cortex-a9",  ARM_CPU_PART_CORTEX_A9,  NULL, 0 },
	{ "arm,cortex-a7",  ARM_CPU_PART_CORTEX_A7,  NULL, 0 },
	{ NULL, 0, NULL, 0 }
};

static struct cpu_compatible *compat_cputopo[NR_CPUS];

static struct cpu_arch_info default_cpu_arch = {
	NULL,
	NULL,
	0,
	ARCH_UNKNOWN,
	0,
};
static struct cpu_arch_info *glb_cpu_arch = &default_cpu_arch;

static int __arch_type(void)
{
	int i, num_compat = 0;

	if (!glb_cpu_arch->arch_ready)
		return ARCH_UNKNOWN;

	/* return the cached setting if queried more than once */
	if (glb_cpu_arch->arch_type != ARCH_UNKNOWN)
		return glb_cpu_arch->arch_type;

	for (i = 0; i < ARRAY_SIZE(cpu_compat_table); i++) {
		struct cpu_compatible *mc = &cpu_compat_table[i];

		if (mc->clscnt != 0)
			num_compat++;
	}

	if (num_compat > 1)
		glb_cpu_arch->arch_type = ARCH_BIG_LITTLE;
	else if (glb_cpu_arch->nr_clusters > 1)
		glb_cpu_arch->arch_type = ARCH_MULTI_CLUSTER;
	else if (num_compat == 1 && glb_cpu_arch->nr_clusters == 1)
		glb_cpu_arch->arch_type = ARCH_SINGLE_CLUSTER;

	return glb_cpu_arch->arch_type;
}

static DEFINE_SPINLOCK(__cpu_cluster_lock);

static void __setup_cpu_cluster(const unsigned int cpu,
				struct cpu_compatible * const cpt,
				const u32 mpidr)
{
	struct cpu_cluster *prev_cls, *cls;
	u32 cls_id = -1;

	if (mpidr & MPIDR_MT_BITMASK)
		cls_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	else
		cls_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&__cpu_cluster_lock);

	cls = cpt->cluster;
	prev_cls = cls;
	while (cls) {
		if (cls->cluster_id == cls_id)
			break;
		prev_cls = cls;
		cls = (struct cpu_cluster *)cls->next;
	}

	if (!cls) {
		cls = kzalloc(sizeof(struct cpu_cluster), GFP_ATOMIC);
		BUG_ON(!cls);
		cls->cluster_id = cls_id;
		cpt->clscnt++;
		glb_cpu_arch->nr_clusters++;
		/* link it */
		if (!cpt->cluster)
			cpt->cluster = cls;
		else
			prev_cls->next = cls;
	}
	BUG_ON(cls->cluster_id != cls_id);

	cpumask_set_cpu(cpu, &cls->siblings);
	smp_wmb();

	spin_unlock(&__cpu_cluster_lock);
}

static void setup_cputopo(const unsigned int cpu,
			  struct cpu_compatible * const cpt,
			  const u32 mpidr)
{
	if (compat_cputopo[cpu])
		return;

	compat_cputopo[cpu] = cpt;

	if (!glb_cpu_arch->compat_big || glb_cpu_arch->compat_big > cpt)
		glb_cpu_arch->compat_big = cpt;

	if (!glb_cpu_arch->compat_ltt || glb_cpu_arch->compat_ltt < cpt)
		glb_cpu_arch->compat_ltt = cpt;

	__setup_cpu_cluster(cpu, cpt, mpidr);
}

static void setup_cputopo_def(const unsigned int cpu)
{
	struct cpu_compatible *idx = NULL;
	unsigned int cpuidr = 0, mpidr;

	BUG_ON(cpu != smp_processor_id());
	cpuidr = read_cpuid_part_number();
	mpidr = read_cpuid_mpidr();
	for (idx = cpu_compat_table; idx->name; idx++) {
		if (idx->cpuidr == cpuidr)
			break;
	}
	BUG_ON(!idx || !idx->name);
	setup_cputopo(cpu, idx, mpidr);
}

static void reset_cputopo(void)
{
	struct cpu_compatible *idx;

	memset(glb_cpu_arch, 0, sizeof(struct cpu_arch_info));
	glb_cpu_arch->arch_type = ARCH_UNKNOWN;

	memset(&compat_cputopo, 0, sizeof(compat_cputopo));

	spin_lock(&__cpu_cluster_lock);
	for (idx = cpu_compat_table; idx->name; idx++) {
		struct cpu_cluster *curr, *next;

		if (idx->clscnt == 0)
			continue;
		BUG_ON(!idx->cluster);

		curr = idx->cluster;
		next = (struct cpu_cluster *)curr->next;
		kfree(curr);

		while (next) {
			curr = next;
			next = (struct cpu_cluster *)curr->next;
			kfree(curr);
		}
		idx->cluster = NULL;
		idx->clscnt = 0;
	}
	spin_unlock(&__cpu_cluster_lock);
}

/*
 * Verify the correctness of the CPU topology built from the device tree.
 * This function must be called on the CPU identified by cpuid.
 */
static void verify_cputopo(const unsigned int cpuid, const u32 mpidr)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	if (!glb_cpu_arch->arch_ready) {
		int i;

		setup_cputopo_def(cpuid);
		for (i = 0; i < nr_cpu_ids; i++)
			if (!compat_cputopo[i])
				break;
		if (i == nr_cpu_ids)
			glb_cpu_arch->arch_ready = true;

		return;
	}

	cpt = compat_cputopo[cpuid];
	BUG_ON(!cpt);
	cls = cpt->cluster;
	while (cls) {
		if (cpu_isset(cpuid, cls->siblings))
			break;
		cls = cls->next;
	}
	BUG_ON(!cls);
	WARN(cls->cluster_id != cpuid_topo->socket_id,
	     "[%s] cpu id: %d, cluster id (%d) != socket id (%d)\n",
	     __func__, cpuid, cls->cluster_id, cpuid_topo->socket_id);
}

/*
 * Build the CPU topology domains from the device tree;
 * glb_cpu_arch->arch_ready is set only when every cpu is recognizable.
 */
void arch_build_cpu_topology_domain(void)
{
	struct device_node *cn = NULL;
	unsigned int cpu = 0;
	u32 mpidr;

	memset(&compat_cputopo, 0, sizeof(compat_cputopo));
	/* default by device tree parsing */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		struct cpu_compatible *idx;
		const u32 *reg;
		int len;

		if (unlikely(cpu >= nr_cpu_ids)) {
			pr_err("[CPUTOPO][%s] device tree cpu%d exceeds the number of possible cpus\n",
			       __func__, cpu);
			break;
		}

		for (idx = cpu_compat_table; idx->name; idx++)
			if (of_device_is_compatible(cn, idx->name))
				break;

		if (!idx || !idx->name) {
			int cplen;
			const char *cp;

			cp = (char *) of_get_property(cn, "compatible", &cplen);
			pr_err("[CPUTOPO][%s] device tree cpu%d (%s) is not compatible!!\n",
			       __func__, cpu, cp);
			break;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("[CPUTOPO][%s] missing reg property\n", cn->full_name);
			break;
		}
		mpidr = be32_to_cpup(reg);
		setup_cputopo(cpu, idx, mpidr);
		cpu++;
	}
	glb_cpu_arch->arch_ready = (cpu == nr_cpu_ids);

	if (!glb_cpu_arch->arch_ready) {
		pr_warn("[CPUTOPO][%s] build cpu topology failed, to be handled by mpidr/cpuidr regs!\n",
			__func__);
		reset_cputopo();
		setup_cputopo_def(smp_processor_id());
	}
}

int arch_cpu_is_big(unsigned int cpu)
{
	int type;

	if (unlikely(cpu >= nr_cpu_ids))
		BUG();

	type = __arch_type();
	switch (type) {
	case ARCH_BIG_LITTLE:
		return (compat_cputopo[cpu] == glb_cpu_arch->compat_big);
	default:
		/* treat as little */
		return 0;
	}
}

int arch_cpu_is_little(unsigned int cpu)
{
	int type;

	if (unlikely(cpu >= nr_cpu_ids))
		BUG();

	type = __arch_type();
	switch (type) {
	case ARCH_BIG_LITTLE:
		return (compat_cputopo[cpu] == glb_cpu_arch->compat_ltt);
	default:
		/* treat as little */
		return 1;
	}
}

int arch_is_multi_cluster(void)
{
	return (__arch_type() == ARCH_MULTI_CLUSTER || __arch_type() == ARCH_BIG_LITTLE);
}

int arch_is_big_little(void)
{
	return (__arch_type() == ARCH_BIG_LITTLE);
}

int arch_get_nr_clusters(void)
{
	return glb_cpu_arch->nr_clusters;
}

int arch_get_cluster_id(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	BUG_ON(cpu >= nr_cpu_ids);
	if (!glb_cpu_arch->arch_ready) {
		WARN_ONCE(!glb_cpu_arch->arch_ready, "[CPUTOPO][%s] cpu(%d), socket_id(%d) topology is not ready!\n",
			  __func__, cpu, arm_cputopo->socket_id);
		if (unlikely(arm_cputopo->socket_id < 0))
			return 0;
		return arm_cputopo->socket_id;
	}

	cpt = compat_cputopo[cpu];
	BUG_ON(!cpt);
	cls = cpt->cluster;
	while (cls) {
		if (cpu_isset(cpu, cls->siblings))
			break;
		cls = cls->next;
	}
	BUG_ON(!cls);
	WARN_ONCE(cls->cluster_id != arm_cputopo->socket_id, "[CPUTOPO][%s] cpu(%d): cluster_id(%d) != socket_id(%d) !\n",
		  __func__, cpu, cls->cluster_id, arm_cputopo->socket_id);

	return cls->cluster_id;
}

static struct cpu_cluster *__get_cluster_slowpath(int cluster_id)
{
	int i = 0;
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	for (i = 0; i < nr_cpu_ids; i++) {
		cpt = compat_cputopo[i];
		BUG_ON(!cpt);
		cls = cpt->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				return cls;
			cls = cls->next;
		}
	}
	return NULL;
}

void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
{
	struct cpu_cluster *cls = NULL;

	cpumask_clear(cpus);

	if (likely(glb_cpu_arch->compat_ltt)) {
		cls = glb_cpu_arch->compat_ltt->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				goto found;
			cls = cls->next;
		}
	}
	if (likely(glb_cpu_arch->compat_big)) {
		cls = glb_cpu_arch->compat_big->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				goto found;
			cls = cls->next;
		}
	}

	cls = __get_cluster_slowpath(cluster_id);
	BUG_ON(!cls); /* debug only, remove later */
	if (!cls)
		return;

found:
	cpumask_copy(cpus, &cls->siblings);
}

/*
 * arch_get_big_little_cpus - get the cpumasks of big and LITTLE cores
 * @big: the cpumask pointer for big cores
 * @little: the cpumask pointer for little cores
 *
 * If this is not a big.LITTLE architecture, all cores are treated as little.
 */
void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little)
{
	int type;
	struct cpu_cluster *cls = NULL;
	struct cpumask tmpmask;
	unsigned int cpu;

	if (unlikely(!glb_cpu_arch->arch_ready))
		BUG();

	type = __arch_type();
	spin_lock(&__cpu_cluster_lock);
	switch (type) {
	case ARCH_BIG_LITTLE:
		if (likely(1 == glb_cpu_arch->compat_big->clscnt)) {
			cls = glb_cpu_arch->compat_big->cluster;
			cpumask_copy(big, &cls->siblings);
		} else {
			cls = glb_cpu_arch->compat_big->cluster;
			while (cls) {
				cpumask_or(&tmpmask, big, &cls->siblings);
				cpumask_copy(big, &tmpmask);
				cls = cls->next;
			}
		}
		if (likely(1 == glb_cpu_arch->compat_ltt->clscnt)) {
			cls = glb_cpu_arch->compat_ltt->cluster;
			cpumask_copy(little, &cls->siblings);
		} else {
			cls = glb_cpu_arch->compat_ltt->cluster;
			while (cls) {
				cpumask_or(&tmpmask, little, &cls->siblings);
				cpumask_copy(little, &tmpmask);
				cls = cls->next;
			}
		}
		break;
	default:
		/* treat as little */
		cpumask_clear(big);
		cpumask_clear(little);
		for_each_possible_cpu(cpu)
			cpumask_set_cpu(cpu, little);
	}
	spin_unlock(&__cpu_cluster_lock);
}
#else /* !CONFIG_MTK_CPU_TOPOLOGY */
int arch_cpu_is_big(unsigned int cpu) { return 0; }
int arch_cpu_is_little(unsigned int cpu) { return 1; }
int arch_is_big_little(void) { return 0; }

int arch_get_nr_clusters(void)
{
	int max_id = 0;
	unsigned int cpu;

	/* assume socket ids are monotonically increasing without gaps */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id > max_id)
			max_id = arm_cputopo->socket_id;
	}
	return max_id+1;
}

int arch_is_multi_cluster(void)
{
	return (arch_get_nr_clusters() > 1 ? 1 : 0);
}

int arch_get_cluster_id(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

	return arm_cputopo->socket_id < 0 ? 0 : arm_cputopo->socket_id;
}

void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
{
	unsigned int cpu, found_id = -1;

	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id == cluster_id) {
			found_id = cluster_id;
			break;
		}
	}
	if (-1 == found_id || cluster_to_logical_mask(found_id, cpus)) {
		cpumask_clear(cpus);
		for_each_possible_cpu(cpu)
			cpumask_set_cpu(cpu, cpus);
	}
}

void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little)
{
	unsigned int cpu;

	cpumask_clear(big);
	cpumask_clear(little);
	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, little);
}
#endif /* CONFIG_MTK_CPU_TOPOLOGY */

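/*
 * Illustrative sketch (not part of the original file): how a scheduler or
 * driver could consume the big/LITTLE masks exposed above, whichever
 * CONFIG_MTK_CPU_TOPOLOGY branch is built in. The function name
 * example_log_big_little_split is hypothetical.
 */
#if 0
static void example_log_big_little_split(void)
{
	struct cpumask big, little;
	unsigned int cpu;

	arch_get_big_little_cpus(&big, &little);
	for_each_cpu(cpu, &big)
		pr_info("CPU%u is a big core\n", cpu);
	for_each_cpu(cpu, &little)
		pr_info("CPU%u is a LITTLE core\n", cpu);
}
#endif
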
/*
 * store_cpu_topology is called at boot when only one cpu is running, and
 * with the mutex cpu_hotplug.lock held when several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * the multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * we are either in multiprocessor format on a uniprocessor
		 * system or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

#ifdef CONFIG_MTK_CPU_TOPOLOGY
	verify_cputopo(cpuid, (u32)mpidr);
#endif

	update_siblings_masks(cpuid);

	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}

/*
 * cluster_to_logical_mask - return the logical cpumask of CPUs in a cluster
 * @socket_id: cluster HW identifier
 * @cluster_mask: the cpumask location to be initialized, modified by the
 *		  function only if the return value is 0
 *
 * Return:
 *
 * 0 on success
 * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
{
	int cpu;

	if (!cluster_mask)
		return -EINVAL;

	for_each_online_cpu(cpu)
		if (socket_id == topology_physical_package_id(cpu)) {
			cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
			return 0;
		}

	return -EINVAL;
}
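
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * the online siblings of the cluster containing a given CPU could combine
 * topology_physical_package_id() with cluster_to_logical_mask() as below.
 * The function name example_cluster_siblings is hypothetical.
 */
#if 0
static int example_cluster_siblings(unsigned int cpu, cpumask_t *mask)
{
	unsigned int socket_id = topology_physical_package_id(cpu);

	/* returns -EINVAL if no online CPU belongs to socket_id */
	return cluster_to_logical_mask(socket_id, mask);
}
#endif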

#ifdef CONFIG_SCHED_HMP
static const char * const little_cores[] = {
	"arm,cortex-a53",
	"arm,cortex-a7",
	NULL,
};

static bool is_little_cpu(struct device_node *cn)
{
	const char * const *lc;

	for (lc = little_cores; *lc; lc++)
		if (of_device_is_compatible(cn, *lc))
			return true;
	return false;
}

void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Otherwise, parse the device tree for little cores.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {

		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 4) {
			pr_err("* %s missing reg property\n", cn->full_name);
			continue;
		}

		cpu = get_logical_index(be32_to_cpup(mpidr));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
			       be32_to_cpup(mpidr));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores, so call all cores fast;
	 * this keeps the system running, with all cores treated equally.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}

struct cpumask hmp_fast_cpu_mask;
struct cpumask hmp_slow_cpu_mask;

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}

#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
#include <linux/cpufreq.h>
#define ARCH_SCALE_INVA_CPU_CAP_PERCLS 1

struct cpufreq_extents {
	u32 max;
	u32 flags;
	u32 const_max;
	u32 throttling;
};

/* Flag set when the governor in use only allows one frequency.
 * Disables scaling.
 */
#define CPUPOWER_FREQINVAR_SINGLEFREQ 0x01
static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];

static unsigned long get_max_cpu_power(void)
{
	unsigned long max_cpu_power = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(cpu_scale, cpu) > max_cpu_power)
			max_cpu_power = per_cpu(cpu_scale, cpu);
	}
	return max_cpu_power;
}

int arch_get_cpu_throttling(int cpu)
{
	return freq_scale[cpu].throttling;
}

/* Called when the CPU Frequency is changed.
 * Once for each CPU.
 */
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;
	struct cpufreq_extents *extents;
	unsigned int curr_freq;
#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	int i = 0;
#endif

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (val != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	/* if dynamic load scale is disabled, set the load scale to 1.0 */
	if (!frequency_invariant_power_enabled) {
		per_cpu(invariant_cpu_capacity, cpu) = per_cpu(base_cpu_capacity, cpu);
		return NOTIFY_OK;
	}

	extents = &freq_scale[cpu];
	if (extents->max < extents->const_max) {
		extents->throttling = 1;
	} else {
		extents->throttling = 0;
	}
	/* If our governor was recognised as a single-freq governor,
	 * use curr = max to be sure multiplier is 1.0
	 */
	if (extents->flags & CPUPOWER_FREQINVAR_SINGLEFREQ)
		curr_freq = extents->max >> CPUPOWER_FREQSCALE_SHIFT;
	else
		curr_freq = freq->new >> CPUPOWER_FREQSCALE_SHIFT;

#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	for_each_cpu(i, topology_core_cpumask(cpu)) {
		per_cpu(invariant_cpu_capacity, i) = DIV_ROUND_UP(
			(curr_freq * per_cpu(prescaled_cpu_capacity, i)),
			CPUPOWER_FREQSCALE_DEFAULT);
	}
#else
	per_cpu(invariant_cpu_capacity, cpu) = DIV_ROUND_UP(
		(curr_freq * per_cpu(prescaled_cpu_capacity, cpu)),
		CPUPOWER_FREQSCALE_DEFAULT);
#endif
	return NOTIFY_OK;
}
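
/*
 * Worked example (illustrative, not from the original source), assuming
 * CPUPOWER_FREQSCALE_SHIFT == 10 and CPUPOWER_FREQSCALE_DEFAULT == 1024,
 * and the CONFIG_SCHED_HMP_ENHANCEMENT branch of the policy callback below:
 * take a CPU whose cpu_scale equals the system maximum, so its base
 * cpu_capacity is 1024, with cpuinfo.max_freq = 1300000 kHz. The policy
 * callback computes const_max = 1300000 >> 10 = 1269 and
 * prescaled_cpu_capacity = (1024 << 10) / 1269 = 826. When the transition
 * notifier above later sees freq->new = 650000 kHz, curr_freq becomes 634
 * and invariant_cpu_capacity = DIV_ROUND_UP(634 * 826, 1024) = 512, i.e.
 * roughly half the base capacity at half the maximum frequency.
 */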

/* Called when the CPUFreq governor is changed.
 * Only called for the CPUs which are actually changed by the
 * userspace.
 */
static int cpufreq_policy_callback(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_extents *extents;
	int cpu, singleFreq = 0, cpu_capacity;
	static const char performance_governor[] = "performance";
	static const char powersave_governor[] = "powersave";
	unsigned long max_cpu_power;
#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	int i = 0;
#endif

	if (event == CPUFREQ_START)
		return 0;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	/* CPUFreq governors do not accurately report the range of
	 * CPU Frequencies they will choose from.
	 * We recognise performance and powersave governors as
	 * single-frequency only.
	 */
	if (!strncmp(policy->governor->name, performance_governor,
		     strlen(performance_governor)) ||
	    !strncmp(policy->governor->name, powersave_governor,
		     strlen(powersave_governor)))
		singleFreq = 1;

	max_cpu_power = get_max_cpu_power();
	/* Make sure that all CPUs impacted by this policy are
	 * updated since we will only get a notification when the
	 * user explicitly changes the policy on a CPU.
	 */
	for_each_cpu(cpu, policy->cpus) {
		/* scale cpu_power to max(1024) */
		cpu_capacity = (per_cpu(cpu_scale, cpu) << CPUPOWER_FREQSCALE_SHIFT)
				/ max_cpu_power;
		extents = &freq_scale[cpu];
		extents->max = policy->max >> CPUPOWER_FREQSCALE_SHIFT;
		extents->const_max = policy->cpuinfo.max_freq >> CPUPOWER_FREQSCALE_SHIFT;
		if (!frequency_invariant_power_enabled) {
			/* when disabled, invariant_cpu_scale = cpu_scale */
			per_cpu(base_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
			per_cpu(invariant_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
			/* unused when disabled */
			per_cpu(prescaled_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
		} else {
			if (singleFreq)
				extents->flags |= CPUPOWER_FREQINVAR_SINGLEFREQ;
			else
				extents->flags &= ~CPUPOWER_FREQINVAR_SINGLEFREQ;
			per_cpu(base_cpu_capacity, cpu) = cpu_capacity;
#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
			per_cpu(prescaled_cpu_capacity, cpu) =
				((cpu_capacity << CPUPOWER_FREQSCALE_SHIFT) / extents->const_max);
#else
			per_cpu(prescaled_cpu_capacity, cpu) =
				((cpu_capacity << CPUPOWER_FREQSCALE_SHIFT) / extents->max);
#endif

#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
			for_each_cpu(i, topology_core_cpumask(cpu)) {
				per_cpu(invariant_cpu_capacity, i) = DIV_ROUND_UP(
					((policy->cur >> CPUPOWER_FREQSCALE_SHIFT) *
					per_cpu(prescaled_cpu_capacity, i)), CPUPOWER_FREQSCALE_DEFAULT);
			}
#else
			per_cpu(invariant_cpu_capacity, cpu) = DIV_ROUND_UP(
				((policy->cur >> CPUPOWER_FREQSCALE_SHIFT) *
				per_cpu(prescaled_cpu_capacity, cpu)), CPUPOWER_FREQSCALE_DEFAULT);
#endif
		}
	}
	return 0;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call = cpufreq_callback,
};

static struct notifier_block cpufreq_policy_notifier = {
	.notifier_call = cpufreq_policy_callback,
};

static int __init register_topology_cpufreq_notifier(void)
{
	int ret;

	/* init safe defaults since there are no policies at registration */
	for (ret = 0; ret < CONFIG_NR_CPUS; ret++) {
		/* safe defaults */
		freq_scale[ret].max = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(base_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(invariant_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(prescaled_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
	}

	pr_info("topology: registering cpufreq notifiers for scale-invariant CPU Power\n");
	ret = cpufreq_register_notifier(&cpufreq_policy_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret != -EINVAL)
		ret = cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);

	return ret;
}

core_initcall(register_topology_cpufreq_notifier);
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */