/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/smp_plat.h>
#include <asm/topology.h>
/*
 * cpu power scale management
 */
/*
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balance. A per cpu structure
 * is preferred because each CPU updates its own cpu_power field during the
 * load balance except for idle cores. One idle core is selected to run the
 * rebalance_domains for all idle cores and the cpu_power can be updated
 * during this sequence.
 */
/* When CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY is in use, a new measure of
 * compute capacity is available. This is limited to a maximum of 1024 and
 * scaled between 0 and 1023 according to frequency.
 * Cores with different base CPU powers are scaled in line with this.
 * CPU capacity for each core represents a comparable ratio to the maximum
 * achievable core compute capacity for a core in this system.
 *
 * e.g. 1: If all cores in the system have a base CPU power of 1024 according
 * to efficiency calculations and are DVFS scalable between 500MHz and 1GHz,
 * the cores currently at 1GHz will have a CPU power of 1024 whilst the cores
 * currently at 500MHz will have a CPU power of 512.
 *
 * e.g. 2: If core 0 has a base CPU power of 2048 and runs at 500MHz & 1GHz
 * whilst core 1 has a base CPU power of 1024 and runs at 100MHz and 200MHz,
 * then the following possibilities are available:
 *
 * cpu power | 1GHz:100MHz | 1GHz:200MHz | 500MHz:100MHz | 500MHz:200MHz |
 * ----------|-------------|-------------|---------------|---------------|
 * core 0    |    1024     |    1024     |      512      |      512      |
 * core 1    |     256     |     512     |      256      |      512      |
 *
 * This information may be useful to the scheduler when load balancing,
 * so that the compute capacity of the core a task ran on can be baked into
 * task load histories.
 */
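/*
 * Worked example for the table above (illustrative arithmetic, derived from
 * the stated base powers rather than measured on hardware): core 0's base
 * power of 2048 at its top speed defines the system maximum. Core 1 at its
 * own top speed of 200MHz delivers 1024 absolute capacity, i.e. half of the
 * maximum, which scales to 512 out of 1024; at 100MHz it halves again to
 * 256, matching the "core 1" row of the table.
 */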
static DEFINE_PER_CPU(unsigned long, cpu_scale);
#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
static DEFINE_PER_CPU(unsigned long, base_cpu_capacity);
static DEFINE_PER_CPU(unsigned long, invariant_cpu_capacity);
static DEFINE_PER_CPU(unsigned long, prescaled_cpu_capacity);
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
static int frequency_invariant_power_enabled = 1;

void arch_set_invariant_power_enabled(int val)
{
	if (val)
		frequency_invariant_power_enabled = 1;
	else
		frequency_invariant_power_enabled = 0;
}

int arch_get_invariant_power_enabled(void)
{
	return frequency_invariant_power_enabled;
}
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}
#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
unsigned long arch_get_cpu_capacity(int cpu)
{
	return per_cpu(invariant_cpu_capacity, cpu);
}

unsigned long arch_get_max_cpu_capacity(int cpu)
{
	return per_cpu(base_cpu_capacity, cpu);
}
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}
#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};
/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table use the
 * default SCHED_POWER_SCALE value for cpu_scale.
 */
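/*
 * Illustrative arithmetic for the constraint above (assuming
 * SCHED_POWER_SCALE is 1024): keeping cpu_scale below 3*1024/2 = 1536
 * guarantees that DIV_ROUND_CLOSEST(cpu_scale, SCHED_POWER_SCALE) is at
 * most 1, since every value up to 1535 is closer to 1024 than to 2048.
 */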
struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a17", 3276},
	{"arm,cortex-a12", 3276},
	{"arm,cortex-a53", 2520},
	{"arm,cortex-a7",  2048},
	{NULL, },
};
struct cpu_capacity {
	unsigned long hwid;
	unsigned long capacity;
};

struct cpu_capacity *cpu_capacity;

unsigned long middle_capacity = 1;
/*
 * Iterate all CPUs' descriptor in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu = 0;

	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *rate, *reg;
		int len;

		if (cpu >= num_possible_cpus())
			break;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
				cn->full_name);
			continue;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity[cpu].capacity = capacity;
		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
	}

	if (cpu < num_possible_cpus())
		cpu_capacity[cpu].hwid = (unsigned long)(-1);

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_POWER_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (min_capacity == max_capacity)
		cpu_capacity[0].hwid = (unsigned long)(-1);
	else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT + 1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT - 1)) + 1;
}
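/*
 * Worked example (hypothetical clock-frequency values, not from a real
 * device tree): an "arm,cortex-a15" node at 1700000000 Hz yields
 * capacity = (1700000000 >> 20) * 3891 = 1621 * 3891 = 6307311, and an
 * "arm,cortex-a7" node at 1300000000 Hz yields (1300000000 >> 20) * 2048 =
 * 2537472. Since max_capacity < 3 * min_capacity here, the first branch
 * applies and middle_capacity = (2537472 + 6307311) >> (SCHED_POWER_SHIFT + 1)
 * = 8844783 >> 11 = 4318.
 */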
/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but
 * the function returns directly for SMP systems.
 */
void update_cpu_power(unsigned int cpu, unsigned long hwid)
{
	unsigned int idx = 0;

	/* look for the cpu's hwid in the cpu capacity table */
	for (idx = 0; idx < num_possible_cpus(); idx++) {
		if (cpu_capacity[idx].hwid == hwid)
			break;

		if (cpu_capacity[idx].hwid == -1)
			return;
	}

	if (idx == num_possible_cpus())
		return;

	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);

	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}
#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
#endif /* CONFIG_OF */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
#if defined(CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined(CONFIG_HMP_PACK_SMALL_TASK)
int arch_sd_share_power_line(void)
{
	return 0 * SD_SHARE_POWERLINE;
}
#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
#ifdef CONFIG_MTK_CPU_TOPOLOGY

struct cpu_cluster {
	int cluster_id;
	struct cpumask siblings;
	void *next;
};

struct cpu_compatible {
	const char *name;
	const unsigned int cpuidr;
	struct cpu_cluster *cluster;
	int clscnt;
};

struct cpu_arch_info {
	struct cpu_compatible *compat_big;
	struct cpu_compatible *compat_ltt;
	bool arch_ready;
	int arch_type;
	int nr_clusters;
};
/* NOTE: strictly ordered by descending cpu capacity */
struct cpu_compatible cpu_compat_table[] = {
	{ "arm,cortex-a15", ARM_CPU_PART_CORTEX_A15, NULL, 0 },
	{ "arm,cortex-a17", ARM_CPU_PART_CORTEX_A17, NULL, 0 },
	{ "arm,cortex-a12", ARM_CPU_PART_CORTEX_A12, NULL, 0 },
	{ "arm,cortex-a53", ARM_CPU_PART_CORTEX_A53, NULL, 0 },
	{ "arm,cortex-a9",  ARM_CPU_PART_CORTEX_A9,  NULL, 0 },
	{ "arm,cortex-a7",  ARM_CPU_PART_CORTEX_A7,  NULL, 0 },
	{ NULL, 0, NULL, 0 },
};
static struct cpu_compatible *compat_cputopo[NR_CPUS];
static struct cpu_arch_info default_cpu_arch = {
	.compat_big = NULL,
	.compat_ltt = NULL,
	.arch_ready = false,
	.arch_type = ARCH_UNKNOWN,
	.nr_clusters = 0,
};

static struct cpu_arch_info *glb_cpu_arch = &default_cpu_arch;
static int __arch_type(void)
{
	int i, num_compat = 0;

	if (!glb_cpu_arch->arch_ready)
		return ARCH_UNKNOWN;

	/* return the cached setting if queried more than once */
	if (glb_cpu_arch->arch_type != ARCH_UNKNOWN)
		return glb_cpu_arch->arch_type;

	for (i = 0; i < ARRAY_SIZE(cpu_compat_table); i++) {
		struct cpu_compatible *mc = &cpu_compat_table[i];

		if (mc->clscnt)
			num_compat++;
	}

	if (num_compat > 1)
		glb_cpu_arch->arch_type = ARCH_BIG_LITTLE;
	else if (glb_cpu_arch->nr_clusters > 1)
		glb_cpu_arch->arch_type = ARCH_MULTI_CLUSTER;
	else if (num_compat == 1 && glb_cpu_arch->nr_clusters == 1)
		glb_cpu_arch->arch_type = ARCH_SINGLE_CLUSTER;

	return glb_cpu_arch->arch_type;
}
static DEFINE_SPINLOCK(__cpu_cluster_lock);
static void __setup_cpu_cluster(const unsigned int cpu,
				struct cpu_compatible * const cpt,
				const u32 mpidr)
{
	struct cpu_cluster *prev_cls, *cls;
	u32 cls_id;

	if (mpidr & MPIDR_MT_BITMASK)
		cls_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	else
		cls_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&__cpu_cluster_lock);

	/* find the cluster this cpu belongs to, if it is already known */
	cls = cpt->cluster;
	prev_cls = NULL;
	while (cls) {
		if (cls->cluster_id == cls_id)
			break;
		prev_cls = cls;
		cls = (struct cpu_cluster *)cls->next;
	}

	if (!cls) {
		/* first cpu seen in this cluster: create a new record */
		cls = kzalloc(sizeof(struct cpu_cluster), GFP_ATOMIC);
		BUG_ON(!cls);
		cls->cluster_id = cls_id;
		cpt->clscnt++;
		glb_cpu_arch->nr_clusters++;
		if (!cpt->cluster)
			cpt->cluster = cls;
		else
			prev_cls->next = cls;
	}

	BUG_ON(cls->cluster_id != cls_id);

	cpumask_set_cpu(cpu, &cls->siblings);

	spin_unlock(&__cpu_cluster_lock);
}
static void setup_cputopo(const unsigned int cpu,
			  struct cpu_compatible * const cpt,
			  const u32 mpidr)
{
	if (compat_cputopo[cpu])
		return;

	compat_cputopo[cpu] = cpt;

	if (!glb_cpu_arch->compat_big || glb_cpu_arch->compat_big > cpt)
		glb_cpu_arch->compat_big = cpt;

	if (!glb_cpu_arch->compat_ltt || glb_cpu_arch->compat_ltt < cpt)
		glb_cpu_arch->compat_ltt = cpt;

	__setup_cpu_cluster(cpu, cpt, mpidr);
}
static void setup_cputopo_def(const unsigned int cpu)
{
	struct cpu_compatible *idx = NULL;
	unsigned int cpuidr = 0, mpidr;

	BUG_ON(cpu != smp_processor_id());
	cpuidr = read_cpuid_part_number();
	mpidr = read_cpuid_mpidr();
	for (idx = cpu_compat_table; idx->name; idx++) {
		if (idx->cpuidr == cpuidr)
			break;
	}
	BUG_ON(!idx || !idx->name);
	setup_cputopo(cpu, idx, mpidr);
}
static void reset_cputopo(void)
{
	struct cpu_compatible *idx;

	memset(glb_cpu_arch, 0, sizeof(struct cpu_arch_info));
	glb_cpu_arch->arch_type = ARCH_UNKNOWN;

	memset(&compat_cputopo, 0, sizeof(compat_cputopo));

	spin_lock(&__cpu_cluster_lock);
	for (idx = cpu_compat_table; idx->name; idx++) {
		struct cpu_cluster *curr, *next;

		if (idx->clscnt == 0)
			continue;
		BUG_ON(!idx->cluster);

		/* free the whole cluster list of this compatible entry */
		curr = idx->cluster;
		next = (struct cpu_cluster *)curr->next;
		while (next) {
			kfree(curr);
			curr = next;
			next = (struct cpu_cluster *)curr->next;
		}
		kfree(curr);
		idx->cluster = NULL;
		idx->clscnt = 0;
	}
	spin_unlock(&__cpu_cluster_lock);
}
/*
 * Verify cpu topology correctness against the device tree.
 * This function is called when the current CPU is cpuid!
 */
static void verify_cputopo(const unsigned int cpuid, const u32 mpidr)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	if (!glb_cpu_arch->arch_ready) {
		int i;

		setup_cputopo_def(cpuid);
		for (i = 0; i < nr_cpu_ids; i++)
			if (!compat_cputopo[i])
				break;
		if (i == nr_cpu_ids)
			glb_cpu_arch->arch_ready = true;
	}

	cpt = compat_cputopo[cpuid];
	BUG_ON(!cpt);
	cls = cpt->cluster;
	while (cls) {
		if (cpu_isset(cpuid, cls->siblings))
			break;
		cls = (struct cpu_cluster *)cls->next;
	}
	BUG_ON(!cls);

	WARN(cls->cluster_id != cpuid_topo->socket_id,
	     "[%s] cpu id: %d, cluster id (%d) != socket id (%d)\n",
	     __func__, cpuid, cls->cluster_id, cpuid_topo->socket_id);
}
/*
 * arch_ready is set only when every cpu in the device tree is recognizable.
 */
void arch_build_cpu_topology_domain(void)
{
	struct device_node *cn = NULL;
	unsigned int cpu = 0;
	u32 mpidr;

	memset(&compat_cputopo, 0, sizeof(compat_cputopo));
	/* default by device tree parsing */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		struct cpu_compatible *idx;
		const u32 *reg;
		int len;

		if (unlikely(cpu >= nr_cpu_ids)) {
			pr_err("[CPUTOPO][%s] device tree cpu%d is over possible's\n",
			       __func__, cpu);
			break;
		}

		for (idx = cpu_compat_table; idx->name; idx++)
			if (of_device_is_compatible(cn, idx->name))
				break;

		if (!idx || !idx->name) {
			int cplen;
			char *cp;

			cp = (char *) of_get_property(cn, "compatible", &cplen);
			pr_err("[CPUTOPO][%s] device tree cpu%d (%s) is not compatible!!\n",
			       __func__, cpu, cp);
			break;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("[CPUTOPO][%s] missing reg property\n", cn->full_name);
			break;
		}
		mpidr = be32_to_cpup(reg);
		setup_cputopo(cpu, idx, mpidr);
		cpu++;
	}
	glb_cpu_arch->arch_ready = (cpu == nr_cpu_ids);

	if (!glb_cpu_arch->arch_ready) {
		pr_warn("[CPUTOPO][%s] build cpu topology failed, to be handled by mpidr/cpuidr regs!\n", __func__);
		reset_cputopo();
		setup_cputopo_def(smp_processor_id());
	}
}
int arch_cpu_is_big(unsigned int cpu)
{
	int type;

	if (unlikely(cpu >= nr_cpu_ids))
		BUG();

	type = __arch_type();
	switch (type) {
	case ARCH_BIG_LITTLE:
		return (compat_cputopo[cpu] == glb_cpu_arch->compat_big);
	default:
		/* treat as little */
		return 0;
	}
}
int arch_cpu_is_little(unsigned int cpu)
{
	int type;

	if (unlikely(cpu >= nr_cpu_ids))
		BUG();

	type = __arch_type();
	switch (type) {
	case ARCH_BIG_LITTLE:
		return (compat_cputopo[cpu] == glb_cpu_arch->compat_ltt);
	default:
		/* treat as little */
		return 1;
	}
}
int arch_is_multi_cluster(void)
{
	return (__arch_type() == ARCH_MULTI_CLUSTER || __arch_type() == ARCH_BIG_LITTLE);
}
int arch_is_big_little(void)
{
	return (__arch_type() == ARCH_BIG_LITTLE);
}
int arch_get_nr_clusters(void)
{
	return glb_cpu_arch->nr_clusters;
}
int arch_get_cluster_id(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	BUG_ON(cpu >= nr_cpu_ids);
	if (!glb_cpu_arch->arch_ready) {
		WARN_ONCE(!glb_cpu_arch->arch_ready, "[CPUTOPO][%s] cpu(%d), socket_id(%d) topology is not ready!\n",
			  __func__, cpu, arm_cputopo->socket_id);
		if (unlikely(arm_cputopo->socket_id < 0))
			return 0;
		return arm_cputopo->socket_id;
	}

	cpt = compat_cputopo[cpu];
	BUG_ON(!cpt);
	cls = cpt->cluster;
	while (cls) {
		if (cpu_isset(cpu, cls->siblings))
			break;
		cls = (struct cpu_cluster *)cls->next;
	}
	BUG_ON(!cls);
	WARN_ONCE(cls->cluster_id != arm_cputopo->socket_id, "[CPUTOPO][%s] cpu(%d): cluster_id(%d) != socket_id(%d) !\n",
		  __func__, cpu, cls->cluster_id, arm_cputopo->socket_id);

	return cls->cluster_id;
}
static struct cpu_cluster *__get_cluster_slowpath(int cluster_id)
{
	int i;
	struct cpu_compatible *cpt;
	struct cpu_cluster *cls;

	for (i = 0; i < nr_cpu_ids; i++) {
		cpt = compat_cputopo[i];
		if (!cpt)
			continue;
		cls = cpt->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				return cls;
			cls = (struct cpu_cluster *)cls->next;
		}
	}
	return NULL;
}
void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
{
	struct cpu_cluster *cls = NULL;

	cpumask_clear(cpus);

	if (likely(glb_cpu_arch->compat_ltt)) {
		cls = glb_cpu_arch->compat_ltt->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				goto found;
			cls = (struct cpu_cluster *)cls->next;
		}
	}
	if (likely(glb_cpu_arch->compat_big)) {
		cls = glb_cpu_arch->compat_big->cluster;
		while (cls) {
			if (cls->cluster_id == cluster_id)
				goto found;
			cls = (struct cpu_cluster *)cls->next;
		}
	}

	cls = __get_cluster_slowpath(cluster_id);
	BUG_ON(!cls); /* debug only.. remove later... */

found:
	cpumask_copy(cpus, &cls->siblings);
}
/*
 * arch_get_big_little_cpus - get big/LITTLE cores in cpumask
 * @big: the cpumask pointer of big cores
 * @little: the cpumask pointer of little cores
 *
 * Treat all cores as little cores if this is not a big.LITTLE architecture.
 */
void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little)
{
	int type;
	unsigned int cpu;
	struct cpu_cluster *cls = NULL;
	struct cpumask tmpmask;

	cpumask_clear(big);
	cpumask_clear(little);

	if (unlikely(!glb_cpu_arch->arch_ready))
		type = ARCH_UNKNOWN;
	else
		type = __arch_type();

	spin_lock(&__cpu_cluster_lock);
	switch (type) {
	case ARCH_BIG_LITTLE:
		if (likely(1 == glb_cpu_arch->compat_big->clscnt)) {
			cls = glb_cpu_arch->compat_big->cluster;
			cpumask_copy(big, &cls->siblings);
		} else {
			cls = glb_cpu_arch->compat_big->cluster;
			while (cls) {
				cpumask_or(&tmpmask, big, &cls->siblings);
				cpumask_copy(big, &tmpmask);
				cls = (struct cpu_cluster *)cls->next;
			}
		}
		if (likely(1 == glb_cpu_arch->compat_ltt->clscnt)) {
			cls = glb_cpu_arch->compat_ltt->cluster;
			cpumask_copy(little, &cls->siblings);
		} else {
			cls = glb_cpu_arch->compat_ltt->cluster;
			while (cls) {
				cpumask_or(&tmpmask, little, &cls->siblings);
				cpumask_copy(little, &tmpmask);
				cls = (struct cpu_cluster *)cls->next;
			}
		}
		break;
	default:
		/* treat as little */
		cpumask_clear(little);
		for_each_possible_cpu(cpu)
			cpumask_set_cpu(cpu, little);
	}
	spin_unlock(&__cpu_cluster_lock);
}
#else /* !CONFIG_MTK_CPU_TOPOLOGY */
int arch_cpu_is_big(unsigned int cpu) { return 0; }
int arch_cpu_is_little(unsigned int cpu) { return 1; }
int arch_is_big_little(void) { return 0; }
int arch_get_nr_clusters(void)
{
	int max_id = 0;
	unsigned int cpu;

	/* assume socket id is monotonically increasing without gaps */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id > max_id)
			max_id = arm_cputopo->socket_id;
	}
	return max_id + 1;
}
int arch_is_multi_cluster(void)
{
	return (arch_get_nr_clusters() > 1 ? 1 : 0);
}
int arch_get_cluster_id(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

	return arm_cputopo->socket_id < 0 ? 0 : arm_cputopo->socket_id;
}
void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
{
	unsigned int cpu, found_id = -1;

	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id == cluster_id) {
			found_id = cluster_id;
			break;
		}
	}
	if (-1 == found_id || cluster_to_logical_mask(found_id, cpus)) {
		cpumask_clear(cpus);
		for_each_possible_cpu(cpu)
			cpumask_set_cpu(cpu, cpus);
	}
}
void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little)
{
	unsigned int cpu;

	cpumask_clear(big);
	cpumask_clear(little);
	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, little);
}
#endif /* CONFIG_MTK_CPU_TOPOLOGY */
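/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants the CPUs of cluster 0, whichever of the two implementations above
 * is built in, could do something like:
 *
 *	struct cpumask cls_cpus;
 *
 *	arch_get_cluster_cpus(&cls_cpus, 0);
 *	if (arch_is_big_little())
 *		pr_info("cluster 0 has %u cpus\n", cpumask_weight(&cls_cpus));
 */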
/*
 * store_cpu_topology is called at boot when only one cpu is running,
 * and with the mutex cpu_hotplug.lock locked when several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * multiprocessor format & multiprocessor mode field are set
		 */
		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * we are in multiprocessor format but a uniprocessor system,
		 * or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

#ifdef CONFIG_MTK_CPU_TOPOLOGY
	verify_cputopo(cpuid, (u32)mpidr);
#endif

	update_siblings_masks(cpuid);

	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
/*
 * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
 * @socket_id:		cluster HW identifier
 * @cluster_mask:	the cpumask location to be initialized, modified by the
 *			function only if return value == 0
 *
 * Return:
 *
 * 0 on success
 *
 * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
{
	int cpu;

	if (!cluster_mask)
		return -EINVAL;

	for_each_online_cpu(cpu)
		if (socket_id == topology_physical_package_id(cpu)) {
			cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
			return 0;
		}

	return -EINVAL;
}
#ifdef CONFIG_SCHED_HMP
static const char * const little_cores[] = {
	"arm,cortex-a7",
	NULL,
};

static bool is_little_cpu(struct device_node *cn)
{
	const char * const *lc;

	for (lc = little_cores; *lc; lc++)
		if (of_device_is_compatible(cn, *lc))
			return true;
	return false;
}
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Else, parse device tree for little cores.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 4) {
			pr_err("* %s missing reg property\n", cn->full_name);
			continue;
		}

		cpu = get_logical_index(be32_to_cpup(mpidr));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
			       be32_to_cpup(mpidr));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores so let's call all cores
	 * fast as this will keep the system running, with all cores being
	 * treated equal.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}
struct cpumask hmp_fast_cpu_mask;
struct cpumask hmp_slow_cpu_mask;

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = (struct hmp_domain *)
			kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = (struct hmp_domain *)
		kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */
/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}
#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
#include <linux/cpufreq.h>
#define ARCH_SCALE_INVA_CPU_CAP_PERCLS 1

struct cpufreq_extents {
	u32 max;
	u32 const_max;
	u32 throttling;
	u32 flags;
};

/* Flag set when the governor in use only allows one frequency.
 * Disables scaling.
 */
#define CPUPOWER_FREQINVAR_SINGLEFREQ 0x01
static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
static unsigned long get_max_cpu_power(void)
{
	unsigned long max_cpu_power = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(cpu_scale, cpu) > max_cpu_power)
			max_cpu_power = per_cpu(cpu_scale, cpu);
	}
	return max_cpu_power;
}
int arch_get_cpu_throttling(int cpu)
{
	return freq_scale[cpu].throttling;
}
/* Called when the CPU Frequency is changed.
 * Once for each CPU.
 */
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;
	struct cpufreq_extents *extents;
	unsigned int curr_freq;
#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	int i;
#endif

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (val != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	/* if dynamic load scale is disabled, set the load scale to 1.0 */
	if (!frequency_invariant_power_enabled) {
		per_cpu(invariant_cpu_capacity, cpu) = per_cpu(base_cpu_capacity, cpu);
		return NOTIFY_OK;
	}

	extents = &freq_scale[cpu];
	if (extents->max < extents->const_max) {
		extents->throttling = 1;
	} else {
		extents->throttling = 0;
	}

	/* If our governor was recognised as a single-freq governor,
	 * use curr = max to be sure multiplier is 1.0
	 */
	if (extents->flags & CPUPOWER_FREQINVAR_SINGLEFREQ)
		curr_freq = extents->max >> CPUPOWER_FREQSCALE_SHIFT;
	else
		curr_freq = freq->new >> CPUPOWER_FREQSCALE_SHIFT;

#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	for_each_cpu(i, topology_core_cpumask(cpu)) {
		per_cpu(invariant_cpu_capacity, i) = DIV_ROUND_UP(
			(curr_freq * per_cpu(prescaled_cpu_capacity, i)),
			CPUPOWER_FREQSCALE_DEFAULT);
	}
#else
	per_cpu(invariant_cpu_capacity, cpu) = DIV_ROUND_UP(
		(curr_freq * per_cpu(prescaled_cpu_capacity, cpu)),
		CPUPOWER_FREQSCALE_DEFAULT);
#endif
	return NOTIFY_OK;
}
/* Called when the CPUFreq governor is changed.
 * Only called for the CPUs which are actually changed by the
 * user.
 */
static int cpufreq_policy_callback(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_extents *extents;
	int cpu, singleFreq = 0, cpu_capacity;
	static const char performance_governor[] = "performance";
	static const char powersave_governor[] = "powersave";
	unsigned long max_cpu_power;
#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
	int i;
#endif

	if (event == CPUFREQ_START)
		return 0;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	/* CPUFreq governors do not accurately report the range of
	 * CPU Frequencies they will choose from.
	 * We recognise performance and powersave governors as
	 * single-frequency only.
	 */
	if (!strncmp(policy->governor->name, performance_governor,
		     strlen(performance_governor)) ||
	    !strncmp(policy->governor->name, powersave_governor,
		     strlen(powersave_governor)))
		singleFreq = 1;

	max_cpu_power = get_max_cpu_power();
	/* Make sure that all CPUs impacted by this policy are
	 * updated since we will only get a notification when the
	 * user explicitly changes the policy on a CPU.
	 */
	for_each_cpu(cpu, policy->cpus) {
		/* scale cpu_power to max(1024) */
		cpu_capacity = (per_cpu(cpu_scale, cpu) << CPUPOWER_FREQSCALE_SHIFT)
				/ max_cpu_power;

		extents = &freq_scale[cpu];
		extents->max = policy->max >> CPUPOWER_FREQSCALE_SHIFT;
		extents->const_max = policy->cpuinfo.max_freq >> CPUPOWER_FREQSCALE_SHIFT;
		if (!frequency_invariant_power_enabled) {
			/* when disabled, invariant_cpu_scale = cpu_scale */
			per_cpu(base_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
			per_cpu(invariant_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
			/* unused when disabled */
			per_cpu(prescaled_cpu_capacity, cpu) = CPUPOWER_FREQSCALE_DEFAULT;
			continue;
		}

		if (singleFreq)
			extents->flags |= CPUPOWER_FREQINVAR_SINGLEFREQ;
		else
			extents->flags &= ~CPUPOWER_FREQINVAR_SINGLEFREQ;

		per_cpu(base_cpu_capacity, cpu) = cpu_capacity;
#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
		per_cpu(prescaled_cpu_capacity, cpu) =
			((cpu_capacity << CPUPOWER_FREQSCALE_SHIFT) / extents->const_max);
#else
		per_cpu(prescaled_cpu_capacity, cpu) =
			((cpu_capacity << CPUPOWER_FREQSCALE_SHIFT) / extents->max);
#endif

#ifdef ARCH_SCALE_INVA_CPU_CAP_PERCLS
		for_each_cpu(i, topology_core_cpumask(cpu)) {
			per_cpu(invariant_cpu_capacity, i) = DIV_ROUND_UP(
				((policy->cur >> CPUPOWER_FREQSCALE_SHIFT) *
				per_cpu(prescaled_cpu_capacity, i)),
				CPUPOWER_FREQSCALE_DEFAULT);
		}
#else
		per_cpu(invariant_cpu_capacity, cpu) = DIV_ROUND_UP(
			((policy->cur >> CPUPOWER_FREQSCALE_SHIFT) *
			per_cpu(prescaled_cpu_capacity, cpu)),
			CPUPOWER_FREQSCALE_DEFAULT);
#endif
	}
	return 0;
}
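/*
 * Illustrative numbers for the pre-scaling above (same assumptions about the
 * CPUPOWER_FREQSCALE_* constants as in cpufreq_callback()): with
 * cpu_scale = 1024 and max_cpu_power = 1024, cpu_capacity = (1024 << 10) /
 * 1024 = 1024; a cpuinfo.max_freq of 2000000 (kHz) gives const_max = 1953,
 * so under CONFIG_SCHED_HMP_ENHANCEMENT prescaled_cpu_capacity =
 * (1024 << 10) / 1953 = 536, the value used in the example after
 * cpufreq_callback() above.
 */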
static struct notifier_block cpufreq_notifier = {
	.notifier_call = cpufreq_callback,
};
static struct notifier_block cpufreq_policy_notifier = {
	.notifier_call = cpufreq_policy_callback,
};
static int __init register_topology_cpufreq_notifier(void)
{
	int ret;

	/* init safe defaults since there are no policies at registration */
	for (ret = 0; ret < CONFIG_NR_CPUS; ret++) {
		freq_scale[ret].max = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(base_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(invariant_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
		per_cpu(prescaled_cpu_capacity, ret) = CPUPOWER_FREQSCALE_DEFAULT;
	}

	pr_info("topology: registering cpufreq notifiers for scale-invariant CPU Power\n");
	ret = cpufreq_register_notifier(&cpufreq_policy_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret != -EINVAL)
		ret = cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);

	return ret;
}

core_initcall(register_topology_cpufreq_notifier);
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */