ARM: 7462/1: topology: factorize the update of sibling masks
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / topology.c
CommitLineData
c9018aab
VG
1/*
2 * arch/arm/kernel/topology.c
3 *
4 * Copyright (C) 2011 Linaro Limited.
5 * Written by: Vincent Guittot
6 *
7 * based on arch/sh/kernel/topology.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/cpu.h>
15#include <linux/cpumask.h>
16#include <linux/init.h>
17#include <linux/percpu.h>
18#include <linux/node.h>
19#include <linux/nodemask.h>
20#include <linux/sched.h>
21
22#include <asm/cputype.h>
23#include <asm/topology.h>
24
130d9aab
VG
25/*
26 * cpu power scale management
27 */
28
29/*
30 * cpu power table
31 * This per cpu data structure describes the relative capacity of each core.
33 * On a heterogeneous system, cores don't have the same computation capacity
33 * and we reflect that difference in the cpu_power field so the scheduler can
34 * take this difference into account during load balance. A per cpu structure
35 * is preferred because each CPU updates its own cpu_power field during the
36 * load balance except for idle cores. One idle core is selected to run the
37 * rebalance_domains for all idle cores and the cpu_power can be updated
38 * during this sequence.
39 */
40static DEFINE_PER_CPU(unsigned long, cpu_scale);
41
/*
 * Scheduler hook: return the relative compute capacity recorded for @cpu.
 * Simply reads the per-cpu cpu_scale value; @sd is unused here.
 */
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}
46
/* Record @power as the relative compute capacity (cpu_power) of @cpu. */
static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}
51
52/*
53 * cpu topology management
54 */
55
c9018aab
VG
56#define MPIDR_SMP_BITMASK (0x3 << 30)
57#define MPIDR_SMP_VALUE (0x2 << 30)
58
59#define MPIDR_MT_BITMASK (0x1 << 24)
60
61/*
62 * These masks reflect the current use of the affinity levels.
63 * The affinity level can be up to 16 bits according to ARM ARM
64 */
65
66#define MPIDR_LEVEL0_MASK 0x3
67#define MPIDR_LEVEL0_SHIFT 0
68
69#define MPIDR_LEVEL1_MASK 0xF
70#define MPIDR_LEVEL1_SHIFT 8
71
72#define MPIDR_LEVEL2_MASK 0xFF
73#define MPIDR_LEVEL2_SHIFT 16
74
130d9aab
VG
75/*
76 * cpu topology table
77 */
c9018aab
VG
78struct cputopo_arm cpu_topology[NR_CPUS];
79
/*
 * Return the mask of CPUs that are core siblings of @cpu (same socket_id),
 * as built by update_siblings_masks().
 */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}
84
cb75dacb
VG
85void update_siblings_masks(unsigned int cpuid)
86{
87 struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
88 int cpu;
89
90 /* update core and thread sibling masks */
91 for_each_possible_cpu(cpu) {
92 cpu_topo = &cpu_topology[cpu];
93
94 if (cpuid_topo->socket_id != cpu_topo->socket_id)
95 continue;
96
97 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
98 if (cpu != cpuid)
99 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
100
101 if (cpuid_topo->core_id != cpu_topo->core_id)
102 continue;
103
104 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
105 if (cpu != cpuid)
106 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
107 }
108 smp_wmb();
109}
110
c9018aab
VG
111/*
112 * store_cpu_topology is called at boot when only one cpu is running
113 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
114 * which prevents simultaneous write access to cpu_topology array
115 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system
		 * multiprocessor format & multiprocessor mode field are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/*
			 * core performance interdependency
			 * MT bit set: affinity level 0 is the thread,
			 * level 1 the core and level 2 the socket.
			 */
			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
				& MPIDR_LEVEL2_MASK;
		} else {
			/*
			 * largely independent cores
			 * MT bit clear: no hardware threads, so level 0 is
			 * the core and level 1 the socket.
			 */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
		}
	} else {
		/*
		 * This is an uniprocessor system
		 * we are in multiprocessor format but uniprocessor system
		 * or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	/* Fold the freshly decoded ids into the sibling masks. */
	update_siblings_masks(cpuid);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
168
169/*
170 * init_cpu_topology is called at boot when only one cpu is running
171 * which prevents simultaneous write access to cpu_topology array
172 */
173void init_cpu_topology(void)
174{
175 unsigned int cpu;
176
130d9aab 177 /* init core mask and power*/
c9018aab
VG
178 for_each_possible_cpu(cpu) {
179 struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
180
181 cpu_topo->thread_id = -1;
182 cpu_topo->core_id = -1;
183 cpu_topo->socket_id = -1;
184 cpumask_clear(&cpu_topo->core_sibling);
185 cpumask_clear(&cpu_topo->thread_sibling);
130d9aab
VG
186
187 set_power_scale(cpu, SCHED_POWER_SCALE);
c9018aab
VG
188 }
189 smp_wmb();
190}