Merge tag 'v3.10.89' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / linux / topology.h
1 /*
2 * include/linux/topology.h
3 *
4 * Written by: Matthew Dobson, IBM Corporation
5 *
6 * Copyright (C) 2002, IBM Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <colpatch@us.ibm.com>
26 */
27 #ifndef _LINUX_TOPOLOGY_H
28 #define _LINUX_TOPOLOGY_H
29
30 #include <linux/cpumask.h>
31 #include <linux/bitops.h>
32 #include <linux/mmzone.h>
33 #include <linux/smp.h>
34 #include <linux/percpu.h>
35 #include <asm/topology.h>
36
/* Fallback: assume every node has online memory unless the arch overrides. */
#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

/* Number of CPUs in @node's cpumask. */
#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

/*
 * Iterate over all online nodes that have at least one CPU.
 * NOTE(review): expands to a bare `if` with no `else`, so an `else`
 * following a single-statement loop body would bind to it — callers
 * should brace the body.
 */
#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))

/* Arch hook: returns nonzero if CPU topology changed since last call. */
int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
/* Default distance: SLIT local for self, SLIT remote for any other node. */
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * then switch on zone reclaim on boot.
 */
#define RECLAIM_DISTANCE 30
#endif
/* Page-allocator penalty weight for nodes that have CPUs (see mmzone use). */
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif
68
/*
 * Below are the major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_MC_INIT, for multi-core domains
 * SD_CPU_INIT, for SMP domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining their own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified.)
 */
80
#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
/*
 * Default sched_domain tuning for SMT (hardware-thread) siblings.
 * The 1* / 0* multipliers keep every SD_* flag spelled out so the
 * enabled set is visible at a glance; siblings share CPU power and
 * package resources, hence SD_SHARE_CPUPOWER/SD_SHARE_PKG_RESOURCES.
 */
#define SD_SIBLING_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 2,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				| arch_sd_sibling_asym_packing()	\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
}
#endif
#endif /* CONFIG_SCHED_SMT */
112
#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
/*
 * Two variants, identical except for wake handling: the MTK
 * load-balance-enhancement build balances at wakeup
 * (SD_BALANCE_WAKE=1, SD_WAKE_AFFINE=0) while the default build
 * uses wake-affine placement (SD_BALANCE_WAKE=0, SD_WAKE_AFFINE=1).
 */
#ifdef CONFIG_MT_LOAD_BALANCE_ENHANCEMENT
#define SD_MC_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#else
#define SD_MC_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif
#endif
#endif /* CONFIG_SCHED_MC */
167
/* Common values for CPUs */
#ifndef SD_CPU_INIT
/*
 * Eight SD_CPU_INIT variants selected by a 2x2x2 config matrix; all
 * other fields are identical across variants:
 *
 *   CONFIG_MT_LOAD_BALANCE_ENHANCEMENT:
 *       balances at wakeup (SD_BALANCE_WAKE=1, SD_WAKE_AFFINE=0);
 *       otherwise wake-affine (SD_BALANCE_WAKE=0, SD_WAKE_AFFINE=1).
 *   CONFIG_MTK_SCHED_CMP_TGS:
 *       ORs in SD_BALANCE_TG (thread-group-aware balancing).
 *   CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK:
 *       ORs in arch_sd_share_power_line() (arch decides the flag bits).
 */
# ifdef CONFIG_MT_LOAD_BALANCE_ENHANCEMENT
# ifdef CONFIG_MTK_SCHED_CMP_TGS
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
/* wake-balance + SD_BALANCE_TG + arch_sd_share_power_line() */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				| 1*SD_BALANCE_TG			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else
/* wake-balance + SD_BALANCE_TG */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| 1*SD_BALANCE_TG			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif
# else
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
/* wake-balance + arch_sd_share_power_line() */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else
/* wake-balance only */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif
# endif

# else //CONFIG_MT_LOAD_BALANCE_ENHANCEMENT

# ifdef CONFIG_MTK_SCHED_CMP_TGS
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
/* wake-affine + SD_BALANCE_TG + arch_sd_share_power_line() */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				| 1*SD_BALANCE_TG			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else
/* wake-affine + SD_BALANCE_TG */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| 1*SD_BALANCE_TG			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif
# else
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
/* wake-affine + arch_sd_share_power_line() */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else
/* wake-affine only (vanilla default) */
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif
# endif

# endif //CONFIG_MT_LOAD_BALANCE_ENHANCEMENT
#endif
411
#ifdef CONFIG_SCHED_BOOK
/* Book-level domains have no generic default; the arch must supply one. */
#ifndef SD_BOOK_INIT
#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
#endif
#endif /* CONFIG_SCHED_BOOK */
417
418 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
419 DECLARE_PER_CPU(int, numa_node);
420
421 #ifndef numa_node_id
422 /* Returns the number of the current Node. */
423 static inline int numa_node_id(void)
424 {
425 return __this_cpu_read(numa_node);
426 }
427 #endif
428
429 #ifndef cpu_to_node
430 static inline int cpu_to_node(int cpu)
431 {
432 return per_cpu(numa_node, cpu);
433 }
434 #endif
435
436 #ifndef set_numa_node
/* Cache @node as the current CPU's NUMA node id. */
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
441 #endif
442
443 #ifndef set_cpu_numa_node
/* Cache @node as @cpu's NUMA node id in its per-cpu slot. */
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
448 #endif
449
450 #else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
451
452 /* Returns the number of the current Node. */
453 #ifndef numa_node_id
static inline int numa_node_id(void)
{
	/* No per-cpu cache: derive the node from the executing CPU. */
	int this_cpu = raw_smp_processor_id();

	return cpu_to_node(this_cpu);
}
458 #endif
459
460 #endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
461
462 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
463
464 /*
465 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
466 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
467 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
468 */
469 DECLARE_PER_CPU(int, _numa_mem_);
470
471 #ifndef set_numa_mem
/* Record @node as the current CPU's nearest node with memory. */
static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
}
476 #endif
477
478 #ifndef numa_mem_id
479 /* Returns the number of the nearest Node with memory */
480 static inline int numa_mem_id(void)
481 {
482 return __this_cpu_read(_numa_mem_);
483 }
484 #endif
485
486 #ifndef cpu_to_mem
487 static inline int cpu_to_mem(int cpu)
488 {
489 return per_cpu(_numa_mem_, cpu);
490 }
491 #endif
492
493 #ifndef set_cpu_numa_mem
/* Record @node as @cpu's nearest node with memory. */
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
498 #endif
499
500 #else /* !CONFIG_HAVE_MEMORYLESS_NODES */
501
502 #ifndef numa_mem_id
503 /* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	/* Memoryless nodes untracked: assume our own node has memory. */
	int nid = numa_node_id();

	return nid;
}
508 #endif
509
510 #ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	/* Memoryless nodes untracked: memory node == CPU's home node. */
	int nid = cpu_to_node(cpu);

	return nid;
}
515 #endif
516
517 #endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
518
/*
 * Default topology accessors; an arch overrides any of these in
 * <asm/topology.h>.  The ((void)(cpu), ...) comma form keeps the
 * fallbacks warning-free when @cpu would otherwise be unused.
 */
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
/* Without SMT info, a CPU's thread-sibling set is just itself. */
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
/* Without core-sibling info, a CPU's core set is just itself. */
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif
531
532 #endif /* _LINUX_TOPOLOGY_H */