/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))

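/*
 * Illustrative example (not part of the original header): walking every
 * online node that has at least one CPU and reporting how many it has.
 *
 *	int node;
 *
 *	for_each_node_with_cpus(node)
 *		pr_info("node %d has %u CPUs\n", node, nr_cpus_node(node));
 */
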
int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#define node_distance(from, to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)

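/*
 * Illustrative note (not part of the original header): with the definition
 * above, node_distance(0, 0) evaluates to LOCAL_DISTANCE (10) and
 * node_distance(0, 1) to REMOTE_DISTANCE (20).  SLIT distances are relative
 * to the local value of 10, so a distance of 30 between two nodes means
 * roughly three times the local access cost.
 */
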
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch-specific units node_distance() returns), then enable
 * zone reclaim at boot.
 */
#define RECLAIM_DISTANCE 30
#endif

#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

/*
 * Below are the three major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_MC_INIT, for multi-core (MC) domains
 * SD_CPU_INIT, for SMP domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining its own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified.)
 */

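/*
 * Illustrative example (not part of the original header, values made up):
 * an architecture could ship its own SMT-level initializer in
 * <asm/topology.h>, which then takes precedence over the generic
 * SD_SIBLING_INIT defined below:
 *
 *	#define SD_SIBLING_INIT (struct sched_domain) {	\
 *		.imbalance_pct		= 115,			\
 *		.flags			= 1*SD_LOAD_BALANCE	\
 *					| 1*SD_SHARE_CPUPOWER	\
 *					,			\
 *		.last_balance		= jiffies,		\
 *		.balance_interval	= 1,			\
 *	}
 */
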
#ifdef CONFIG_SCHED_SMT
/*
 * MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) {			\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_PREFER_SIBLING			\
				| arch_sd_sibling_asym_packing()	\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
}
#endif
#endif /* CONFIG_SCHED_SMT */

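/*
 * Note on the flag initializers used above and below: writing every flag as
 * either 1*FLAG or 0*FLAG keeps the full list of tunable flags visible while
 * the disabled ones contribute nothing to the bitwise OR.  For example,
 *
 *	1*SD_LOAD_BALANCE | 0*SD_BALANCE_WAKE
 *
 * folds to just SD_LOAD_BALANCE at compile time, so flipping a 0 to a 1 is
 * all it takes to enable a flag in one of these domain initializers.
 */
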
#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings; for now mostly derived from SD_CPU_INIT */
#ifdef CONFIG_MT_LOAD_BALANCE_ENHANCEMENT
#define SD_MC_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#else /* !CONFIG_MT_LOAD_BALANCE_ENHANCEMENT */
#define SD_MC_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif /* CONFIG_MT_LOAD_BALANCE_ENHANCEMENT */
#endif /* CONFIG_SCHED_MC */

/* Common values for CPUs */
# ifdef CONFIG_MT_LOAD_BALANCE_ENHANCEMENT
# ifdef CONFIG_MTK_SCHED_CMP_TGS
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else /* !CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
# else /* !CONFIG_MTK_SCHED_CMP_TGS */
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else /* !CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 1*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
# endif /* CONFIG_MTK_SCHED_CMP_TGS */
# else /* !CONFIG_MT_LOAD_BALANCE_ENHANCEMENT */
# ifdef CONFIG_MTK_SCHED_CMP_TGS
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else /* !CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
# else /* !CONFIG_MTK_SCHED_CMP_TGS */
# ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				| arch_sd_share_power_line()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# else /* !CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
# endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK */
# endif /* CONFIG_MTK_SCHED_CMP_TGS */
# endif /* CONFIG_MT_LOAD_BALANCE_ENHANCEMENT */

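/*
 * Summary of the SD_CPU_INIT variants above:
 * CONFIG_MT_LOAD_BALANCE_ENHANCEMENT selects whether SD_BALANCE_WAKE is set
 * (1 with the enhancement, 0 without), and
 * CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK selects whether
 * arch_sd_share_power_line() is ORed into the flags; the remaining tuning
 * (imbalance_pct 125, cache_nice_tries 1, exec/fork balancing and
 * SD_PREFER_SIBLING) is common to every variant.
 */
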
#ifdef CONFIG_SCHED_BOOK
#ifndef SD_BOOK_INIT
#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
#endif
#endif /* CONFIG_SCHED_BOOK */

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return __this_cpu_read(numa_node);
}

static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */

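/*
 * Illustrative example (not part of the original header): whichever of the
 * two implementations above is in effect, callers use the accessors the same
 * way, e.g. to allocate on the calling CPU's own node:
 *
 *	struct foo *p = kmalloc_node(sizeof(*p), GFP_KERNEL, numa_node_id());
 *
 * ('struct foo' is a placeholder.)  cpu_to_node(cpu) answers the same
 * question for an arbitrary CPU.
 */
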
#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per-cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
}

/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return __this_cpu_read(_numa_mem_);
}

static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}

static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */

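/*
 * Illustrative example (not part of the original header): numa_mem_id() is
 * generally preferable to numa_node_id() when picking a node for an
 * allocation, because with CONFIG_HAVE_MEMORYLESS_NODES it names the nearest
 * node that actually has memory:
 *
 *	struct page *page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);
 */
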
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif

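/*
 * Illustrative example (not part of the original header): architectures that
 * know their sibling layout override the topology_*() helpers above in
 * <asm/topology.h>; generic code can then, for instance, visit every
 * hardware thread sharing a core with a given CPU (with the defaults above,
 * the mask contains only the CPU itself):
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, topology_thread_cpumask(cpu))
 *		pr_info("CPU%d and CPU%d are SMT siblings\n", cpu, sibling);
 */
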
#endif /* _LINUX_TOPOLOGY_H */