/*
 * arch/arm/include/asm/topology.h
 * From android_kernel_alcatel_ttab (mt8127), merge of upstream tag v3.10.55.
 */
1 #ifndef _ASM_ARM_TOPOLOGY_H
2 #define _ASM_ARM_TOPOLOGY_H
3
4 #ifdef CONFIG_ARM_CPU_TOPOLOGY
5
6 #include <linux/cpumask.h>
7
/*
 * Per-CPU topology record. One entry exists per logical CPU (see
 * cpu_topology[] below); the IDs use -1 as "not present" (cf. the
 * mc_capable()/smt_capable() tests further down).
 */
struct cputopo_arm {
	int thread_id;			/* SMT thread index within the core; -1 if no SMT */
	int core_id;			/* core index within the socket/cluster */
	int socket_id;			/* physical package (cluster) id; -1 if unknown */
	cpumask_t thread_sibling;	/* CPUs that are SMT siblings of this one */
	cpumask_t core_sibling;		/* CPUs sharing the same socket/cluster */
};
15
/* Topology table, one entry per logical CPU; read via the topology_* macros below. */
extern struct cputopo_arm cpu_topology[NR_CPUS];
17
/* Generic-topology accessors, all indexed by logical CPU number. */
#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
/* Fixed-point scale for frequency-invariant CPU capacity: 1.0 == 1 << 10. */
#define CPUPOWER_FREQSCALE_SHIFT	10
#define CPUPOWER_FREQSCALE_DEFAULT	(1L << CPUPOWER_FREQSCALE_SHIFT)
extern unsigned long arch_get_max_cpu_capacity(int);
extern unsigned long arch_get_cpu_capacity(int);
extern int arch_get_invariant_power_enabled(void);
extern int arch_get_cpu_throttling(int);

#define topology_max_cpu_capacity(cpu)	(arch_get_max_cpu_capacity(cpu))
#define topology_cpu_capacity(cpu)	(arch_get_cpu_capacity(cpu))
/*
 * BUGFIX: previously expanded to arch_get_cpu_capacity(cpu) (copy-paste
 * from the line above), leaving arch_get_cpu_throttling() declared but
 * never used and reporting capacity where callers asked for throttling.
 */
#define topology_cpu_throttling(cpu)	(arch_get_cpu_throttling(cpu))
#define topology_cpu_inv_power_en(void)	(arch_get_invariant_power_enabled())
#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
35
/* Capability probes: socket/thread ids are -1 when that level is absent. */
#define mc_capable()	(cpu_topology[0].socket_id != -1)
#define smt_capable()	(cpu_topology[0].thread_id != -1)
38
void init_cpu_topology(void);			/* reset/parse topology at boot */
void store_cpu_topology(unsigned int cpuid);	/* record topology of one CPU */
const struct cpumask *cpu_coregroup_mask(int cpu);
/* Translate a socket/cluster id into the cpumask of its logical CPUs. */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
43
#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE

#if defined (CONFIG_HMP_PACK_SMALL_TASK) && !defined(CONFIG_MTK_SCHED_CMP)
/* Common values for CPUs */
#ifndef SD_CPU_INIT
/*
 * CPU-level sched_domain initializer used when periodic load balance is
 * compiled out: note SD_LOAD_BALANCE is 0* (disabled) while the newidle/
 * exec/fork balance paths stay enabled.  The 0*FLAG / 1*FLAG idiom keeps
 * every flag visible in the list while selecting only the 1* ones.
 */
#define SD_CPU_INIT (struct sched_domain) {	\
	.min_interval		= 1,		\
	.max_interval		= 4,		\
	.busy_factor		= 64,		\
	.imbalance_pct		= 125,		\
	.cache_nice_tries	= 1,		\
	.busy_idx		= 2,		\
	.idle_idx		= 1,		\
	.newidle_idx		= 0,		\
	.wake_idx		= 0,		\
	.forkexec_idx		= 0,		\
						\
	.flags			= 0*SD_LOAD_BALANCE		\
				| 1*SD_BALANCE_NEWIDLE		\
				| 1*SD_BALANCE_EXEC		\
				| 1*SD_BALANCE_FORK		\
				| 0*SD_BALANCE_WAKE		\
				| 1*SD_WAKE_AFFINE		\
				| 0*SD_SHARE_CPUPOWER		\
				| 0*SD_SHARE_PKG_RESOURCES	\
				| arch_sd_share_power_line()	\
				| 0*SD_SERIALIZE		\
				,				\
	.last_balance		= jiffies,	\
	.balance_interval	= 1,		\
}
#endif
#endif /* CONFIG_HMP_PACK_SMALL_TASK */

#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
79
/*
 * CPU cluster functions (big.LITTLE helpers, implemented by the arch
 * topology code).  "big"/"little" classification semantics are defined by
 * the implementation — presumably by cluster/efficiency; confirm in
 * arch/arm/kernel topology sources.
 */
extern void arch_build_cpu_topology_domain(void);
extern int arch_cpu_is_big(unsigned int cpu);
extern int arch_cpu_is_little(unsigned int cpu);
extern int arch_is_multi_cluster(void);
extern int arch_is_big_little(void);
extern int arch_get_nr_clusters(void);
extern int arch_get_cluster_id(unsigned int cpu);
extern void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id);
extern void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little);
90
91 #else /* !CONFIG_ARM_CPU_TOPOLOGY */
92
/* No-op stubs when CPU topology support is compiled out. */
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
/* No clusters to translate without topology; -EINVAL relies on the
 * includer pulling in <linux/errno.h>. */
static inline int cluster_to_logical_mask(unsigned int socket_id,
	cpumask_t *cluster_mask) { return -EINVAL; }
97
/* Without topology data, model the system as one cluster of little CPUs. */
static inline void arch_build_cpu_topology_domain(void) {}
static inline int arch_cpu_is_big(unsigned int cpu) { return 0; }
static inline int arch_cpu_is_little(unsigned int cpu) { return 1; }
static inline int arch_is_multi_cluster(void) { return 0; }
static inline int arch_is_big_little(void) { return 0; }
static inline int arch_get_nr_clusters(void) { return 1; }
static inline int arch_get_cluster_id(unsigned int cpu) { return 0; }
105 static inline void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
106 {
107 cpumask_clear(cpus);
108 if (0 == cluster_id) {
109 unsigned int cpu;
110 for_each_possible_cpu(cpu)
111 cpumask_set_cpu(cpu, cpus);
112 }
113 }
114 static inline void arch_get_big_little_cpus(struct cpumask *big,struct cpumask *little)
115 {
116 unsigned int cpu;
117 cpumask_clear(big);
118 cpumask_clear(little);
119 for_each_possible_cpu(cpu)
120 cpumask_set_cpu(cpu, little);
121 }
122
123 #endif /* CONFIG_ARM_CPU_TOPOLOGY */
124
125 #include <asm-generic/topology.h>
126
127 #endif /* _ASM_ARM_TOPOLOGY_H */