include/linux/smp.h

#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 * Generic SMP support
 * Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irqflags.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);

struct call_single_data {
	struct list_head list;
	smp_call_func_t func;
	void *info;
	u16 flags;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

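/*
 * Usage sketch (illustrative, not part of the original header; the function
 * names are hypothetical and <linux/timex.h> is assumed for get_cycles()):
 * running a function on one specific CPU and waiting for it to finish. The
 * callback runs with interrupts disabled on the target CPU and must not
 * sleep; with wait == 1 the caller blocks until the callback has completed,
 * so &val can safely live on the caller's stack.
 *
 *	static void read_remote_cycles(void *info)
 *	{
 *		u64 *val = info;
 *
 *		*val = get_cycles();
 *	}
 *
 *	static u64 sample_cpu_cycles(int cpu)
 *	{
 *		u64 val = 0;
 *
 *		smp_call_function_single(cpu, read_remote_cycles, &val, 1);
 *		return val;
 *	}
 */
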
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
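
/*
 * Usage sketch (illustrative, not part of the original header; the per-CPU
 * variable and function names are hypothetical and <linux/percpu.h> is
 * assumed): smp_call_function_many() runs the callback on the online CPUs
 * in the mask but skips the calling CPU, so the caller covers its own CPU
 * separately; preemption must stay disabled across the call.
 *
 *	static DEFINE_PER_CPU(bool, cache_needs_flush);
 *
 *	static void mark_flush_needed(void *info)
 *	{
 *		this_cpu_write(cache_needs_flush, true);
 *	}
 *
 *	static void mark_flush_needed_on(const struct cpumask *cpus)
 *	{
 *		preempt_disable();
 *		smp_call_function_many(cpus, mark_flush_needed, NULL, true);
 *		if (cpumask_test_cpu(smp_processor_id(), cpus))
 *			mark_flush_needed(NULL);
 *		preempt_enable();
 *	}
 */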

void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

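/*
 * Usage sketch (illustrative, not part of the original header; the function
 * names are hypothetical): smp_call_function_any() runs the callback once,
 * on some online CPU from the mask, preferring the current CPU when it is
 * in the mask so that no IPI is needed.
 *
 *	static void record_cpu(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	static int run_on_mask(const struct cpumask *mask)
 *	{
 *		int cpu = -1;
 *
 *		smp_call_function_any(mask, record_cpu, &cpu, 1);
 *		return cpu;
 *	}
 */
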
void kick_all_cpus_sync(void);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
#else
static inline void call_function_init(void) { }
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

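/*
 * Usage sketch (illustrative, not part of the original header; the per-CPU
 * variable and function names are hypothetical and <linux/percpu.h> is
 * assumed): on_each_cpu() runs the callback on every online CPU, including
 * the local one, with interrupts disabled, so the callback must be short
 * and must not sleep. With wait == 1 the call returns only after every CPU
 * has finished running the callback.
 *
 *	static DEFINE_PER_CPU(unsigned long, hit_count);
 *
 *	static void reset_hit_count(void *info)
 *	{
 *		this_cpu_write(hit_count, 0);
 *	}
 *
 *	static void reset_all_hit_counts(void)
 *	{
 *		on_each_cpu(reset_hit_count, NULL, 1);
 *	}
 */
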
/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);

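/*
 * Usage sketch (illustrative, not part of the original header; the names
 * are hypothetical and <linux/percpu.h>, <linux/atomic.h> and
 * <linux/topology.h> are assumed): folding per-CPU counters from one NUMA
 * node into a global total. Unlike smp_call_function_many(),
 * on_each_cpu_mask() also runs the callback on the local CPU when it is in
 * the mask, and it disables preemption itself.
 *
 *	static DEFINE_PER_CPU(unsigned long, node_hits);
 *	static atomic_long_t total_hits;
 *
 *	static void fold_node_hits(void *info)
 *	{
 *		atomic_long_add(this_cpu_read(node_hits), &total_hits);
 *		this_cpu_write(node_hits, 0);
 *	}
 *
 *	static void fold_hits_for_node(int node)
 *	{
 *		on_each_cpu_mask(cpumask_of_node(node), fold_node_hits,
 *				 NULL, true);
 *	}
 */
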
/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);

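/*
 * Usage sketch (illustrative, not part of the original header; the names
 * are hypothetical and <linux/percpu.h> and <linux/gfp.h> are assumed):
 * on_each_cpu_cond() interrupts only the CPUs for which cond_func returns
 * true, which avoids sending IPIs to CPUs that have nothing to do.
 * gfp_flags is used to allocate a temporary cpumask; if that allocation
 * fails, the implementation can fall back to one IPI per matching CPU.
 *
 *	static DEFINE_PER_CPU(unsigned long, pending_work);
 *
 *	static bool cpu_has_pending_work(int cpu, void *info)
 *	{
 *		return per_cpu(pending_work, cpu) != 0;
 *	}
 *
 *	static void clear_pending_work(void *info)
 *	{
 *		this_cpu_write(pending_work, 0);
 *	}
 *
 *	static void clear_all_pending_work(void)
 *	{
 *		on_each_cpu_cond(cpu_has_pending_work, clear_pending_work,
 *				 NULL, true, GFP_KERNEL);
 *	}
 */
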
/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
#define on_each_cpu_mask(mask, func, info, wait) \
	do {						\
		if (cpumask_test_cpu(0, (mask))) {	\
			local_irq_disable();		\
			(func)(info);			\
			local_irq_enable();		\
		}					\
	} while (0)
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
	do {						\
		void *__info = (info);			\
		preempt_disable();			\
		if ((cond_func)(0, __info)) {		\
			local_irq_disable();		\
			(func)(__info);			\
			local_irq_enable();		\
		}					\
		preempt_enable();			\
	} while (0)

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

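/*
 * Usage sketch (illustrative, not part of the original header; the per-CPU
 * variable and function names are hypothetical and <linux/percpu.h> is
 * assumed): get_cpu() disables preemption and returns the current CPU
 * number; put_cpu() re-enables preemption. Between the two calls the task
 * cannot migrate, so smp_processor_id() and per-CPU data can be used
 * safely. A bare smp_processor_id() outside such a region (or outside a
 * thread bound to one CPU) triggers a warning when CONFIG_DEBUG_PREEMPT
 * is enabled.
 *
 *	static DEFINE_PER_CPU(unsigned long, event_count);
 *
 *	static void count_event(void)
 *	{
 *		int cpu = get_cpu();
 *
 *		per_cpu(event_count, cpu)++;
 *		put_cpu();
 *	}
 */
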
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */