Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / spinlock.c
1 /*
2 * Copyright (2004) Linus Torvalds
3 *
4 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
5 *
6 * Copyright (2004, 2005) Ingo Molnar
7 *
8 * This file contains the spinlock/rwlock implementations for the
9 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
10 */
11
12 #include <linux/linkage.h>
13 #include <linux/preempt.h>
14 #include <linux/spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17
18 /*
19 * Generic declaration of the raw read_trylock() function,
20 * architectures are supposed to optimize this:
21 */
/*
 * Generic fallback for the raw read-trylock: simply takes the read
 * lock (which may spin) and reports success.  Architectures are
 * expected to replace this with a real non-blocking attempt, hence
 * the "trylock" name despite the unconditional acquisition here.
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
28
29 int __lockfunc _spin_trylock(spinlock_t *lock)
30 {
31 preempt_disable();
32 if (_raw_spin_trylock(lock))
33 return 1;
34
35 preempt_enable();
36 return 0;
37 }
38 EXPORT_SYMBOL(_spin_trylock);
39
40 int __lockfunc _read_trylock(rwlock_t *lock)
41 {
42 preempt_disable();
43 if (_raw_read_trylock(lock))
44 return 1;
45
46 preempt_enable();
47 return 0;
48 }
49 EXPORT_SYMBOL(_read_trylock);
50
51 int __lockfunc _write_trylock(rwlock_t *lock)
52 {
53 preempt_disable();
54 if (_raw_write_trylock(lock))
55 return 1;
56
57 preempt_enable();
58 return 0;
59 }
60 EXPORT_SYMBOL(_write_trylock);
61
62 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
63
/*
 * Take @lock for reading.  Preemption is disabled first so the raw
 * spin cannot be preempted; it stays disabled until _read_unlock().
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
70
/*
 * Take @lock, disabling local interrupts and preemption first.
 * Returns the previous interrupt state for _spin_unlock_irqrestore().
 *
 * The saved flags are passed down by pointer so the architecture's
 * _raw_spin_lock_flags() can consult them while acquiring the lock
 * (NOTE(review): presumably to briefly re-enable IRQs while spinning
 * on some arches — confirm against the arch headers).
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
81
/*
 * Take @lock with local interrupts disabled.  IRQs are switched off
 * before preemption is disabled and before the raw acquisition.
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
89
/*
 * Take @lock with softirq (bottom-half) processing disabled on this
 * CPU.  BHs are switched off before preemption is disabled.
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
97
/*
 * Take @lock for reading with local interrupts disabled.  Returns the
 * previous interrupt state for _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
108
/*
 * Take @lock for reading with local interrupts unconditionally
 * disabled (no flags saved — pair with _read_unlock_irq()).
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
116
/*
 * Take @lock for reading with softirq processing disabled on this
 * CPU.  Pair with _read_unlock_bh().
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
124
/*
 * Take @lock for writing with local interrupts disabled.  Returns the
 * previous interrupt state for _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
135
/*
 * Take @lock for writing with local interrupts unconditionally
 * disabled (no flags saved — pair with _write_unlock_irq()).
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
143
/*
 * Take @lock for writing with softirq processing disabled on this
 * CPU.  Pair with _write_unlock_bh().
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
151
/*
 * Plain spin-lock: disable preemption, then acquire the raw lock.
 * Preemption stays off until the matching _spin_unlock().
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
159
/*
 * Plain write-lock: disable preemption, then acquire the raw lock.
 * Preemption stays off until the matching _write_unlock().
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
167
168 #else /* CONFIG_PREEMPT: */
169
/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 *
 * Each generated *_lock() variant loops on the raw trylock, with
 * preemption (and, for the irqsave variant, interrupts) re-enabled
 * between attempts so this CPU stays responsive.  While waiting it
 * sets ->break_lock to ask the current holder to release the lock
 * early, and busy-waits with cpu_relax() until the lock looks free.
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
255
256 #endif /* CONFIG_PREEMPT */
257
/*
 * Release @lock and re-enable preemption (disabled at lock time).
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
264
/*
 * Release the write-held @lock and re-enable preemption.
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
271
/*
 * Release the read-held @lock and re-enable preemption.
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
278
/*
 * Release @lock, restore the interrupt state saved by
 * _spin_lock_irqsave(), then re-enable preemption — the exact reverse
 * of the lock-side ordering.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
286
/*
 * Release @lock, unconditionally re-enable local interrupts, then
 * re-enable preemption.  Pairs with _spin_lock_irq().
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
294
/*
 * Release @lock and re-enable softirqs.  Pairs with _spin_lock_bh().
 * preempt_enable_no_resched() avoids a reschedule point here;
 * NOTE(review): presumably local_bh_enable() provides the reschedule
 * check itself — confirm against the softirq implementation.
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
302
/*
 * Release the read-held @lock, restore the interrupt state saved by
 * _read_lock_irqsave(), then re-enable preemption.
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
310
/*
 * Release the read-held @lock, unconditionally re-enable local
 * interrupts, then re-enable preemption.  Pairs with _read_lock_irq().
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
318
/*
 * Release the read-held @lock and re-enable softirqs.  Pairs with
 * _read_lock_bh(); same no-resched ordering as _spin_unlock_bh().
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
326
/*
 * Release the write-held @lock, restore the interrupt state saved by
 * _write_lock_irqsave(), then re-enable preemption.
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
334
/*
 * Release the write-held @lock, unconditionally re-enable local
 * interrupts, then re-enable preemption.  Pairs with _write_lock_irq().
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
342
/*
 * Release the write-held @lock and re-enable softirqs.  Pairs with
 * _write_lock_bh(); same no-resched ordering as _spin_unlock_bh().
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
350
351 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
352 {
353 local_bh_disable();
354 preempt_disable();
355 if (_raw_spin_trylock(lock))
356 return 1;
357
358 preempt_enable_no_resched();
359 local_bh_enable();
360 return 0;
361 }
362 EXPORT_SYMBOL(_spin_trylock_bh);
363
/*
 * Report whether @addr lies inside the __lockfunc text section, i.e.
 * whether it points into one of the lock-spinning functions above.
 * Returns nonzero if so, 0 otherwise.
 */
int in_lock_functions(unsigned long addr)
{
	/* Section bounds emitted by the linker around __lockfunc code */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);