kernel/spinlock.c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
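
/*
 * Illustrative only, not part of the original file: callers reach
 * _spin_trylock() through the spin_trylock() wrapper. A typical
 * non-blocking caller pattern, assuming a hypothetical spinlock_t
 * named my_lock, looks roughly like:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... contended: back off without spinning ...
 *	}
 */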

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
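
/*
 * Illustrative only, not part of the original file: the usual way into
 * _spin_lock_irqsave() is the spin_lock_irqsave() macro, which must be
 * paired with spin_unlock_irqrestore() using the same flags variable:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, hardirqs disabled on this CPU ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * my_lock here is a hypothetical spinlock_t used only for illustration.
 */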

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
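
/*
 * Illustrative only, not part of the original file: spin_lock_bh() is
 * typically used when a lock is shared with softirq (BH) context; on
 * the unlock side it pairs with spin_unlock_bh():
 *
 *	spin_lock_bh(&my_lock);
 *	... critical section, softirq processing disabled on this CPU ...
 *	spin_unlock_bh(&my_lock);
 */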

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
	for (;;) { \
		preempt_disable(); \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		preempt_enable(); \
\
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			_raw_##op##_relax(&lock->raw_lock); \
	} \
	(lock)->break_lock = 0; \
} \
\
EXPORT_SYMBOL(_##op##_lock); \
\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
	unsigned long flags; \
\
	for (;;) { \
		preempt_disable(); \
		local_irq_save(flags); \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		local_irq_restore(flags); \
		preempt_enable(); \
\
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			_raw_##op##_relax(&lock->raw_lock); \
	} \
	(lock)->break_lock = 0; \
	return flags; \
} \
\
EXPORT_SYMBOL(_##op##_lock_irqsave); \
\
void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
	_##op##_lock_irqsave(lock); \
} \
\
EXPORT_SYMBOL(_##op##_lock_irq); \
\
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
	unsigned long flags; \
\
	/* */ \
	/* Careful: we must exclude softirqs too, hence the */ \
	/* irq-disabling. We use the generic preemption-aware */ \
	/* function: */ \
	/**/ \
	flags = _##op##_lock_irqsave(lock); \
	local_bh_disable(); \
	local_irq_restore(flags); \
} \
\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
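
/*
 * Illustration only, not part of the original file: for op=spin and
 * locktype=spinlock, the first helper in BUILD_LOCK_OPS above expands
 * (modulo whitespace) to:
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				_raw_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */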

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

EXPORT_SYMBOL(_spin_lock_nested);

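/*
 * Illustrative only, not part of the original file: _spin_lock_nested()
 * is reached via the spin_lock_nested() wrapper when two locks of the
 * same lockdep class are taken in a fixed order, roughly:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *
 * parent and child are hypothetical objects whose locks share a class;
 * the subclass tells lockdep that this nesting is intentional.
 */
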
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif

void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
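
/*
 * Illustrative only, not part of the original file: architecture
 * profile_pc() implementations typically use this helper to skip over
 * lock functions when sampling, along the lines of:
 *
 *	unsigned long pc = instruction_pointer(regs);
 *
 *	if (!user_mode(regs) && in_lock_functions(pc))
 *		pc = ...caller's address recovered from the stack...;
 *	return pc;
 *
 * The exact stack walk is architecture-specific (see the note about
 * profile_pc in the header comment of this file).
 */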