locking: Move spinlock function bodies to header file
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / spinlock.c
CommitLineData
1da177e4
LT
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame contact the architecture maintainers.
 */
16
1da177e4
LT
17#include <linux/linkage.h>
18#include <linux/preempt.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
8a25d5de 21#include <linux/debug_locks.h>
1da177e4
LT
22#include <linux/module.h>
23
1da177e4
LT
24int __lockfunc _spin_trylock(spinlock_t *lock)
25{
69d0ee73 26 return __spin_trylock(lock);
1da177e4
LT
27}
28EXPORT_SYMBOL(_spin_trylock);
29
30int __lockfunc _read_trylock(rwlock_t *lock)
31{
69d0ee73 32 return __read_trylock(lock);
1da177e4
LT
33}
34EXPORT_SYMBOL(_read_trylock);
35
36int __lockfunc _write_trylock(rwlock_t *lock)
37{
69d0ee73 38 return __write_trylock(lock);
1da177e4
LT
39}
40EXPORT_SYMBOL(_write_trylock);
41
8a25d5de
IM
42/*
43 * If lockdep is enabled then we use the non-preemption spin-ops
44 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
45 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
46 */
95c354fe 47#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
1da177e4
LT
48
49void __lockfunc _read_lock(rwlock_t *lock)
50{
69d0ee73 51 __read_lock(lock);
1da177e4
LT
52}
53EXPORT_SYMBOL(_read_lock);
54
55unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
56{
69d0ee73 57 return __spin_lock_irqsave(lock);
1da177e4
LT
58}
59EXPORT_SYMBOL(_spin_lock_irqsave);
60
61void __lockfunc _spin_lock_irq(spinlock_t *lock)
62{
69d0ee73 63 __spin_lock_irq(lock);
1da177e4
LT
64}
65EXPORT_SYMBOL(_spin_lock_irq);
66
67void __lockfunc _spin_lock_bh(spinlock_t *lock)
68{
69d0ee73 69 __spin_lock_bh(lock);
1da177e4
LT
70}
71EXPORT_SYMBOL(_spin_lock_bh);
72
73unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
74{
69d0ee73 75 return __read_lock_irqsave(lock);
1da177e4
LT
76}
77EXPORT_SYMBOL(_read_lock_irqsave);
78
79void __lockfunc _read_lock_irq(rwlock_t *lock)
80{
69d0ee73 81 __read_lock_irq(lock);
1da177e4
LT
82}
83EXPORT_SYMBOL(_read_lock_irq);
84
85void __lockfunc _read_lock_bh(rwlock_t *lock)
86{
69d0ee73 87 __read_lock_bh(lock);
1da177e4
LT
88}
89EXPORT_SYMBOL(_read_lock_bh);
90
91unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
92{
69d0ee73 93 return __write_lock_irqsave(lock);
1da177e4
LT
94}
95EXPORT_SYMBOL(_write_lock_irqsave);
96
97void __lockfunc _write_lock_irq(rwlock_t *lock)
98{
69d0ee73 99 __write_lock_irq(lock);
1da177e4
LT
100}
101EXPORT_SYMBOL(_write_lock_irq);
102
103void __lockfunc _write_lock_bh(rwlock_t *lock)
104{
69d0ee73 105 __write_lock_bh(lock);
1da177e4
LT
106}
107EXPORT_SYMBOL(_write_lock_bh);
108
109void __lockfunc _spin_lock(spinlock_t *lock)
110{
69d0ee73 111 __spin_lock(lock);
1da177e4 112}
1da177e4
LT
113EXPORT_SYMBOL(_spin_lock);
114
115void __lockfunc _write_lock(rwlock_t *lock)
116{
69d0ee73 117 __write_lock(lock);
1da177e4 118}
1da177e4
LT
119EXPORT_SYMBOL(_write_lock);
120
121#else /* CONFIG_PREEMPT: */
122
123/*
124 * This could be a long-held lock. We both prepare to spin for a long
125 * time (making _this_ CPU preemptable if possible), and we also signal
126 * towards that other CPU that it should break the lock ASAP.
127 *
128 * (We do this in a function because inlining it would be excessive.)
129 */
130
131#define BUILD_LOCK_OPS(op, locktype) \
132void __lockfunc _##op##_lock(locktype##_t *lock) \
133{ \
1da177e4 134 for (;;) { \
ee25e96f 135 preempt_disable(); \
1da177e4
LT
136 if (likely(_raw_##op##_trylock(lock))) \
137 break; \
138 preempt_enable(); \
ee25e96f 139 \
1da177e4
LT
140 if (!(lock)->break_lock) \
141 (lock)->break_lock = 1; \
142 while (!op##_can_lock(lock) && (lock)->break_lock) \
ef6edc97 143 _raw_##op##_relax(&lock->raw_lock); \
1da177e4
LT
144 } \
145 (lock)->break_lock = 0; \
146} \
147 \
148EXPORT_SYMBOL(_##op##_lock); \
149 \
150unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
151{ \
152 unsigned long flags; \
153 \
1da177e4 154 for (;;) { \
ee25e96f 155 preempt_disable(); \
1da177e4
LT
156 local_irq_save(flags); \
157 if (likely(_raw_##op##_trylock(lock))) \
158 break; \
159 local_irq_restore(flags); \
1da177e4 160 preempt_enable(); \
ee25e96f 161 \
1da177e4
LT
162 if (!(lock)->break_lock) \
163 (lock)->break_lock = 1; \
164 while (!op##_can_lock(lock) && (lock)->break_lock) \
ef6edc97 165 _raw_##op##_relax(&lock->raw_lock); \
1da177e4
LT
166 } \
167 (lock)->break_lock = 0; \
168 return flags; \
169} \
170 \
171EXPORT_SYMBOL(_##op##_lock_irqsave); \
172 \
173void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
174{ \
175 _##op##_lock_irqsave(lock); \
176} \
177 \
178EXPORT_SYMBOL(_##op##_lock_irq); \
179 \
180void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
181{ \
182 unsigned long flags; \
183 \
184 /* */ \
185 /* Careful: we must exclude softirqs too, hence the */ \
186 /* irq-disabling. We use the generic preemption-aware */ \
187 /* function: */ \
188 /**/ \
189 flags = _##op##_lock_irqsave(lock); \
190 local_bh_disable(); \
191 local_irq_restore(flags); \
192} \
193 \
194EXPORT_SYMBOL(_##op##_lock_bh)
195
196/*
197 * Build preemption-friendly versions of the following
198 * lock-spinning functions:
199 *
200 * _[spin|read|write]_lock()
201 * _[spin|read|write]_lock_irq()
202 * _[spin|read|write]_lock_irqsave()
203 * _[spin|read|write]_lock_bh()
204 */
205BUILD_LOCK_OPS(spin, spinlock);
206BUILD_LOCK_OPS(read, rwlock);
207BUILD_LOCK_OPS(write, rwlock);
208
209#endif /* CONFIG_PREEMPT */
210
8a25d5de
IM
211#ifdef CONFIG_DEBUG_LOCK_ALLOC
212
213void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
214{
215 preempt_disable();
216 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
4fe87745 217 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
8a25d5de 218}
8a25d5de 219EXPORT_SYMBOL(_spin_lock_nested);
b7d39aff 220
cfd3ef23
AV
221unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
222{
223 unsigned long flags;
224
225 local_irq_save(flags);
226 preempt_disable();
227 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
e8c158bb
RH
228 LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
229 _raw_spin_lock_flags, &flags);
cfd3ef23
AV
230 return flags;
231}
cfd3ef23 232EXPORT_SYMBOL(_spin_lock_irqsave_nested);
8a25d5de 233
b7d39aff
PZ
234void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
235 struct lockdep_map *nest_lock)
236{
237 preempt_disable();
238 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
239 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
240}
b7d39aff
PZ
241EXPORT_SYMBOL(_spin_lock_nest_lock);
242
8a25d5de
IM
243#endif
244
1da177e4
LT
245void __lockfunc _spin_unlock(spinlock_t *lock)
246{
69d0ee73 247 __spin_unlock(lock);
1da177e4
LT
248}
249EXPORT_SYMBOL(_spin_unlock);
250
251void __lockfunc _write_unlock(rwlock_t *lock)
252{
69d0ee73 253 __write_unlock(lock);
1da177e4
LT
254}
255EXPORT_SYMBOL(_write_unlock);
256
257void __lockfunc _read_unlock(rwlock_t *lock)
258{
69d0ee73 259 __read_unlock(lock);
1da177e4
LT
260}
261EXPORT_SYMBOL(_read_unlock);
262
263void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
264{
69d0ee73 265 __spin_unlock_irqrestore(lock, flags);
1da177e4
LT
266}
267EXPORT_SYMBOL(_spin_unlock_irqrestore);
268
269void __lockfunc _spin_unlock_irq(spinlock_t *lock)
270{
69d0ee73 271 __spin_unlock_irq(lock);
1da177e4
LT
272}
273EXPORT_SYMBOL(_spin_unlock_irq);
274
275void __lockfunc _spin_unlock_bh(spinlock_t *lock)
276{
69d0ee73 277 __spin_unlock_bh(lock);
1da177e4
LT
278}
279EXPORT_SYMBOL(_spin_unlock_bh);
280
281void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
282{
69d0ee73 283 __read_unlock_irqrestore(lock, flags);
1da177e4
LT
284}
285EXPORT_SYMBOL(_read_unlock_irqrestore);
286
287void __lockfunc _read_unlock_irq(rwlock_t *lock)
288{
69d0ee73 289 __read_unlock_irq(lock);
1da177e4
LT
290}
291EXPORT_SYMBOL(_read_unlock_irq);
292
293void __lockfunc _read_unlock_bh(rwlock_t *lock)
294{
69d0ee73 295 __read_unlock_bh(lock);
1da177e4
LT
296}
297EXPORT_SYMBOL(_read_unlock_bh);
298
299void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
300{
69d0ee73 301 __write_unlock_irqrestore(lock, flags);
1da177e4
LT
302}
303EXPORT_SYMBOL(_write_unlock_irqrestore);
304
305void __lockfunc _write_unlock_irq(rwlock_t *lock)
306{
69d0ee73 307 __write_unlock_irq(lock);
1da177e4
LT
308}
309EXPORT_SYMBOL(_write_unlock_irq);
310
311void __lockfunc _write_unlock_bh(rwlock_t *lock)
312{
69d0ee73 313 __write_unlock_bh(lock);
1da177e4
LT
314}
315EXPORT_SYMBOL(_write_unlock_bh);
316
317int __lockfunc _spin_trylock_bh(spinlock_t *lock)
318{
69d0ee73 319 return __spin_trylock_bh(lock);
1da177e4
LT
320}
321EXPORT_SYMBOL(_spin_trylock_bh);
322
0764d23c 323notrace int in_lock_functions(unsigned long addr)
1da177e4
LT
324{
325 /* Linker adds these: start and end of __lockfunc functions */
326 extern char __lock_text_start[], __lock_text_end[];
327
328 return addr >= (unsigned long)__lock_text_start
329 && addr < (unsigned long)__lock_text_end;
330}
331EXPORT_SYMBOL(in_lock_functions);