/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
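
/*
 * Summary of the lock->count encoding used throughout this file (a
 * descriptive note, inferred from the fastpath/slowpath code below):
 *
 *	 1 - unlocked
 *	 0 - locked, no waiters
 *	-1 - locked, and there may be waiters queued on lock->wait_list
 */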

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here,
	 * before its time: the slow path is always taken then, and it
	 * clears the owner field after verifying that the owner was
	 * indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
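
/*
 * Illustrative usage of mutex_lock()/mutex_unlock() (a minimal sketch,
 * not part of this file; 'my_driver_lock' and 'my_event_count' are
 * hypothetical names):
 *
 *	static DEFINE_MUTEX(my_driver_lock);
 *	static unsigned long my_event_count;
 *
 *	void my_record_event(void)
 *	{
 *		mutex_lock(&my_driver_lock);
 *		my_event_count++;
 *		mutex_unlock(&my_driver_lock);
 *	}
 */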

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have been preempted
		 * between the owner acquiring the lock and setting the
		 * owner field. If we're an RT task, this can live-lock,
		 * because we won't let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
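
/*
 * Illustrative lockdep annotation (a sketch; 'parent' and 'child' are
 * hypothetical objects whose locks belong to the same lock class).
 * When two mutexes of one class must be held at once, the inner lock
 * is taken with mutex_lock_nested() and a distinct subclass, so that
 * lockdep can tell the two nesting levels apart instead of reporting
 * recursive locking:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */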

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
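
/*
 * Illustrative usage (a sketch; 'my_lock' is a hypothetical mutex in a
 * syscall/ioctl path): the nonzero return value (-EINTR) is expected
 * to be propagated back to user space:
 *
 *	ret = mutex_lock_interruptible(&my_lock);
 *	if (ret)
 *		return ret;
 *	... critical section ...
 *	mutex_unlock(&my_lock);
 */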
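/**
 * mutex_lock_killable - acquire the mutex, interruptible by fatal signals
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 once the mutex has
 * been acquired. If a signal that would be fatal to the current
 * process is delivered while waiting for the lock, this function
 * returns -EINTR instead of continuing to sleep.
 */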
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
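
/*
 * Illustrative usage (a sketch; 'my_lock' is hypothetical). Note the
 * spin_trylock()-style convention: nonzero means the lock was taken.
 *
 *	if (mutex_trylock(&my_lock)) {
 *		... fast path, lock held ...
 *		mutex_unlock(&my_lock);
 *	} else {
 *		... contended: fall back, or retry later ...
 *	}
 */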

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
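
/*
 * Illustrative usage (a sketch; 'obj', its 'refcnt'/'node' fields and
 * 'obj_list_lock' are hypothetical): the classic "refcount drops to
 * zero" teardown, where the final put must also unlink the object
 * under the list mutex:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */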