/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>

# ifndef CONFIG_LOCKDEP
# define CREATE_TRACE_POINTS
# endif
# include <trace/events/lock.h>

#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)

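/*
 * Quick reference (an editorial summary derived from the fastpath/slowpath
 * code below, not an authoritative contract) for how ->count is used in
 * this file:
 *
 *       1  - the mutex is unlocked
 *       0  - the mutex is locked and no waiters are queued
 *      <0  - the mutex is locked and there may be waiters on ->wait_list,
 *            so the unlock path must take the slowpath and issue a wakeup
 *
 * The lock fastpath is the 1->0 transition, the unlock fastpath is the
 * 0->1 transition, and waiters xchg() the count to -1 before sleeping.
 */
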
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        lock->spin_mlock = NULL;
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

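/*
 * Illustrative usage sketch (editorial addition, not part of this file's
 * API): a caller serializing access to shared state with the
 * mutex_lock()/mutex_unlock() pair documented above. The names my_dev and
 * my_dev_set_state are hypothetical.
 *
 *      struct my_dev {
 *              struct mutex lock;
 *              int state;
 *      };
 *
 *      static void my_dev_set_state(struct my_dev *dev, int new_state)
 *      {
 *              mutex_lock(&dev->lock);
 *              dev->state = new_state;
 *              mutex_unlock(&dev->lock);
 *      }
 *
 * mutex_lock() may sleep, so this pattern is only valid in process context,
 * and the unlock must be done by the same task that took the lock.
 */
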
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners all trying to acquire the
 * mutex more or less simultaneously, the spinners need to take an MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
        struct mspin_node *next;
        int locked;             /* 1 if lock acquired */
};
#define MLOCK(mutex)    ((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *prev;

        /* Init node */
        node->locked = 0;
        node->next = NULL;

        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /* Lock acquired */
                node->locked = 1;
                return;
        }
        ACCESS_ONCE(prev->next) = node;
        smp_wmb();
        /* Wait until the lock holder passes the lock down */
        while (!ACCESS_ONCE(node->locked))
                arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *next = ACCESS_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL
                 */
                if (cmpxchg(lock, node, NULL) == node)
                        return;
                /* Wait until the next pointer is set */
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }
        ACCESS_ONCE(next->locked) = 1;
        smp_wmb();
}

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking that
         * lock->owner still matches owner. If that fails, owner might point
         * to freed memory; if it still matches, the rcu_read_lock() ensures
         * the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign of heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        int retval = 1;

        rcu_read_lock();
        if (lock->owner)
                retval = lock->owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the owner may have just acquired the
         * mutex and not yet set the owner field, or the mutex may already
         * have been released.
         */
        return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner prematurely:
         * the slow path will always be taken, and it clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
#ifdef CONFIG_DEBUG_MUTEXES
        unsigned char __mutex_contended = 0;
#endif

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         *
         * The mutex spinners are queued up using the MCS lock so that only
         * one spinner can compete for the mutex. However, if mutex spinning
         * isn't going to happen, there is no point in going through the
         * lock/unlock overhead.
         */
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        for (;;) {
                struct task_struct *owner;
                struct mspin_node node;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                mspin_lock(MLOCK(lock), &node);
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mspin_unlock(MLOCK(lock), &node);
                        break;
                }

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        mspin_unlock(MLOCK(lock), &node);
                        preempt_enable();
                        return 0;
                }
                mspin_unlock(MLOCK(lock), &node);

                /*
                 * When there's no owner, we might have preempted the owner
                 * between it acquiring the lock and setting the owner field.
                 * If we're an RT task, that will live-lock because we won't
                 * let the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
slowpath:
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
                goto done;

        lock_contended(&lock->dep_map, ip);
#ifdef CONFIG_DEBUG_MUTEXES
        trace_mutex_contended(lock, ip);
        __mutex_contended = 1;  /* to pair mutex_contended & mutex_acquired */
#endif

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
#ifdef CONFIG_DEBUG_MUTEXES
        if (unlikely(__mutex_contended > 0))
                trace_mutex_acquired(lock, ip);
#endif
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
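
/*
 * Illustrative sketch (editorial addition; my_obj and lock_pair are
 * hypothetical names): when two locks of the same lock class must be held
 * at once, lockdep needs to be told that the second acquisition is a
 * deliberate nesting. A fixed locking order plus mutex_lock_nested() with
 * a distinct subclass (commonly SINGLE_DEPTH_NESTING) expresses that:
 *
 *      static void lock_pair(struct my_obj *parent, struct my_obj *child)
 *      {
 *              mutex_lock(&parent->lock);
 *              mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      }
 */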

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
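
/*
 * Illustrative usage sketch (editorial addition; try_fast_path and
 * do_quick_update are hypothetical names): because mutex_trylock() returns
 * 1 on success and 0 on contention (the opposite of down_trylock()), it
 * reads naturally in an if-statement:
 *
 *      static bool try_fast_path(struct my_dev *dev)
 *      {
 *              if (!mutex_trylock(&dev->lock))
 *                      return false;
 *              do_quick_update(dev);
 *              mutex_unlock(&dev->lock);
 *              return true;
 *      }
 *
 * On failure the caller falls back to a slower path instead of sleeping.
 */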

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
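
/*
 * Illustrative usage sketch (editorial addition; my_obj_put, my_obj_destroy
 * and the fields refcount/list_lock/node are hypothetical): the typical
 * caller drops a reference and, only when it was the last one, tears the
 * object down while already holding the mutex that the teardown needs:
 *
 *      static void my_obj_put(struct my_obj *obj)
 *      {
 *              if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->list_lock)) {
 *                      list_del(&obj->node);
 *                      mutex_unlock(&obj->list_lock);
 *                      my_obj_destroy(obj);
 *              }
 *      }
 *
 * When the function returns 1, the count hit zero and the mutex is held, so
 * no other task can find the object through the locked structure while it
 * is being removed.
 */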