/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>

# ifndef CONFIG_LOCKDEP
#  define CREATE_TRACE_POINTS
# endif
# include <trace/events/lock.h>

#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
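
/*
 * For reference, the counter protocol used throughout this file is:
 *
 *	 1  - unlocked
 *	 0  - locked, no waiters
 *	<0  - locked, one or more tasks blocked on ->wait_list
 *
 * The snippet below is only an illustrative sketch of how that encoding
 * could be queried; example_mutex_has_waiters() is a made-up name and is
 * not part of the mutex API.
 */
#if 0
static inline bool example_mutex_has_waiters(struct mutex *lock)
{
	/* a negative count means somebody is queued on the wait list */
	return atomic_read(&lock->count) < 0;
}
#endif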

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

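/*
 * The per-architecture fastpath helpers hide the actual atomic operation.
 * As a rough, illustrative sketch (assuming the generic "decrement" flavour,
 * asm-generic/mutex-dec.h; other architectures use xchg or hand-written
 * assembly), __mutex_fastpath_lock() behaves approximately like:
 */
#if 0
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 1 -> 0 means we got the lock; anything else falls into the slowpath */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}
#endif
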
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
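
/*
 * Illustrative usage sketch only: a mutex embedded in a hypothetical
 * driver structure (struct example_dev and its fields are made up).
 * The mutex must be initialized before first use and must be unlocked
 * by the same task that locked it, as described above.
 */
#if 0
struct example_dev {
	struct mutex	lock;
	int		open_count;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->lock);
}

static void example_dev_open(struct example_dev *dev)
{
	mutex_lock(&dev->lock);		/* may sleep, so not from atomic context */
	dev->open_count++;
	mutex_unlock(&dev->lock);
}
#endif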

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners all trying to acquire the
 * mutex more or less simultaneously, the spinners need to acquire an MCS
 * lock first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
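
/*
 * Illustrative sketch only (example_mcs_spin() is a made-up name): this is
 * the pattern __mutex_lock_common() below uses. Each would-be spinner
 * queues an mspin_node that lives on its own stack, so at most one task
 * per mutex is busy-polling the owner at any time.
 */
#if 0
static void example_mcs_spin(struct mutex *lock)
{
	struct mspin_node node;		/* on this task's stack */

	mspin_lock(MLOCK(lock), &node);
	/* ...only one spinner per mutex reaches this point at a time... */
	mspin_unlock(MLOCK(lock), &node);
}
#endif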

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that check fails, owner might
	 * point to freed memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time;
	 * the slow path will always be taken and clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned char __mutex_contended = 0;
#endif

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);
#ifdef CONFIG_DEBUG_MUTEXES
	trace_mutex_contended(lock, ip);
	__mutex_contended = 1;	/* so that trace_mutex_acquired() pairs up below */
#endif

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
#ifdef CONFIG_DEBUG_MUTEXES
	if (unlikely(__mutex_contended))
		trace_mutex_acquired(lock, ip);
#endif
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
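
/*
 * Illustrative sketch only: the _nested() variants exist to tell lockdep
 * that taking two locks of the same lock class is intentional. struct
 * example_obj and example_transfer() are made-up names; callers must still
 * impose a real ordering (here: by address) to avoid ABBA deadlocks.
 */
#if 0
struct example_obj {
	struct mutex	lock;
	int		value;
};

static void example_transfer(struct example_obj *a, struct example_obj *b)
{
	if (a > b)
		swap(a, b);			/* stable locking order */

	mutex_lock(&a->lock);
	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);

	a->value += b->value;
	b->value = 0;

	mutex_unlock(&b->lock);
	mutex_unlock(&a->lock);
}
#endif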

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
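
/*
 * Illustrative sketch only: typical error handling for the interruptible
 * variant. example_dev_wait() and its fields are made-up names; the usual
 * convention is to turn the -EINTR from mutex_lock_interruptible() into
 * -ERESTARTSYS when returning along a syscall path.
 */
#if 0
static int example_dev_wait(struct example_dev *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* a signal interrupted the wait */

	dev->open_count++;		/* ...critical section... */

	mutex_unlock(&dev->lock);
	return 0;
}
#endif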

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
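
/*
 * Illustrative sketch only (example_dev_poll() is a made-up name): because
 * mutex_trylock() follows the spin_trylock() convention, a non-zero return
 * means the lock is now held and must be released; zero means somebody
 * else owns it, so simply skip the optional work.
 */
#if 0
static void example_dev_poll(struct example_dev *dev)
{
	if (!mutex_trylock(&dev->lock))
		return;			/* contended - try again later */

	dev->open_count++;		/* ...fast, optional work... */

	mutex_unlock(&dev->lock);
}
#endif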

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold @lock if we dec to 0, return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
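
/*
 * Illustrative sketch only: the classic use is a refcounted object whose
 * teardown must happen under a mutex, while ordinary puts stay lock-free.
 * example_put(), example_list_lock and the object layout are made up.
 */
#if 0
static void example_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock))
		return;			/* not the final reference */

	/* final reference: we hold example_list_lock here */
	list_del(&obj->node);
	mutex_unlock(&example_list_lock);
	kfree(obj);
}
#endif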