/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                      is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It can also be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit0 before looking at the lock, and the owner may be
 * NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
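
/*
 * Example (illustrative): with an owner and the waiters bit set,
 *
 *      lock->owner == (struct task_struct *)((unsigned long)task | 1)
 *
 * so the fast-path release cmpxchg(&lock->owner, task, NULL) fails and
 * the release must take the slow path under ->wait_lock.
 */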

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        if (rt_mutex_has_waiters(lock))
                return;

        /*
         * The wait list has no waiters enqueued, now make sure that the
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen:
         *
         * CPU 0        CPU 1           CPU2
         * l->owner=T1
         *              rt_mutex_lock(l)
         *              lock(l->lock)
         *              l->owner = T1 | HAS_WAITERS;
         *              enqueue(T2)
         *              boost()
         *                unlock(l->lock)
         *              block()
         *
         *                              rt_mutex_lock(l)
         *                              lock(l->lock)
         *                              l->owner = T1 | HAS_WAITERS;
         *                              enqueue(T3)
         *                              boost()
         *                                unlock(l->lock)
         *                              block()
         *              signal(->T2)    signal(->T3)
         *              lock(l->lock)
         *              dequeue(T2)
         *              deboost()
         *                unlock(l->lock)
         *                              lock(l->lock)
         *                              dequeue(T3)
         *                               ==> wait list is empty
         *                              deboost()
         *                               unlock(l->lock)
         *              lock(l->lock)
         *              fixup_rt_mutex_waiters()
         *                if (wait_list_empty(l)) {
         *                  owner = l->owner & ~HAS_WAITERS;
         *                  l->owner = owner
         *                    ==> l->owner = T1
         *                }
         *                              lock(l->lock)
         * rt_mutex_unlock(l)           fixup_rt_mutex_waiters()
         *                                if (wait_list_empty(l)) {
         *                                  owner = l->owner & ~HAS_WAITERS;
         * cmpxchg(l->owner, T1, NULL)
         *  ===> Success (l->owner = NULL)
         *
         *                                  l->owner = owner
         *                                    ==> l->owner = T1
         *                                }
         *
         * With the check for the waiter bit in place T3 on CPU2 will not
         * overwrite. All tasks fiddling with the waiters bit are
         * serialized by l->lock, so nothing else can modify the waiters
         * bit. If the bit is set then nothing can change l->owner either
         * so the simple RMW is safe. The cmpxchg() will simply fail if it
         * happens in the middle of the RMW because the waiters bit is
         * still set.
         */
        owner = ACCESS_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS)
                ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS;
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

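        /*
         * Set the HAS_WAITERS bit with a cmpxchg loop: retry until the
         * update hits an owner word that nobody changed underneath us,
         * so a concurrent fast-path acquire or release cannot clear the
         * bit by overwriting lock->owner.
         */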
        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock(&lock->wait_lock);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *                                      mark_rt_mutex_waiters(lock);
         *                                      acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *                                      enqueue_waiter();
         *                                      unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
        return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock(&lock->wait_lock);
        return true;
}
#endif

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}
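
/*
 * Example (illustrative): kernel priority values are inverted, lower
 * means more important. With task->normal_prio == 10 and a top pi
 * waiter of prio 5, min() yields 5 and the task runs boosted; with a
 * top waiter of prio 20 the task stays at its normal 10.
 */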

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
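        /*
         * Illustrative walk (sketch): if T1 blocks on L2 owned by T2,
         * and T2 blocks on L3 owned by T3, each pass below requeues one
         * waiter and boosts one owner (first T2, then T3), holding only
         * that task's pi_lock plus the current lock's wait_lock, and
         * drops both before stepping to the next lock in the chain.
         */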
again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }
retry:
        /*
         * Task can not go away as we did a get_task() before !
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task.
                 */
                if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
                        goto out_unlock_pi;
        }

        /*
         * When deadlock detection is off then we check, if further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /*
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        /*
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        /*
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
        put_task_struct(task);

        return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         * - no other waiter is on the lock
         * - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * any more. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * @task will get the lock because of one of these conditions:
         * 1) there is no waiter
         * 2) higher priority than waiters
         * 3) it is top waiter
         */
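        /*
         * Example (illustrative, lower prio value = higher priority):
         * with a top waiter of prio 5, a task of prio 3 takes the lock
         * via 2); a task of prio 10 gets it only when it is that top
         * waiter itself, via 3).
         */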
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        plist_del(&waiter->list_entry, &lock->wait_list);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists) into
                 * task->pi_waiters list.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        top->pi_list_entry.prio = top->list_entry.prio;
                        plist_add(&top->pi_list_entry, &task->pi_waiters);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
        unsigned long flags;

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other will detect the deadlock and return -EDEADLOCK,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
        if (owner == task)
                return -EDEADLK;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, task->prio);
        plist_node_init(&waiter->pi_list_entry, task->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
                                         next_lock, waiter, task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}
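
/*
 * Note (illustrative): the chain walk must not nest inside this lock's
 * wait_lock. rt_mutex_adjust_prio_chain() wants to be preemptible and
 * holds at most two locks per step, so task_blocks_on_rt_mutex() above
 * drops lock->wait_lock around the call and reacquires it afterwards.
 */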

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * It's safe to dereference waiter as it cannot go away as
         * long as we hold lock->wait_lock. The waiter task needs to
         * acquire it in order to dequeue the waiter.
         */
        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                /* Store the lock on which owner is blocked or NULL */
                next_lock = task_blocked_on_lock(owner);

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        struct rt_mutex *next_lock;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
        next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);

        rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:    the rt_mutex to take
 * @state:   the state the task should block in (TASK_INTERRUPTIBLE
 *           or TASK_UNINTERRUPTIBLE)
 * @timeout: the pre-initialized and started timer, or NULL for none
 * @waiter:  the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                     struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        /*
         * Yell loudly and stop the task right here.
         */
        rt_mutex_print_deadlock(w);
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         * foo->lock->owner = NULL;
         *                      rtmutex_lock(foo->lock);   <- fast path
         *                      free = atomic_dec_and_test(foo->refcnt);
         *                      rtmutex_unlock(foo->lock); <- fast path
         *                      if (free)
         *                              kfree(foo);
         * raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *      owner = rt_mutex_owner(lock);
         *      clear_rt_mutex_waiters(lock);
         *      raw_spin_unlock(&lock->wait_lock);
         *      if (cmpxchg(&lock->owner, owner, 0) == owner)
         *              return;
         *      goto retry;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *      lock->owner = NULL;
         *      raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock) == true)
                        return;
                /* Relock the rtmutex and try again */
                raw_spin_lock(&lock->wait_lock);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         */
        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
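
/*
 * Example (sketch, not taken from this file): the usual blocking
 * pairing around a critical section, with my_lock a hypothetical,
 * already initialized struct rt_mutex:
 *
 *      rt_mutex_lock(&my_lock);
 *      ... critical section ...
 *      rt_mutex_unlock(&my_lock);
 */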

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible;
 *                       the timeout structure is provided
 *                       by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
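
/*
 * Example (sketch, assumed helpers from <linux/hrtimer.h>, not taken
 * from this file): callers typically prepare the sleeper with an
 * absolute expiry, roughly:
 *
 *      struct hrtimer_sleeper timeout;
 *
 *      hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
 *                            HRTIMER_MODE_ABS);
 *      hrtimer_init_sleeper(&timeout, current);
 *      hrtimer_set_expires(&timeout.timer, expiry);    // expiry: ktime_t
 *      ret = rt_mutex_timed_lock(lock, &timeout, 0);
 */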

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
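
/*
 * Example (sketch, assumed helpers from <linux/rtmutex.h>, not taken
 * from this file): static and dynamic initialization, roughly:
 *
 *      static DEFINE_RT_MUTEX(my_mutex);
 *
 *      struct rt_mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);
 *      if (m)
 *              rt_mutex_init(m);
 */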

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:        the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain. Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}
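
/*
 * Usage note (sketch): in the futex requeue-PI path the requeueing
 * task calls rt_mutex_start_proxy_lock() above on behalf of a woken
 * waiter, and that waiter later completes the acquisition itself via
 * rt_mutex_finish_proxy_lock() below.
 */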

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, null if none. hrtimer should already have
 *                      been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}