/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg of the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional
 * state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
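
/*
 * To make the table above concrete, the four encodings of lock->owner
 * (a sketch; RT_MUTEX_HAS_WAITERS is the bit 0 mask from
 * rtmutex_common.h):
 *
 *	NULL					free, no waiters
 *	(void *)RT_MUTEX_HAS_WAITERS		free, waiters queued
 *	task					held, fast unlock possible
 *	(void *)((unsigned long)task
 *		 | RT_MUTEX_HAS_WAITERS)	held, waiters queued
 *
 * Only the two states with bit 0 clear are eligible for the lockless
 * cmpxchg fast path below.
 */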

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
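
/*
 * For illustration, with cmpxchg available the uncontended paths
 * reduce to a single atomic operation each (compare
 * rt_mutex_fastlock()/rt_mutex_fastunlock() below); a sketch, not
 * additional API:
 *
 *	lock:	cmpxchg(&lock->owner, NULL, current) == NULL
 *	unlock:	cmpxchg(&lock->owner, current, NULL) == current
 *
 * Both compare values have bit 0 clear, so either cmpxchg fails as
 * soon as RT_MUTEX_HAS_WAITERS is set, which forces the caller into
 * the slow path under lock->wait_lock.
 */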

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
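
/*
 * Note that lower numerical values mean higher priority, which is why
 * min() implements the boost. As a worked example (hypothetical
 * values): a SCHED_NORMAL task with normal_prio 120 whose top pi
 * waiter sits at prio 45 runs boosted at prio 45; once that waiter is
 * gone, rt_mutex_getprio() returns 120 again.
 */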

/*
 * Adjust the priority of a task after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain. The default can
 * be changed via the kernel.max_lock_depth sysctl:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * The task cannot go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that
	 * top_waiter can be NULL when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task.
		 */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	/*
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * We reached the end of the lock chain. Stop right here. No
	 * point in going back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
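
/*
 * For orientation, the chain walked above has the shape (a sketch,
 * with T = task, L = rt_mutex):
 *
 *	T1 --blocked on--> L1 --owned by--> T2 --blocked on--> L2 --> ...
 *
 * Each pass through the loop requeues the waiter on the current lock,
 * boosts or deboosts that lock's owner and then advances to the lock
 * the owner is itself blocked on, holding at most one pi_lock and one
 * wait_lock at a time.
 */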

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * anymore. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * @task will get the lock when at least one of these
	 * conditions holds:
	 * 1) there is no other waiter
	 * 2) it has a higher priority than all waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * the task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;
	unsigned long flags;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other one would detect the deadlock and return -EDEADLK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (detect_deadlock && owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
					 next_lock, waiter, task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock after giving up on acquiring it.
 *
 * Must be called with lock->wait_lock held, by a task which has just
 * failed try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		/* Store the lock on which owner is blocked or NULL */
		next_lock = task_blocked_on_lock(owner);

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
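
/*
 * Typical usage, as a minimal sketch (foo_lock and foo_count are
 * made-up names):
 *
 *	static DEFINE_RT_MUTEX(foo_lock);
 *	static int foo_count;
 *
 *	static void foo_inc(void)
 *	{
 *		rt_mutex_lock(&foo_lock);
 *		foo_count++;
 *		rt_mutex_unlock(&foo_lock);
 *	}
 *
 * Unlike a plain mutex, a low priority owner of foo_lock inherits the
 * priority of the highest priority waiter until it unlocks.
 */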

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
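
/*
 * Callers must check the return value; a minimal sketch (foo_lock as
 * above):
 *
 *	ret = rt_mutex_lock_interruptible(&foo_lock, 0);
 *	if (ret)
 *		return ret;	(-EINTR if a signal arrived first)
 *	...
 *	rt_mutex_unlock(&foo_lock);
 */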

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
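
/*
 * The caller owns the timer setup. A sketch modeled on the futex code
 * (the exact hrtimer interface varies between kernel versions; expiry
 * is a caller supplied ktime_t):
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, expiry);
 *
 *	ret = rt_mutex_timed_lock(&foo_lock, &to, 0);
 *	destroy_hrtimer_on_stack(&to.timer);
 */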

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
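
/*
 * rt_mutex_trylock() never sleeps, so the usual trylock pattern
 * applies; a minimal sketch:
 *
 *	if (rt_mutex_trylock(&foo_lock)) {
 *		...			(lock acquired)
 *		rt_mutex_unlock(&foo_lock);
 *	}
 */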

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
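
/*
 * Locks embedded in dynamically allocated objects are initialized via
 * the rt_mutex_init() wrapper from <linux/rtmutex.h>, which supplies a
 * name to the debug code; a minimal sketch (struct foo is made up):
 *
 *	struct foo {
 *		struct rt_mutex lock;
 *		...
 *	};
 *
 *	rt_mutex_init(&f->lock);
 */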

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should
 *			already have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
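
/*
 * The two proxy calls pair up across threads. A rough sketch of how
 * the requeue-PI code in kernel/futex.c uses them (pi_mutex, waiter
 * and to are set up by that caller):
 *
 *	requeueing task:
 *		ret = rt_mutex_start_proxy_lock(&pi_mutex, &waiter, task, 0);
 *		(a return of 1 means the lock was taken for @task,
 *		 which must then be woken)
 *
 *	requeued task, after waking up:
 *		ret = rt_mutex_finish_proxy_lock(&pi_mutex, to, &waiter, 0);
 */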