locking/rtmutex: Implement lockless top-waiter wakeup
author    Davidlohr Bueso <dave@stgolabs.net>
          Tue, 19 May 2015 17:24:55 +0000 (10:24 -0700)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 18 Jun 2015 20:27:46 +0000 (22:27 +0200)
Mark the task for later wakeup after the wait_lock has been released.
This way, once the next task is awoken, it will have a better chance
of finding the wait_lock free when continuing to execute in
__rt_mutex_slowlock() when trying to acquire the rtmutex, calling
try_to_take_rt_mutex(). In contended scenarios, other tasks attempting
to take the lock may acquire it first, right after the wait_lock is
released, but (a) this can also occur with the current code, as it
relies on spinlock fairness, and (b) we are dealing with the top-waiter
anyway, so it will always take the lock next.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1432056298-18738-2-git-send-email-dave@stgolabs.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
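
The gist of the change, in isolation: dequeue the top waiter under the
wait_lock, but defer the actual wakeup until after the lock has been
dropped. Below is a minimal userspace sketch of that pattern
(illustration only; fake_lock, struct waiter and the semaphore stand in
for the rtmutex internals and the task wakeup, they are not kernel
APIs):

/*
 * Userspace sketch of the deferred-wakeup pattern: dequeue the top
 * waiter while holding the wait lock, but only issue the wakeup once
 * the lock has been dropped. All names here are illustrative.
 */
#include <pthread.h>
#include <semaphore.h>

struct waiter {
	sem_t wakeup;			/* stand-in for the task's sleep state */
	struct waiter *next;
};

struct fake_lock {
	pthread_mutex_t wait_lock;	/* stands in for lock->wait_lock */
	struct waiter *top_waiter;	/* priority-ordered list, top first */
};

/* Analogue of mark_wakeup_next_waiter(): called with wait_lock held. */
static struct waiter *mark_next_waiter(struct fake_lock *lock)
{
	struct waiter *w = lock->top_waiter;

	if (w)
		lock->top_waiter = w->next;
	return w;			/* dequeued, but not yet woken */
}

/* Analogue of rt_mutex_slowunlock() after this patch. */
static void slowunlock(struct fake_lock *lock)
{
	struct waiter *w;

	pthread_mutex_lock(&lock->wait_lock);
	w = mark_next_waiter(lock);
	pthread_mutex_unlock(&lock->wait_lock);

	/*
	 * The wakeup now happens outside the critical section, so the
	 * woken waiter is more likely to find wait_lock uncontended.
	 */
	if (w)
		sem_post(&w->wakeup);
}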
kernel/locking/rtmutex.c

index b025295f49662469d1f3b4257f3835d2f40f01e1..44ee8f85a78bbef5d5628fc61cde3214745b8df9 100644
@@ -955,14 +955,13 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 }
 
 /*
- * Wake up the next waiter on the lock.
- *
  * Remove the top waiter from the current tasks pi waiter list and
- * wake it up.
+ * queue it up.
  *
  * Called with lock->wait_lock held.
  */
-static void wakeup_next_waiter(struct rt_mutex *lock)
+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+                                   struct rt_mutex *lock)
 {
        struct rt_mutex_waiter *waiter;
        unsigned long flags;
@@ -991,12 +990,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-       /*
-        * It's safe to dereference waiter as it cannot go away as
-        * long as we hold lock->wait_lock. The waiter task needs to
-        * acquire it in order to dequeue the waiter.
-        */
-       wake_up_process(waiter->task);
+       wake_q_add(wake_q, waiter->task);
 }
 
 /*
@@ -1258,6 +1252,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static void __sched
 rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+       WAKE_Q(wake_q);
+
        raw_spin_lock(&lock->wait_lock);
 
        debug_rt_mutex_unlock(lock);
@@ -1306,10 +1302,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
+        *
+        * Queue the next waiter for wakeup once we release the wait_lock.
         */
-       wakeup_next_waiter(lock);
+       mark_wakeup_next_waiter(&wake_q, lock);
 
        raw_spin_unlock(&lock->wait_lock);
+       wake_up_q(&wake_q);
 
        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
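
For reference, the wake_q machinery this patch builds on (introduced
earlier in this series in kernel/sched/core.c) works roughly as
sketched below; memory barriers and debug checks are omitted, so read
it as a simplified outline rather than the exact implementation. Note
that wake_q_add() takes a reference on the task, which is why the old
comment about the waiter not going away while wait_lock is held could
be dropped: the reference keeps the task alive between releasing
wait_lock and the deferred wakeup.

/*
 * Simplified sketch of the wake_q helpers (after kernel/sched/core.c
 * from the same series; memory barriers and debug checks omitted).
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically claim the node: if it is already in use, the task
	 * is queued on another wake_q and will be woken from there.
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	/*
	 * Pin the task so it cannot go away between being queued and
	 * the later wake_up_q(); this replaces relying on wait_lock to
	 * keep the waiter alive.
	 */
	get_task_struct(task);

	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		node = node->next;
		task->wake_q.next = NULL;	/* release the node */

		wake_up_process(task);
		put_task_struct(task);
	}
}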