rwsem: more aggressive lock stealing in rwsem_down_write_failed
author	Michel Lespinasse <walken@google.com>
Tue, 7 May 2013 13:45:54 +0000 (06:45 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 May 2013 14:20:16 +0000 (07:20 -0700)
Some small code simplifications can be achieved by doing more aggressive
lock stealing:

- When rwsem_down_write_failed() notices that there are no active locks
  (and thus no thread to wake us if we decided to sleep), it used to wake
  the first queued process. However, stealing the lock is also sufficient
  to deal with this case, so we don't need this check anymore.

- In try_get_writer_sem(), we can steal the lock even when the first waiter
  is a reader. This is correct because the code path that wakes readers is
  protected by the wait_lock. The performance effects of this change are
  expected to be minimal: readers are still granted the lock (rather than
  having to acquire it themselves) when they reach the front of the wait
  queue, so we have essentially the same behavior as in rwsem-spinlock
  (a count-layout sketch follows below).
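
Both points rely on the rwsem count layout. As a reference, here is a minimal
illustrative sketch of the check that makes stealing safe; the bias values
shown are assumptions for a 64-bit build (the real constants are arch-defined
in the rwsem headers) and rwsem_no_active_lockers() is a hypothetical helper,
not a function in lib/rwsem.c:

#include <stdbool.h>

/* Assumed 64-bit bias layout; the real values are arch-defined. */
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Hypothetical helper: the low bits of the count track active readers
 * plus the active writer.  When they are zero the lock is free and a
 * queued writer may steal it regardless of who heads the wait list,
 * because the reader-wakeup path also runs under sem->wait_lock. */
static bool rwsem_no_active_lockers(long count)
{
	return (count & RWSEM_ACTIVE_MASK) == 0;
}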

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/lib/rwsem.c b/lib/rwsem.c
index c73bd96dc30c980779e44b05010f3574f8d369e2..2360bf204098aa4ef2a488a94b0e188f182c67bd 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -143,20 +143,12 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 }
 
 /* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
-                                       struct rwsem_waiter *waiter)
+static int try_get_writer_sem(struct rw_semaphore *sem)
 {
-       struct rwsem_waiter *fwaiter;
        long oldcount, adjustment;
 
-       /* only steal when first waiter is writing */
-       fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-       if (fwaiter->type != RWSEM_WAITING_FOR_WRITE)
-               return 0;
-
        adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-       /* Only one waiter in the queue: */
-       if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
+       if (list_is_singular(&sem->wait_list))
                adjustment -= RWSEM_WAITING_BIAS;
 
 try_again_write:
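
The hunk above ends at the try_again_write: label. For orientation, a rough
sketch of the steal/retry loop that follows it is shown below; this is
illustrative rather than the verbatim kernel code, and it assumes
rwsem_atomic_update() adds its first argument to sem->count and returns the
resulting value (try_get_writer_sem_sketch is a hypothetical name):

static int try_get_writer_sem_sketch(struct rw_semaphore *sem, long adjustment)
{
	long oldcount;

try_again_write:
	/* Optimistically add the writer bias and look at the old count. */
	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
	if (!(oldcount & RWSEM_ACTIVE_MASK))
		return 1;	/* no active lockers: we stole the lock */

	/* Someone holds the lock: back the adjustment out again.  If the
	 * lock became free while we were undoing, retry the steal. */
	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
		return 0;
	goto try_again_write;
}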
@@ -233,23 +225,18 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);
 
-       /* If there are no active locks, wake the front queued process(es) up.
-        *
-        * Alternatively, if we're called from a failed down_write(), there
-        * were already threads queued before us and there are no active
-        * writers, the lock must be read owned; so we try to wake any read
-        * locks that were queued ahead of us. */
-       if (count == RWSEM_WAITING_BIAS)
-               sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
-       else if (count > RWSEM_WAITING_BIAS &&
-                adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+       /* If there were already threads queued before us and there are no
+        * active writers, the lock must be read owned; so we try to wake
+        * any read locks that were queued ahead of us. */
+       if (count > RWSEM_WAITING_BIAS &&
+           adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
        /* wait until we successfully acquire the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
-               if (try_get_writer_sem(sem, &waiter))
+               if (try_get_writer_sem(sem))
                        break;
 
                raw_spin_unlock_irq(&sem->wait_lock);
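
The hunk is cut off inside the wait loop. For completeness, a rough,
non-verbatim sketch of how the slow path continues after dropping the
wait_lock: the task sleeps until woken, retakes the lock and retries the
steal, and once the steal succeeds it dequeues itself and returns.

		schedule();
		raw_spin_lock_irq(&sem->wait_lock);
	}

	/* Lock stolen successfully: dequeue ourselves and return. */
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	tsk->state = TASK_RUNNING;

	return sem;
}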