locking/percpu-rwsem: Replace waitqueue with rcuwait
author    Davidlohr Bueso <dave@stgolabs.net>
          Wed, 11 Jan 2017 15:22:26 +0000 (07:22 -0800)
committer Ingo Molnar <mingo@kernel.org>
          Sat, 14 Jan 2017 10:14:35 +0000 (11:14 +0100)
The use of any kind of wait queue is overkill for pcpu-rwsems.
While one option would be the lighter simple wait queue (swait)
flavor, even that is more than pcpu-rwsems need. For one, we do
not care about any sort of queuing: the only (rare) time writers
(and readers, for that matter) are queued is when trying to
acquire the regular contended rw_sem. There can be no further
queuing, as writers are already serialized by the rw_sem in the
first place.

Given that percpu_down_write() must not be called after exit_notify(),
we can replace the bulky waitqueue with rcuwait, such that a writer
can wait for its turn to take the lock. This avoids both the queue
handling and the waitqueue locking overhead; see the sketch below.
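
For reference, a minimal sketch (not part of this patch) of the rcuwait
pattern the converted code relies on. Only rcuwait_init(),
rcuwait_wait_event() and rcuwait_wake_up() are the real primitives used
in the diff; the surrounding struct and helpers are made up for
illustration:

    #include <linux/atomic.h>
    #include <linux/rcuwait.h>

    /*
     * Hypothetical example: a single writer waits for 'active' users
     * to drain. Only the rcuwait_*() calls mirror this patch.
     */
    struct foo {
            atomic_t        active;
            struct rcuwait  writer;         /* at most one blocked task */
    };

    static void foo_init(struct foo *f)
    {
            atomic_set(&f->active, 0);
            rcuwait_init(&f->writer);
    }

    static void foo_get(struct foo *f)
    {
            atomic_inc(&f->active);
    }

    /* The last active user prods the (possibly) blocked writer. */
    static void foo_put(struct foo *f)
    {
            if (atomic_dec_and_test(&f->active))
                    rcuwait_wake_up(&f->writer);
    }

    /*
     * Writers are serialized elsewhere (as percpu-rwsem writers are by
     * ->rw_sem), so only one task ever sleeps on the rcuwait.
     */
    static void foo_wait_for_users(struct foo *f)
    {
            rcuwait_wait_event(&f->writer, !atomic_read(&f->active));
    }

The key constraint, as with the percpu-rwsem writer, is that at most
one task may sleep on the rcuwait at any time.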

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1484148146-14210-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/percpu-rwsem.h
kernel/locking/percpu-rwsem.c

index 5b2e6159b744a35a845a82e9213295be754fbabd..93664f022ecf15fcbe7640d37989d520d3ae6d9a 100644 (file)
@@ -4,15 +4,15 @@
 #include <linux/atomic.h>
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
-#include <linux/wait.h>
+#include <linux/rcuwait.h>
 #include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
        struct rcu_sync         rss;
        unsigned int __percpu   *read_count;
-       struct rw_semaphore     rw_sem;
-       wait_queue_head_t       writer;
+       struct rw_semaphore     rw_sem; /* slowpath */
+       struct rcuwait          writer; /* blocked writer */
        int                     readers_block;
 };
 
@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name = {                            \
        .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),        \
        .read_count = &__percpu_rwsem_rc_##name,                        \
        .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                     \
-       .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),           \
+       .writer = __RCUWAIT_INITIALIZER(name.writer),                   \
 }
 
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
index ce182599cf2e98b51831adbf5dca6ce545df0d7f..883cf1b92d9084f30a21f699211d6cd2ca3b9362 100644 (file)
@@ -1,7 +1,6 @@
 #include <linux/atomic.h>
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
-#include <linux/wait.h>
 #include <linux/lockdep.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/rcupdate.h>
@@ -18,7 +17,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
        rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
        __init_rwsem(&sem->rw_sem, name, rwsem_key);
-       init_waitqueue_head(&sem->writer);
+       rcuwait_init(&sem->writer);
        sem->readers_block = 0;
        return 0;
 }
@@ -103,7 +102,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
        __this_cpu_dec(*sem->read_count);
 
        /* Prod writer to recheck readers_active */
-       wake_up(&sem->writer);
+       rcuwait_wake_up(&sem->writer);
 }
 EXPORT_SYMBOL_GPL(__percpu_up_read);
 
@@ -160,7 +159,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
         */
 
        /* Wait for all now active readers to complete. */
-       wait_event(sem->writer, readers_active_check(sem));
+       rcuwait_wait_event(&sem->writer, readers_active_check(sem));
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
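
For completeness, a sketch of how callers use the API after this
change; nothing changes for them, since the rcuwait is purely internal.
The semaphore name below is hypothetical:

    #include <linux/percpu-rwsem.h>

    /* Hypothetical instance; uses the initializer updated above. */
    DEFINE_STATIC_PERCPU_RWSEM(my_sem);

    static void reader(void)
    {
            percpu_down_read(&my_sem);   /* fast path: per-CPU counter only */
            /* ... read-side critical section ... */
            percpu_up_read(&my_sem);     /* slow path may rcuwait_wake_up() the writer */
    }

    static void writer(void)
    {
            percpu_down_write(&my_sem);  /* waits for readers via rcuwait_wait_event() */
            /* ... write-side critical section ... */
            percpu_up_write(&my_sem);
    }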