rcu: Move rcu_barrier_completion to rcu_state structure
author	Paul E. McKenney <paul.mckenney@linaro.org>
Tue, 29 May 2012 10:03:37 +0000 (03:03 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 2 Jul 2012 19:33:22 +0000 (12:33 -0700)
In order to allow each RCU flavor to concurrently execute its
rcu_barrier() function, it is necessary to move the relevant
state to the rcu_state structure.  This commit therefore moves the
rcu_barrier_completion global variable to a new ->barrier_completion
field in the rcu_state structure.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
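
To make the intent concrete before the diff itself, here is a hypothetical user-space sketch of the same pattern (plain pthreads standing in for the kernel's completion API, with invented names such as flavor_state): the completion is embedded in each flavor's state structure rather than living as a single file-scope variable, so two barrier operations can initialize and wait on their own completions concurrently.

/*
 * Hypothetical user-space sketch, not kernel code: each "flavor" embeds
 * its own completion-like object in its state structure, so two barrier
 * operations can wait independently instead of sharing one global.
 */
#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Per-flavor state: the completion lives here, not at file scope. */
struct flavor_state {
	const char *name;
	struct completion barrier_completion;
};

static struct flavor_state flavors[2] = {
	{ .name = "flavor_a" },
	{ .name = "flavor_b" },
};

static void *barrier_thread(void *arg)
{
	struct flavor_state *fsp = arg;

	init_completion(&fsp->barrier_completion);
	/* In the kernel, the last rcu_barrier_callback() would do this. */
	complete(&fsp->barrier_completion);
	wait_for_completion(&fsp->barrier_completion);
	printf("%s: barrier complete\n", fsp->name);
	return NULL;
}

int main(void)
{
	pthread_t tids[2];
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&tids[i], NULL, barrier_thread, &flavors[i]);
	for (i = 0; i < 2; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

With a single global rcu_barrier_completion, the two threads above would have to serialize; with the completion moved into the per-flavor state, each waits only on its own.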
kernel/rcutree.c
kernel/rcutree.h

index 5929b021666ddab88bb84c24ea0b735cb989b68d..ca7d1678ac79f9af6685bf3d72202df9662b692c 100644 (file)
@@ -158,7 +158,6 @@ unsigned long rcutorture_vernum;
 /* State information for rcu_barrier() and friends. */
 
 static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
@@ -2275,7 +2274,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
        struct rcu_state *rsp = rdp->rsp;
 
        if (atomic_dec_and_test(&rsp->barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+               complete(&rsp->barrier_completion);
 }
 
 /*
@@ -2325,7 +2324,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * 6.   Both rcu_barrier_callback() callbacks are invoked, awakening
         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
         */
-       init_completion(&rcu_barrier_completion);
+       init_completion(&rsp->barrier_completion);
        atomic_set(&rsp->barrier_cpu_count, 1);
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
        rsp->rcu_barrier_in_progress = current;
@@ -2375,10 +2374,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * CPU, and thus each counted, remove the initial count.
         */
        if (atomic_dec_and_test(&rsp->barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+               complete(&rsp->barrier_completion);
 
        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
-       wait_for_completion(&rcu_barrier_completion);
+       wait_for_completion(&rsp->barrier_completion);
 
        /* Other rcu_barrier() invocations can now safely proceed. */
        mutex_unlock(&rcu_barrier_mutex);
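
The atomic_set(..., 1) followed by atomic_dec_and_test() in the hunk above is the usual self-reference trick: the initiator holds one count of its own so the completion cannot be signaled until every CPU's callback has been posted, and whichever decrement takes the count to zero does the complete(). A stripped-down user-space sketch of just that pattern, using C11 atomics and invented names rather than the kernel's APIs:

/*
 * Illustrative only: the initiator's extra count keeps the "completion"
 * from firing before all callbacks have been posted.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static atomic_int barrier_done;		/* stand-in for the completion */

/* Runs once per posted callback (in the kernel, later and per CPU). */
static void barrier_callback(void)
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		atomic_store(&barrier_done, 1);		/* complete() */
}

static void do_barrier(int ncpus)
{
	int cpu;

	atomic_store(&barrier_done, 0);
	atomic_store(&barrier_cpu_count, 1);	/* initiator's own count */

	for (cpu = 0; cpu < ncpus; cpu++) {
		atomic_fetch_add(&barrier_cpu_count, 1);  /* one per callback */
		barrier_callback();	/* the posted callback eventually runs */
	}

	/* Drop the initial count; the last decrementer completes. */
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		atomic_store(&barrier_done, 1);

	while (!atomic_load(&barrier_done))	/* wait_for_completion() */
		;
	printf("barrier done across %d CPUs\n", ncpus);
}

int main(void)
{
	do_barrier(4);
	return 0;
}
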
index c57ef0b7f0978ac61ff0743b91deb6288e14385a..d1ca4424122bf2f1e365968877bbb6a933ae681c 100644 (file)
@@ -401,6 +401,7 @@ struct rcu_state {
                                                /* Task doing rcu_barrier(), */
                                                /*  or NULL if no barrier. */
        atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
+       struct completion barrier_completion;   /* Wake at barrier end. */
        raw_spinlock_t fqslock;                 /* Only one task forcing */
                                                /*  quiescent states. */
        unsigned long jiffies_force_qs;         /* Time at which to invoke */