rcu: Add transitivity to remaining rcu_node ->lock acquisitions
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 8 Oct 2015 22:36:54 +0000 (15:36 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 23 Nov 2015 18:37:35 +0000 (10:37 -0800)
The rule is that all acquisitions of the rcu_node structure's ->lock
must provide transitivity:  The lock is not acquired that frequently,
and sorting out exactly which required it and which did not would be
a maintenance nightmare.  This commit therefore supplies the needed
transitivity to the remaining ->lock acquisitions.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c

index daf17e248757604500f542887f171e75146f3b92..81aa1cdc6bc99b1f164aeb2dd1178764b914913b 100644 (file)
@@ -1214,7 +1214,7 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask != 0) {
                        for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
                                if (rnp->qsmask & (1UL << cpu))
@@ -1237,7 +1237,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 
        /* Only let one CPU complain about others per time interval. */
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1256,7 +1256,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
               rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
@@ -1327,7 +1327,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
        rcu_dump_cpu_stacks(rsp);
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -2897,7 +2897,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        /* Does this CPU require a not-yet-started grace period? */
        local_irq_save(flags);
        if (cpu_needs_another_gp(rsp, rdp)) {
-               raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
+               raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
                needwake = rcu_start_gp(rsp);
                raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
                if (needwake)
@@ -3718,7 +3718,7 @@ retry_ipi:
                                mask_ofl_ipi &= ~mask;
                        } else {
                                /* Failed, raced with offline. */
-                               raw_spin_lock_irqsave(&rnp->lock, flags);
+                               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                                if (cpu_online(cpu) &&
                                    (rnp->expmask & mask)) {
                                        raw_spin_unlock_irqrestore(&rnp->lock,
@@ -3727,8 +3727,8 @@ retry_ipi:
                                        if (cpu_online(cpu) &&
                                            (rnp->expmask & mask))
                                                goto retry_ipi;
-                                       raw_spin_lock_irqsave(&rnp->lock,
-                                                             flags);
+                                       raw_spin_lock_irqsave_rcu_node(rnp,
+                                                                      flags);
                                }
                                if (!(rnp->expmask & mask))
                                        mask_ofl_ipi &= ~mask;
@@ -4110,7 +4110,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;
-               raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
+               raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
                rnp->qsmaskinit |= mask;
                raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
        }
@@ -4127,7 +4127,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Set up local state, ensuring consistent view of global state. */
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
@@ -4154,7 +4154,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Set up local state, ensuring consistent view of global state. */
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
@@ -4301,7 +4301,7 @@ static int __init rcu_spawn_gp_kthread(void)
                t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
                BUG_ON(IS_ERR(t));
                rnp = rcu_get_root(rsp);
-               raw_spin_lock_irqsave(&rnp->lock, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rsp->gp_kthread = t;
                if (kthread_prio) {
                        sp.sched_priority = kthread_prio;
index fa0e3b96a9edd44b55789ba182373064a6a21b59..57ba873d2f180066ef2d9b8bbd5d333297679c4d 100644 (file)
@@ -525,7 +525,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
        unsigned long flags;
        struct task_struct *t;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
index ef7093cc9b5cd86c3f9f6c6cff6a33f4e0b62ea8..8efaba870d9608e9feccf55e814fa0d64bcc3727 100644 (file)
@@ -319,7 +319,7 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
        unsigned long gpmax;
        struct rcu_node *rnp = &rsp->node[0];
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
        completed = READ_ONCE(rsp->completed);
        gpnum = READ_ONCE(rsp->gpnum);
        if (completed == gpnum)