rcu: Consolidate expedited CPU selection
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>
Sun, 16 Aug 2015 02:00:31 +0000 (19:00 -0700)
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 7 Oct 2015 23:02:50 +0000 (16:02 -0700)
Now that sync_sched_exp_select_cpus() and sync_rcu_exp_select_cpus()
are identical aside from the argument to smp_call_function_single(),
this commit consolidates them with a functional argument.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

index ae582e3d4136bfcdce132e368ce69140bdd173b1..f44f4b30c68a1151c99de026f95ada982ef730dd 100644 (file)
@@ -3654,7 +3654,8 @@ static void synchronize_sched_expedited_cpu_stop(void *data)
  * Select the nodes that the upcoming expedited grace period needs
  * to wait for.
  */
-static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
+static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+                                    smp_call_func_t func)
 {
        int cpu;
        unsigned long flags;
@@ -3696,7 +3697,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
                        if (!(mask_ofl_ipi & mask))
                                continue;
-                       ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
+                       ret = smp_call_function_single(cpu, func, rsp, 0);
                        if (!ret)
                                mask_ofl_ipi &= ~mask;
                }
@@ -3788,7 +3789,7 @@ void synchronize_sched_expedited(void)
                return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
-       sync_sched_exp_select_cpus(rsp);
+       sync_rcu_exp_select_cpus(rsp, synchronize_sched_expedited_cpu_stop);
        synchronize_sched_expedited_wait(rsp);
 
        rcu_exp_gp_seq_end(rsp);
index 7880202f1e3829f4415a9af62263ea6c9b53e1b3..6cbfbfc586563a5d91bebcd88978f19559204b92 100644 (file)
@@ -708,65 +708,6 @@ static void sync_rcu_exp_handler(void *info)
        rcu_report_exp_rdp(rsp, rdp, true);
 }
 
-/*
- * Select the nodes that the upcoming expedited grace period needs
- * to wait for.
- */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long mask;
-       unsigned long mask_ofl_test;
-       unsigned long mask_ofl_ipi;
-       int ret;
-       struct rcu_node *rnp;
-
-       sync_exp_reset_tree(rsp);
-       rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
-
-               /* Each pass checks a CPU for identity, offline, and idle. */
-               mask_ofl_test = 0;
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-                       if (raw_smp_processor_id() == cpu ||
-                           cpu_is_offline(cpu) ||
-                           !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-                               mask_ofl_test |= rdp->grpmask;
-               }
-               mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
-
-               /*
-                * Need to wait for any blocked tasks as well.  Note that
-                * additional blocking tasks will also block the expedited
-                * GP until such time as the ->expmask bits are cleared.
-                */
-               if (rcu_preempt_has_tasks(rnp))
-                       rnp->exp_tasks = rnp->blkd_tasks.next;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
-               /* IPI the remaining CPUs for expedited quiescent state. */
-               mask = 1;
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-                       if (!(mask_ofl_ipi & mask))
-                               continue;
-                       ret = smp_call_function_single(cpu,
-                                                      sync_rcu_exp_handler,
-                                                      rsp, 0);
-                       if (!ret)
-                               mask_ofl_ipi &= ~mask;
-               }
-               /* Report quiescent states for those that went offline. */
-               mask_ofl_test |= mask_ofl_ipi;
-               if (mask_ofl_test)
-                       rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
-       }
-}
-
 /**
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
@@ -795,7 +736,7 @@ void synchronize_rcu_expedited(void)
        rcu_exp_gp_seq_start(rsp);
 
        /* Initialize the rcu_node tree in preparation for the wait. */
-       sync_rcu_exp_select_cpus(rsp);
+       sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);