rcu: Add expedited-grace-period event tracing
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Fri, 29 Jan 2016 04:49:49 +0000 (20:49 -0800)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Thu, 31 Mar 2016 20:34:06 +0000 (13:34 -0700)
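
This patch adds trace_rcu_exp_grace_period() event tracing to the expedited
grace-period code paths: a "snap" event when synchronize_sched_expedited()
and synchronize_rcu_expedited() snapshot the expedited sequence number,
"start" and "end" events bracketing the expedited grace period itself, and a
"done" event when sync_exp_work_done() finds that some other task's grace
period has already satisfied the request.  It also moves the pre-existing
trace_rcu_exp_funnel_lock() "acq" and "rel" events so that each fires while
the corresponding exp_funnel_mutex is still held, keeping the trace stream
consistent with the actual lock ordering.
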
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
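
The trace_rcu_exp_grace_period() tracepoint definition itself lives in
include/trace/events/rcu.h and is not shown in this excerpt. Below is a
minimal sketch of such a definition, assuming the three-argument
(rcuname, gpseq, gpevent) signature implied by the call sites in the diff;
treat it as an approximation, not the exact in-tree event:

TRACE_EVENT(rcu_exp_grace_period,

	/*
	 * Sketch only: rcuname names the RCU flavor, gpseq is the
	 * expedited sequence-number snapshot, and gpevent is one of
	 * "snap", "start", "done", or "end".
	 */
	TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),

	TP_ARGS(rcuname, gpseq, gpevent),

	TP_STRUCT__entry(
		__field(const char *, rcuname)
		__field(unsigned long, gpseq)
		__field(const char *, gpevent)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->gpseq = gpseq;
		__entry->gpevent = gpevent;
	),

	TP_printk("%s s=%lu %s",
		  __entry->rcuname, __entry->gpseq, __entry->gpevent)
);
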
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 79e9206a7b11c820bb7819880d766aaeea250b5e..524026fd9dd7f2167be8ee7829c084298b987ad3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3584,17 +3584,18 @@ static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
                               atomic_long_t *stat, unsigned long s)
 {
        if (rcu_exp_gp_seq_done(rsp, s)) {
+               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
                if (rnp) {
-                       mutex_unlock(&rnp->exp_funnel_mutex);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("rel"));
+                       mutex_unlock(&rnp->exp_funnel_mutex);
                } else if (rdp) {
-                       mutex_unlock(&rdp->exp_funnel_mutex);
                        trace_rcu_exp_funnel_lock(rsp->name,
                                                  rdp->mynode->level + 1,
                                                  rdp->cpu, rdp->cpu,
                                                  TPS("rel"));
+                       mutex_unlock(&rdp->exp_funnel_mutex);
                }
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
@@ -3624,12 +3625,12 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
        rnp0 = rcu_get_root(rsp);
        if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
                if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
-                       if (sync_exp_work_done(rsp, rnp0, NULL,
-                                              &rdp->expedited_workdone0, s))
-                               return NULL;
                        trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
                                                  rnp0->grplo, rnp0->grphi,
                                                  TPS("acq"));
+                       if (sync_exp_work_done(rsp, rnp0, NULL,
+                                              &rdp->expedited_workdone0, s))
+                               return NULL;
                        return rnp0;
                }
        }
@@ -3656,16 +3657,16 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
                trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
                                          rnp0->grplo, rnp0->grphi, TPS("acq"));
                if (rnp1) {
-                       mutex_unlock(&rnp1->exp_funnel_mutex);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp1->level,
                                                  rnp1->grplo, rnp1->grphi,
                                                  TPS("rel"));
+                       mutex_unlock(&rnp1->exp_funnel_mutex);
                } else {
-                       mutex_unlock(&rdp->exp_funnel_mutex);
                        trace_rcu_exp_funnel_lock(rsp->name,
                                                  rdp->mynode->level + 1,
                                                  rdp->cpu, rdp->cpu,
                                                  TPS("rel"));
+                       mutex_unlock(&rdp->exp_funnel_mutex);
                }
                rnp1 = rnp0;
        }
@@ -3895,16 +3896,21 @@ void synchronize_sched_expedited(void)
 
        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
        rnp = exp_funnel_lock(rsp, s);
        if (rnp == NULL)
                return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
        sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
        synchronize_sched_expedited_wait(rsp);
 
        rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+                                 rnp->grplo, rnp->grphi, TPS("rel"));
        mutex_unlock(&rnp->exp_funnel_mutex);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cd2dae43ff48f51d1041babb49f0b41fcf0a4469..36e94aed38a7c822d2f1d343b2d542851959e6fb 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -750,12 +750,14 @@ void synchronize_rcu_expedited(void)
        }
 
        s = rcu_exp_gp_seq_snap(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
        rnp_unlock = exp_funnel_lock(rsp, s);
        if (rnp_unlock == NULL)
                return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
 
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
@@ -766,6 +768,7 @@ void synchronize_rcu_expedited(void)
 
        /* Clean up and exit. */
        rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
        mutex_unlock(&rnp_unlock->exp_funnel_mutex);
        trace_rcu_exp_funnel_lock(rsp->name, rnp_unlock->level,
                                  rnp_unlock->grplo, rnp_unlock->grphi,
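
For context, the "snap" and "done" events bracket RCU's expedited sequence
counter: rcu_exp_gp_seq_snap() returns the counter value s at which a full
expedited grace period will have elapsed since the call, and
rcu_exp_gp_seq_done() (called from sync_exp_work_done()) tests whether the
counter has reached that value. A rough sketch of the underlying helpers
from the kernel/rcu/tree.c of this era, in which the counter's low-order
bit indicates a grace period in flight (an approximation rather than the
exact in-tree code):

/* Snapshot: the counter value at which a full GP will have elapsed. */
static unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	/* Round up past the current (or next) in-flight grace period. */
	s = (READ_ONCE(*sp) + 3) & ~0x1UL;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Has a full grace period elapsed since the snapshot "s"? */
static bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

With CONFIG_RCU_TRACE enabled, the new events appear alongside the existing
rcu: tracepoints, e.g. under
/sys/kernel/debug/tracing/events/rcu/rcu_exp_grace_period/.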