Merge commit '8700c95adb03' into timers/nohz

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 073ded26e2595f0d2971a771f01e0b066d4ea56f..71bd7337d0ccf2e49ff15ae3cdd687a2519352f8 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -28,6 +28,7 @@
 #include <linux/gfp.h>
 #include <linux/oom.h>
 #include <linux/smpboot.h>
+#include <linux/tick.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1913,10 +1914,11 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
                ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
+       printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
               cpu, ticks_value, ticks_title,
               atomic_read(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
+              rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               fast_no_hz);
 }
 
@@ -1930,6 +1932,7 @@ static void print_cpu_stall_info_end(void)
 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
 {
        rdp->ticks_this_gp = 0;
+       rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
 }
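
zero_cpu_stall_ticks() above snapshots the CPU's RCU_SOFTIRQ count into
->softirq_snap, and print_cpu_stall_info() prints that snapshot next to the
live count as the new softirq=%u/%u field.  A minimal sketch of how the
pair can be read (the helper name is hypothetical, not part of this patch):

	/*
	 * Hypothetical helper: if the snapshot still equals the live
	 * count, no RCU_SOFTIRQ handlers have run on this CPU since
	 * the stall-tick counters were last zeroed, i.e. RCU core
	 * processing has not been happening on this CPU.
	 */
	static bool rcu_softirq_stalled(struct rcu_data *rdp, int cpu)
	{
		return rdp->softirq_snap == kstat_softirqs_cpu(RCU_SOFTIRQ, cpu);
	}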
 
 /* Increment ->ticks_this_gp for all flavors of RCU. */
@@ -2018,30 +2021,20 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
 {
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       return rnp->n_nocb_gp_requests[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+       return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
 }
 
 /*
- * Clean up this rcu_node structure's no-CBs state at the end of
- * a grace period, and also return whether any no-CBs CPU associated
- * with this rcu_node structure needs another grace period.
+ * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+ * grace period.
  */
-static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       int c = rnp->completed;
-       int needmore;
-
-       wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
-       rnp->n_nocb_gp_requests[c & 0x1] = 0;
-       needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
-       trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed,
-                                     c, rnp->level, rnp->grplo, rnp->grphi,
-                                     needmore ? "CleanupMore" : "Cleanup");
-       return needmore;
+       wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
 }
 
 /*
- * Set the root rcu_node structure's ->n_nocb_gp_requests field
+ * Set the root rcu_node structure's ->need_future_gp field
  * based on the sum of those of all rcu_node structures.  This does
  * double-count the root rcu_node structure's requests, but this
  * is necessary to handle the possibility of a rcu_nocb_kthread()
@@ -2050,7 +2043,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  */
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
 {
-       rnp->n_nocb_gp_requests[(rnp->completed + 1) & 0x1] += nrq;
+       rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
 }
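
Because a CPU can only ever request the *next* grace period, a two-element
array indexed by the low-order bit of the grace-period number suffices to
track outstanding requests.  An illustrative sketch of the indexing scheme,
reconstructed from the code removed above (standalone, not kernel code):

	/*
	 * Requests are filed under the low-order bit of the grace
	 * period they wait for.  When GP "c" ends, slot c & 0x1 is
	 * drained, and slot (c + 1) & 0x1 already holds any requests
	 * for the following grace period.
	 */
	static int need_future_gp[2];

	static void request_future_gp(unsigned long completed)
	{
		need_future_gp[(completed + 1) & 0x1]++;
	}

	static int future_gp_cleanup(unsigned long c)	/* GP "c" just ended. */
	{
		need_future_gp[c & 0x1] = 0;
		return need_future_gp[(c + 1) & 0x1];	/* Need another GP? */
	}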
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -2060,7 +2053,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 /* Is the specified CPU a no-CBs CPU? */
-static bool is_nocb_cpu(int cpu)
+bool rcu_is_nocb_cpu(int cpu)
 {
        if (have_rcu_nocb_mask)
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@ -2118,7 +2111,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy)
 {
 
-       if (!is_nocb_cpu(rdp->cpu))
+       if (!rcu_is_nocb_cpu(rdp->cpu))
                return 0;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
        if (__is_kfree_rcu_offset((unsigned long)rhp->func))
@@ -2142,7 +2135,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
        long qll = rsp->qlen_lazy;
 
        /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
-       if (!is_nocb_cpu(smp_processor_id()))
+       if (!rcu_is_nocb_cpu(smp_processor_id()))
                return 0;
        rsp->qlen = 0;
        rsp->qlen_lazy = 0;
@@ -2175,84 +2168,16 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        bool d;
        unsigned long flags;
        struct rcu_node *rnp = rdp->mynode;
-       struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       c = rnp->completed + 2;
-
-       /* Count our request for a grace period. */
-       rnp->n_nocb_gp_requests[c & 0x1]++;
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, "Startleaf");
-
-       if (rnp->gpnum != rnp->completed) {
-
-               /*
-                * This rcu_node structure believes that a grace period
-                * is in progress, so we are done.  When this grace
-                * period ends, our request will be acted upon.
-                */
-               trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                             rnp->completed, c, rnp->level,
-                                             rnp->grplo, rnp->grphi,
-                                             "Startedleaf");
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
-       } else {
-
-               /*
-                * Might not be a grace period, check root rcu_node
-                * structure to see if we must start one.
-                */
-               if (rnp != rnp_root)
-                       raw_spin_lock(&rnp_root->lock); /* irqs disabled. */
-               if (rnp_root->gpnum != rnp_root->completed) {
-                       trace_rcu_future_grace_period(rdp->rsp->name,
-                                                     rnp->gpnum,
-                                                     rnp->completed,
-                                                     c, rnp->level,
-                                                     rnp->grplo, rnp->grphi,
-                                                     "Startedleafroot");
-                       raw_spin_unlock(&rnp_root->lock); /* irqs disabled. */
-               } else {
-
-                       /*
-                        * No grace period, so we need to start one.
-                        * The good news is that we can wait for exactly
-                        * one grace period instead of part of the current
-                        * grace period and all of the next grace period.
-                        * Adjust counters accordingly and start the
-                        * needed grace period.
-                        */
-                       rnp->n_nocb_gp_requests[c & 0x1]--;
-                       c = rnp_root->completed + 1;
-                       rnp->n_nocb_gp_requests[c & 0x1]++;
-                       rnp_root->n_nocb_gp_requests[c & 0x1]++;
-                       trace_rcu_future_grace_period(rdp->rsp->name,
-                                                     rnp->gpnum,
-                                                     rnp->completed,
-                                                     c, rnp->level,
-                                                     rnp->grplo, rnp->grphi,
-                                                     "Startedroot");
-                       rcu_start_gp(rdp->rsp);
-                       raw_spin_unlock(&rnp->lock);
-               }
-
-               /* Clean up locking and irq state. */
-               if (rnp != rnp_root)
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               else
-                       local_irq_restore(flags);
-       }
+       c = rcu_start_future_gp(rnp, rdp);
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
         * Wait for the grace period.  Do so interruptibly to avoid messing
         * up the load average.
         */
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, "StartWait");
+       trace_rcu_future_gp(rnp, rdp, c, "StartWait");
        for (;;) {
                wait_event_interruptible(
                        rnp->nocb_gp_wq[c & 0x1],
@@ -2260,14 +2185,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                if (likely(d))
                        break;
                flush_signals(current);
-               trace_rcu_future_grace_period(rdp->rsp->name,
-                                             rnp->gpnum, rnp->completed, c,
-                                             rnp->level, rnp->grplo,
-                                             rnp->grphi, "ResumeWait");
+               trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
        }
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, "EndWait");
+       trace_rcu_future_gp(rnp, rdp, c, "EndWait");
        smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
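
The loop above tolerates spurious wakeups: wait_event_interruptible()
returns zero once the condition holds and -ERESTARTSYS if a signal sneaks
in, and since kthreads have no use for signals, the pending signal is
flushed and the wait retried.  A minimal sketch of the idiom, with
illustrative names:

	/*
	 * Retry-on-signal wait idiom.  Sleeping in TASK_INTERRUPTIBLE
	 * keeps the kthread out of the load average; flush_signals()
	 * discards whatever interrupted the sleep so that the next
	 * wait is not immediately woken again.
	 */
	static void wait_for_gp(wait_queue_head_t *wq, atomic_t *gp_done)
	{
		for (;;) {
			if (!wait_event_interruptible(*wq, atomic_read(gp_done)))
				break;			/* Condition satisfied. */
			flush_signals(current);		/* Interrupted: retry. */
		}
	}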
 
@@ -2375,9 +2295,8 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
        return 0;
 }
 
-static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       return 0;
 }
 
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
@@ -2388,11 +2307,6 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 {
 }
 
-static bool is_nocb_cpu(int cpu)
-{
-       return false;
-}
-
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy)
 {
@@ -2419,3 +2333,20 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 }
 
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
+/*
+ * An adaptive-ticks CPU can potentially execute in kernel mode for an
+ * arbitrarily long period of time with the scheduling-clock tick turned
+ * off.  RCU will be paying attention to this CPU because it is in the
+ * kernel, but the CPU cannot be guaranteed to be executing the RCU state
+ * machine because the scheduling-clock tick has been disabled.  Therefore,
+ * if an adaptive-ticks CPU is failing to respond to the current grace
+ * period and has not been idle from an RCU perspective, kick it.
+ */
+static void rcu_kick_nohz_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+       if (tick_nohz_full_cpu(cpu))
+               smp_send_reschedule(cpu);
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+}
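
A hedged sketch of how a caller might use this helper when scanning for
grace-period holdouts (the cpumask and function name are illustrative,
not part of this patch):

	/*
	 * Hypothetical caller: poke every CPU that has yet to report
	 * a quiescent state, in case some of them are adaptive-ticks
	 * CPUs executing in the kernel with the tick stopped.
	 */
	static void rcu_kick_holdouts(const struct cpumask *holdouts)
	{
		int cpu;

		for_each_cpu(cpu, holdouts)
			rcu_kick_nohz_cpu(cpu);
	}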