nohz: Move ts->idle_calls incrementation into strict idle logic
authorFrederic Weisbecker <fweisbec@gmail.com>
Sun, 31 Jul 2011 22:06:10 +0000 (00:06 +0200)
committerFrederic Weisbecker <fweisbec@gmail.com>
Mon, 11 Jun 2012 18:07:17 +0000 (20:07 +0200)
Since we want to prepare for making the nohz API work beyond the
idle case, we need to pull the ts->idle_calls increment up to the
callers in idle.

To perform this, we split tick_nohz_stop_sched_tick() in two parts:
a first one that checks if we can really stop the tick for idle,
and another that actually stops it. Then from the callers in idle,
we check if we can stop the tick and only then we increment idle_calls
and finally relay to the nohz API that won't care about these details
anymore.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
kernel/time/tick-sched.c

index 73cc4901336d846955e7b809a6f6baa401deca91..430e1b6901ccb662ad7a15503f3b7624384f0a6a 100644 (file)
@@ -271,47 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts,
+                                     ktime_t now, int cpu)
 {
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
        ktime_t last_update, expires;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        u64 time_delta;
-       int cpu;
-
-       cpu = smp_processor_id();
-       ts = &per_cpu(tick_cpu_sched, cpu);
-
-       /*
-        * If this cpu is offline and it is the one which updates
-        * jiffies, then give up the assignment and let it be taken by
-        * the cpu which runs the tick timer next. If we don't drop
-        * this here the jiffies might be stale and do_timer() never
-        * invoked.
-        */
-       if (unlikely(!cpu_online(cpu))) {
-               if (cpu == tick_do_timer_cpu)
-                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-       }
-
-       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-               return;
-
-       if (need_resched())
-               return;
 
-       if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-               static int ratelimit;
-
-               if (ratelimit < 10) {
-                       printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                              (unsigned int) local_softirq_pending());
-                       ratelimit++;
-               }
-               return;
-       }
 
-       ts->idle_calls++;
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&xtime_lock);
@@ -441,16 +409,56 @@ out:
        ts->sleep_length = ktime_sub(dev->next_event, now);
 }
 
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+       /*
+        * If this cpu is offline and it is the one which updates
+        * jiffies, then give up the assignment and let it be taken by
+        * the cpu which runs the tick timer next. If we don't drop
+        * this here the jiffies might be stale and do_timer() never
+        * invoked.
+        */
+       if (unlikely(!cpu_online(cpu))) {
+               if (cpu == tick_do_timer_cpu)
+                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+       }
+
+       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+               return false;
+
+       if (need_resched())
+               return false;
+
+       if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+               static int ratelimit;
+
+               if (ratelimit < 10) {
+                       printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                              (unsigned int) local_softirq_pending());
+                       ratelimit++;
+               }
+               return false;
+       }
+
+       return true;
+}
+
 static void __tick_nohz_idle_enter(struct tick_sched *ts)
 {
        ktime_t now;
-       int was_stopped = ts->tick_stopped;
+       int cpu = smp_processor_id();
 
-       now = tick_nohz_start_idle(smp_processor_id(), ts);
-       tick_nohz_stop_sched_tick(ts, now);
+       now = tick_nohz_start_idle(cpu, ts);
 
-       if (!was_stopped && ts->tick_stopped)
-               ts->idle_jiffies = ts->last_jiffies;
+       if (can_stop_idle_tick(cpu, ts)) {
+               int was_stopped = ts->tick_stopped;
+
+               ts->idle_calls++;
+               tick_nohz_stop_sched_tick(ts, now, cpu);
+
+               if (!was_stopped && ts->tick_stopped)
+                       ts->idle_jiffies = ts->last_jiffies;
+       }
 }
 
 /**