Merge tag 'v3.10.90' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel/time/tick-sched.c
index f4208138fbf4cd1a72791f99c31adcbea9180e3e..e37b09b1f85a352be3177b6e0d0f65b6df4ec781 100644
@@ -20,6 +20,9 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
+#include <linux/rq_stats.h>
+#endif
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
 #include <linux/perf_event.h>
 
 #include <trace/events/timer.h>
 
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
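+/*
+ * Run-queue statistics for the MTK scheduler instrumentation
+ * (CONFIG_MTK_SCHED_RQAVG_US): rq_info is updated from the sched tick
+ * in update_rq_stats() below and protected by rq_lock.
+ */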
+struct rq_data rq_info;
+#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
+struct workqueue_struct *rq_wq;
+#endif
+spinlock_t rq_lock;
+#endif
+
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+#include <mtlbprof/mtlbprof.h>
+#endif
+
 /*
  * Per cpu nohz control structure
  */
@@ -306,7 +321,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
                 * we can't safely shutdown that CPU.
                 */
                if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-                       return -EINVAL;
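+                       /* Notifier callbacks must return NOTIFY_* codes, not -errno */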
+                       return NOTIFY_BAD;
                break;
        }
        return NOTIFY_OK;
@@ -441,6 +456,9 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 
        update_ts_time_stats(cpu, ts, now, NULL);
        ts->idle_active = 0;
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+       mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE);
+#endif
 
        sched_clock_idle_wakeup_event(0);
 }
@@ -451,6 +469,10 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 
        ts->idle_entrytime = now;
        ts->idle_active = 1;
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+       mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE);
+#endif
+
        sched_clock_idle_sleep_event();
        return now;
 }
@@ -720,8 +742,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                return false;
        }
 
-       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
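+               /*
+                * Give tick_nohz_get_sleep_length() a sane one-tick value
+                * even though the tick is not being stopped here.
+                */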
+               ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
                return false;
+       }
 
        if (need_resched())
                return false;
@@ -832,13 +856,10 @@ void tick_nohz_irq_exit(void)
 {
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 
-       if (ts->inidle) {
-               /* Cancel the timer because CPU already waken up from the C-states*/
-               menu_hrtimer_cancel();
+       if (ts->inidle)
                __tick_nohz_idle_enter(ts);
-       } else {
+       else
                tick_nohz_full_stop_tick(ts);
-       }
 }
 
 /**
@@ -936,8 +957,6 @@ void tick_nohz_idle_exit(void)
 
        ts->inidle = 0;
 
-       /* Cancel the timer because CPU already waken up from the C-states*/
-       menu_hrtimer_cancel();
        if (ts->idle_active || ts->tick_stopped)
                now = ktime_get();
 
@@ -1082,6 +1101,54 @@ void tick_check_idle(int cpu)
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
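+/*
+ * Fold the instantaneous run-queue depth (nr_running() * 10) into a
+ * jiffy-weighted average, at most once every rq_poll_jiffies.
+ */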
+static void update_rq_stats(void)
+{
+       unsigned long jiffy_gap = 0;
+       unsigned int rq_avg = 0;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&rq_lock, flags);
+
+       jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+       if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+               if (!rq_info.rq_avg)
+                       rq_info.rq_poll_total_jiffies = 0;
+
+               rq_avg = nr_running() * 10;
+
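+               /* Weight the new sample against the previously accumulated poll window */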
+               if (rq_info.rq_poll_total_jiffies) {
+                       rq_avg = (rq_avg * jiffy_gap) +
+                               (rq_info.rq_avg *
+                                rq_info.rq_poll_total_jiffies);
+                       do_div(rq_avg,
+                              rq_info.rq_poll_total_jiffies + jiffy_gap);
+               }
+
+               rq_info.rq_avg = rq_avg;
+               rq_info.rq_poll_total_jiffies += jiffy_gap;
+               rq_info.rq_poll_last_jiffy = jiffies;
+       }
+
+       spin_unlock_irqrestore(&rq_lock, flags);
+}
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
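+/* Queue the deferred-timer work at most once every def_timer_jiffies */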
+static void wakeup_user(void)
+{
+       unsigned long jiffy_gap;
+
+       jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+
+       if (jiffy_gap >= rq_info.def_timer_jiffies) {
+               rq_info.def_timer_last_jiffy = jiffies;
+               queue_work(rq_wq, &rq_info.def_timer_work);
+       }
+}
+#endif /* CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ */
+
+#endif /* CONFIG_MTK_SCHED_RQAVG_US */
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1102,6 +1169,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
        if (regs)
                tick_sched_handle(ts, regs);
 
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
+       if ((rq_info.init == 1) && (tick_do_timer_cpu == smp_processor_id())) {
+               /* update run queue statistics */
+               update_rq_stats();
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
+               /* wakeup user if needed */
+               wakeup_user();
+#endif /* CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ */
+       }
+#endif /* CONFIG_MTK_SCHED_RQAVG_US */
+
        hrtimer_forward(timer, now, tick_period);
 
        return HRTIMER_RESTART;
@@ -1169,7 +1253,8 @@ void tick_cancel_sched_timer(int cpu)
                hrtimer_cancel(&ts->sched_timer);
 # endif
 
-       memset(ts, 0, sizeof(*ts));
+       /*
+        * Do not clear ts here: a memset would zero the accumulated idle
+        * time statistics once the CPU is unplugged.
+        */
+       ts->nohz_mode = NOHZ_MODE_INACTIVE;
 }
 #endif