Merge 4.14.15 into android-4.14
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git]

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d17c5da523a0bc817a6b32413ee7e0ba3e7a6b2e..15d9be654a55cc3a2301d7b2092d446f4a3b8d30 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -39,6 +39,7 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
+#include "walt.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -438,6 +439,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
        if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
                return;
 
+       head->count++;
+
        get_task_struct(task);
 
        /*
@@ -447,6 +450,10 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
        head->lastp = &node->next;
 }
 
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+              int sibling_count_hint);
+
 void wake_up_q(struct wake_q_head *head)
 {
        struct wake_q_node *node = head->first;
@@ -461,10 +468,10 @@ void wake_up_q(struct wake_q_head *head)
                task->wake_q.next = NULL;
 
                /*
-                * wake_up_process() implies a wmb() to pair with the queueing
+                * try_to_wake_up() implies a wmb() to pair with the queueing
                 * in wake_q_add() so as not to miss wakeups.
                 */
-               wake_up_process(task);
+               try_to_wake_up(task, TASK_NORMAL, 0, head->count);
                put_task_struct(task);
        }
 }
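
The new head->count field tracks how many tasks have been queued, and wake_up_q() now forwards that number to try_to_wake_up() as a sibling-count hint so placement code can anticipate a batch of wakeups; the forward declaration is needed because try_to_wake_up() is defined further down in this file. A minimal caller sketch (the lock and list names are illustrative; DEFINE_WAKE_Q(), wake_q_add() and wake_up_q() are the real API):

        DEFINE_WAKE_Q(wake_q);

        spin_lock(&q->lock);                    /* 'q' is illustrative */
        list_for_each_entry(w, &q->waiters, list)
                wake_q_add(&wake_q, w->task);   /* bumps head->count */
        spin_unlock(&q->lock);

        wake_up_q(&wake_q);     /* each wakeup gets hint = head->count */
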
@@ -505,8 +512,7 @@ void resched_cpu(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-               return;
+       raw_spin_lock_irqsave(&rq->lock, flags);
        resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
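
This hunk comes from the 4.14.15 side of the merge: upstream made resched_cpu() unconditional because the trylock meant a contended rq->lock silently dropped the reschedule, while some callers (RCU, for instance) depend on it actually happening. A hedged sketch of the failure mode the trylock allowed (the predicate below is hypothetical):

        /* A caller that must not lose the resched; predicate is hypothetical */
        if (rcu_cpu_needs_quiescent_state(cpu))
                resched_cpu(cpu);       /* pre-fix: silent no-op if rq->lock contended */
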
@@ -1186,6 +1192,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                        p->sched_class->migrate_task_rq(p);
                p->se.nr_migrations++;
                perf_event_task_migrate(p);
+
+               walt_fixup_busy_time(p, new_cpu);
        }
 
        __set_task_cpu(p, new_cpu);
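
walt_fixup_busy_time() keeps WALT's per-CPU window accounting consistent across a migration. A simplified sketch of the idea, assuming the helper in kernel/sched/walt.c and simplified field names (the real code also holds both runqueue locks):

        /* Simplified sketch; not the literal walt.c implementation */
        static void walt_fixup_busy_time_sketch(struct task_struct *p, int new_cpu)
        {
                struct rq *src_rq = task_rq(p);
                struct rq *dst_rq = cpu_rq(new_cpu);

                /* move the task's windowed demand between the two CPUs */
                src_rq->cumulative_runnable_avg -= p->ravg.demand;      /* simplified */
                dst_rq->cumulative_runnable_avg += p->ravg.demand;      /* simplified */
        }
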
@@ -1536,12 +1544,14 @@ out:
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
+                  int sibling_count_hint)
 {
        lockdep_assert_held(&p->pi_lock);
 
        if (p->nr_cpus_allowed > 1)
-               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
+                                                    sibling_count_hint);
        else
                cpu = cpumask_any(&p->cpus_allowed);
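
The extra parameter threads through the sched_class hook, so every class's select_task_rq implementation gains it as well; for example, the fair class's prototype in kernel/sched/fair.c becomes (per this same patchset):

        static int select_task_rq_fair(struct task_struct *p, int prev_cpu,
                                       int sd_flag, int wake_flags,
                                       int sibling_count_hint);
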
 
@@ -1948,11 +1958,33 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *
  */
 
+#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_WALT
+/* utility function to update walt signals at wakeup */
+static inline void walt_try_to_wake_up(struct task_struct *p)
+{
+       struct rq *rq = cpu_rq(task_cpu(p));
+       struct rq_flags rf;
+       u64 wallclock;
+
+       rq_lock_irqsave(rq, &rf);
+       wallclock = walt_ktime_clock();
+       walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+       walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+       rq_unlock_irqrestore(rq, &rf);
+}
+#else
+#define walt_try_to_wake_up(a) {}
+#endif
+#endif
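
The fallback stub is a bare {} macro; a do-while(0) form would be the more conventional statement macro, since it stays well-behaved in an unbraced if/else (a style note, not part of the merge):

        #define walt_try_to_wake_up(p) do { } while (0)
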
+
 /**
  * try_to_wake_up - wake up a thread
  * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
  * @wake_flags: wake modifier flags (WF_*)
+ * @sibling_count_hint: A hint at the number of threads that are being woken up
+ *                      in this event.
  *
  * If (@state & @p->state) @p->state = TASK_RUNNING.
  *
@@ -1965,7 +1997,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *        %false otherwise.
  */
 static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+              int sibling_count_hint)
 {
        unsigned long flags;
        int cpu, success = 0;
@@ -2043,15 +2076,18 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         */
        smp_cond_load_acquire(&p->on_cpu, !VAL);
 
+       walt_try_to_wake_up(p);
+
        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
 
-       cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
+       cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
+                            sibling_count_hint);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
@@ -2060,7 +2096,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 #else /* CONFIG_SMP */
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
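
The delayacct_blkio_end() signature change is also from 4.14.15 ("delayacct: Account blkio completion on the correct task"): the old void version charged the block-I/O delay to current, but try_to_wake_up() frequently runs on a different task than the one that slept; passing @p charges the right task. A hedged sketch of the fixed accounting (the real code lives in kernel/delayacct.c, and the delayacct_end() call shown is simplified):

        void delayacct_blkio_end(struct task_struct *p)
        {
                /* charge the wait to the task that slept, not to current */
                if (p->delays)
                        delayacct_end(&p->delays->blkio_start,
                                      &p->delays->blkio_delay,
                                      &p->delays->blkio_count);         /* simplified */
        }
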
 
@@ -2112,8 +2148,13 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
        trace_sched_waking(p);
 
        if (!task_on_rq_queued(p)) {
+               u64 wallclock = walt_ktime_clock();
+
+               walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+               walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+
                if (p->in_iowait) {
-                       delayacct_blkio_end();
+                       delayacct_blkio_end(p);
                        atomic_dec(&rq->nr_iowait);
                }
                ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
@@ -2139,13 +2180,13 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-       return try_to_wake_up(p, TASK_NORMAL, 0);
+       return try_to_wake_up(p, TASK_NORMAL, 0, 1);
 }
 EXPORT_SYMBOL(wake_up_process);
 
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
-       return try_to_wake_up(p, state, 0);
+       return try_to_wake_up(p, state, 0, 1);
 }
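
Callers that wake a single task keep their existing API and simply pass a hint of 1, meaning "no siblings are being woken in this event". Typical usage is unchanged, e.g. the classic kthread pattern (worker_fn is illustrative):

        struct task_struct *t = kthread_create(worker_fn, NULL, "example");

        if (!IS_ERR(t))
                wake_up_process(t);     /* -> try_to_wake_up(t, TASK_NORMAL, 0, 1) */
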
 
 /*
@@ -2164,7 +2205,12 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
+#ifdef CONFIG_SCHED_WALT
+       p->last_sleep_ts                = 0;
+#endif
+
        INIT_LIST_HEAD(&p->se.group_node);
+       walt_init_new_task_load(p);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq                    = NULL;
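
A freshly forked task has no execution history for WALT to average, so its initial demand is seeded from a tunable percentage of the window size. A simplified sketch of walt_init_new_task_load(), assuming the patchset's tunables (sysctl_sched_walt_init_task_load_pct, walt_ravg_window):

        void walt_init_new_task_load(struct task_struct *p)
        {
                u64 init_load = div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
                                          (u64)walt_ravg_window, 100);

                memset(&p->ravg, 0, sizeof(p->ravg));
                p->ravg.demand = init_load;     /* simplified: history seeding omitted */
        }
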
@@ -2441,6 +2487,9 @@ void wake_up_new_task(struct task_struct *p)
        struct rq *rq;
 
        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+
+       walt_init_new_task_load(p);
+
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
        /*
@@ -2451,13 +2500,15 @@ void wake_up_new_task(struct task_struct *p)
         * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
         * as we're not fully set-up yet.
         */
-       __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
+       __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1));
 #endif
        rq = __task_rq_lock(p, &rf);
        update_rq_clock(rq);
        post_init_entity_util_avg(&p->se);
 
        activate_task(rq, p, ENQUEUE_NOCLOCK);
+       walt_mark_task_starting(p);
+
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
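
walt_mark_task_starting() stamps the new task's first window marker so its first update measures from "now" rather than from zero; a simplified sketch:

        void walt_mark_task_starting(struct task_struct *p)
        {
                p->ravg.mark_start = walt_ktime_clock();        /* simplified */
        }
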
@@ -2912,7 +2963,7 @@ void sched_exec(void)
        int dest_cpu;
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
-       dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+       dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1);
        if (dest_cpu == smp_processor_id())
                goto unlock;
 
@@ -3011,6 +3062,9 @@ void scheduler_tick(void)
 
        rq_lock(rq, &rf);
 
+       walt_set_window_start(rq, &rf);
+       walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
+                       walt_ktime_clock(), 0);
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        cpu_load_update_active(rq);
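
The tick re-anchors the runqueue's window start and folds the running task's time into its signal. The event tags used here and in the hunks below come from the WALT patchset's walt.h (included at the top of this file); hedged from the common patchset, the enum there looks like:

        enum task_event {
                PUT_PREV_TASK   = 0,
                PICK_NEXT_TASK  = 1,
                TASK_WAKE       = 2,
                TASK_MIGRATE    = 3,
                TASK_UPDATE     = 4,
                IRQ_UPDATE      = 5,
        };
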
@@ -3282,6 +3336,7 @@ static void __sched notrace __schedule(bool preempt)
        struct rq_flags rf;
        struct rq *rq;
        int cpu;
+       u64 wallclock;
 
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
@@ -3337,10 +3392,17 @@ static void __sched notrace __schedule(bool preempt)
        }
 
        next = pick_next_task(rq, prev, &rf);
+       wallclock = walt_ktime_clock();
+       walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+       walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
 
        if (likely(prev != next)) {
+#ifdef CONFIG_SCHED_WALT
+               if (!prev->on_rq)
+                       prev->last_sleep_ts = wallclock;
+#endif
                rq->nr_switches++;
                rq->curr = next;
                /*
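
Recording the wallclock when a task leaves the runqueue lets placement code later measure how long it slept. One plausible consumer, purely illustrative (the helper and threshold below are hypothetical):

        #define SHORT_SLEEP_NS  (1 * NSEC_PER_MSEC)     /* hypothetical threshold */

        static inline bool recently_slept(struct task_struct *p, u64 now)
        {
                return now - p->last_sleep_ts < SHORT_SLEEP_NS;
        }
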
@@ -3616,7 +3678,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
                          void *key)
 {
-       return try_to_wake_up(curr->private, mode, wake_flags);
+       return try_to_wake_up(curr->private, mode, wake_flags, 1);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -5692,6 +5754,9 @@ int sched_cpu_dying(unsigned int cpu)
        sched_ttwu_pending();
 
        rq_lock_irqsave(rq, &rf);
+
+       walt_migrate_sync_cpu(cpu);
+
        if (rq->rd) {
                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                set_rq_offline(rq);
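
On hotplug, WALT must stop using the dying CPU as its time-sync reference. A hedged sketch of the helper, assuming the patchset's sync_cpu bookkeeping in kernel/sched/walt.c:

        void walt_migrate_sync_cpu(int cpu)
        {
                if (cpu == sync_cpu)
                        sync_cpu = smp_processor_id();  /* simplified */
        }
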
@@ -5917,12 +5982,18 @@ void __init sched_init(void)
                rq->idle_stamp = 0;
                rq->avg_idle = 2*sysctl_sched_migration_cost;
                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_WALT
+               rq->cur_irqload = 0;
+               rq->avg_irqload = 0;
+               rq->irqload_ts = 0;
+#endif
 
                INIT_LIST_HEAD(&rq->cfs_tasks);
 
                rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
                rq->last_load_update_tick = jiffies;
+               rq->last_blocked_load_update_tick = jiffies;
                rq->nohz_flags = 0;
 #endif
 #ifdef CONFIG_NO_HZ_FULL