[COMMON] sched/rt: add API for propagating load along the RT TG tree
author    Johnlay Park <jonglae.park@samsung.com>
Wed, 28 Feb 2018 11:52:41 +0000 (20:52 +0900)
committer Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:33:14 +0000 (17:33 +0900)
Also handle removed_load_avg, which is used for inheriting the PELT load
between RT and FAIR tasks.

Change-Id: Ic30db1ba0e7eb146617bff233842137222c30044
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h

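For orientation, the sketch below models the accounting that the fair.c hunk further down adds: load removed from an rt_rq elsewhere is parked in removed_load_avg, folded into the runqueue's sched_avg on the next update, and the runqueue is flagged with propagate_avg so the parent task group re-syncs. This is a minimal stand-alone toy, not the kernel code; the toy_rt_rq type, the toy_fold_removed() helper and the plain (non-atomic) exchange are simplifications of the real rt_rq and atomic_long_xchg() handling.

#include <stdio.h>

#define LOAD_AVG_MAX 47742UL   /* stand-in for the PELT maximum sum */

struct toy_rt_rq {
	unsigned long load_avg;        /* decayed load average            */
	unsigned long load_sum;        /* running geometric sum behind it */
	long removed_load_avg;         /* load removed by remote dequeues */
	int propagate_avg;             /* ask the parent level to re-sync */
};

/* Fold pending removed load into the averages, clamping at zero. */
static void toy_fold_removed(struct toy_rt_rq *rt_rq)
{
	unsigned long r = rt_rq->removed_load_avg;

	if (!r)
		return;
	rt_rq->removed_load_avg = 0;   /* the kernel uses atomic_long_xchg() here */

	rt_rq->load_avg = (rt_rq->load_avg > r) ? rt_rq->load_avg - r : 0;
	rt_rq->load_sum = (rt_rq->load_sum > r * LOAD_AVG_MAX) ?
			   rt_rq->load_sum - r * LOAD_AVG_MAX : 0;
	rt_rq->propagate_avg = 1;      /* parent group must pick this change up */
}

int main(void)
{
	struct toy_rt_rq rq = {
		.load_avg = 300,
		.load_sum = 300 * LOAD_AVG_MAX,
		.removed_load_avg = 100,
	};

	toy_fold_removed(&rq);
	printf("load_avg=%lu propagate=%d\n", rq.load_avg, rq.propagate_avg);
	/* prints: load_avg=200 propagate=1 */
	return 0;
}
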
index 9a86694f6d88499092ac89a25f0ceec6d9db53ed..cd02c8a3013607f837c88acd5f27e272c287703d 100644 (file)
@@ -3405,16 +3405,24 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
        int ret;
        struct sched_avg *sa = &rt_rq->avg;
 
+       if (atomic_long_read(&rt_rq->removed_load_avg)) {
+               long r = atomic_long_xchg(&rt_rq->removed_load_avg, 0);
+               sub_positive(&sa->load_avg, r);
+               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+#ifdef CONFIG_RT_GROUP_SCHED
+               rt_rq->propagate_avg = 1;
+#endif
+       }
+
        if (atomic_long_read(&rt_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&rt_rq->removed_util_avg, 0);
                sub_positive(&sa->util_avg, r);
                sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+#ifdef CONFIG_RT_GROUP_SCHED
+               rt_rq->propagate_avg = 1;
+#endif
        }
 
-       /* TODO:
-        * Do something on removed_load_avg
-        * Do propagate_avg for removed_load/util_avg
-        */
        ret = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
 
 #ifndef CONFIG_64BIT
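The hunk above leans on the add_positive()/sub_positive() helpers, which are defined elsewhere in the scheduler sources and not shown in this diff. A rough sketch of their semantics, written here as plain functions rather than the kernel's macros (the real versions also go through READ_ONCE()/WRITE_ONCE()): subtract or add a delta but clamp at zero, so a racy over-subtraction can never wrap the unsigned average.

#include <stdio.h>

/* Sketch only: subtract, never wrapping below zero. */
static void sub_positive(unsigned long *ptr, unsigned long val)
{
	*ptr = (*ptr > val) ? *ptr - val : 0;
}

/* Sketch only: add a signed delta, clamping underflow at zero. */
static void add_positive(unsigned long *ptr, long val)
{
	if (val < 0 && (unsigned long)(-val) > *ptr)
		*ptr = 0;
	else
		*ptr += val;
}

int main(void)
{
	unsigned long avg = 100;

	sub_positive(&avg, 250);    /* would underflow: clamps to 0 */
	add_positive(&avg, 40);     /* positive delta adds normally */
	printf("avg=%lu\n", avg);   /* prints: avg=40               */
	return 0;
}
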
index 4ee56235052a31a39dc5bcbbd634bf195efcc751..c3e566eaa215d49f48805e175acfb4d073a41ba6 100644 (file)
@@ -99,6 +99,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
        atomic_long_set(&rt_rq->removed_util_avg, 0);
+       atomic_long_set(&rt_rq->removed_load_avg, 0);
 #endif /* CONFIG_SMP */
        /* We start in dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;
@@ -1298,9 +1299,9 @@ attach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
        rt_rq->avg.util_sum += rt_se->avg.util_sum;
        rt_rq->avg.load_avg += rt_se->avg.load_avg;
        rt_rq->avg.load_sum += rt_se->avg.load_sum;
-       /* Need to do something about
-        * propagate_avg of rt_rq, rt_rq_util_change()
-        */
+#ifdef CONFIG_RT_GROUP_SCHED
+       rt_rq->propagate_avg = 1;
+#endif
 }
 
 static void
@@ -1310,9 +1311,9 @@ detach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
        sub_positive(&rt_rq->avg.util_sum, rt_se->avg.util_sum);
        sub_positive(&rt_rq->avg.load_avg, rt_se->avg.load_avg);
        sub_positive(&rt_rq->avg.load_sum, rt_se->avg.load_sum);
-       /* Need to do something about
-        * propagate_avg of rt_rq, rt_rq_util_change()
-        */
+#ifdef CONFIG_RT_GROUP_SCHED
+       rt_rq->propagate_avg = 1;
+#endif
 }
 #else
 static inline void
@@ -1677,6 +1678,7 @@ static void remove_rt_entity_load_avg(struct sched_rt_entity *rt_se)
         */
 
        sync_rt_entity_load_avg(rt_se);
+       atomic_long_add(rt_se->avg.load_avg, &rt_rq->removed_load_avg);
        atomic_long_add(rt_se->avg.util_avg, &rt_rq->removed_util_avg);
 }
 
@@ -1954,6 +1956,93 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 #ifdef CONFIG_SMP
+#ifdef CONFIG_RT_GROUP_SCHED
+/* Take into account change of utilization of a child task group */
+static inline void
+update_tg_rt_util(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *grt_rq = rt_se->my_q;
+       long delta = grt_rq->avg.util_avg - rt_se->avg.util_avg;
+
+       /* Nothing to update */
+       if (!delta)
+               return;
+
+       /* Set new sched_rt_entity's utilization */
+       rt_se->avg.util_avg = grt_rq->avg.util_avg;
+       rt_se->avg.util_sum = rt_se->avg.util_avg * LOAD_AVG_MAX;
+
+       /* Update parent rt_rq utilization */
+       add_positive(&rt_rq->avg.util_avg, delta);
+       rt_rq->avg.util_sum = rt_rq->avg.util_avg * LOAD_AVG_MAX;
+}
+
+
+/* Take into account change of load of a child task group */
+static inline void
+update_tg_rt_load(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *grt_rq = rt_se->my_q;
+       long delta = grt_rq->avg.load_avg - rt_se->avg.load_avg;
+
+       /*
+        * TODO: Need to consider the task group update
+        * for the RT runqueue.
+        */
+
+       /* Nothing to update */
+       if (!delta)
+               return;
+
+       /* Set new sched_rt_entity's load */
+       rt_se->avg.load_avg = grt_rq->avg.load_avg;
+       rt_se->avg.load_sum = rt_se->avg.load_avg * LOAD_AVG_MAX;
+
+       /* Update parent rt_rq load */
+       add_positive(&rt_rq->avg.load_avg, delta);
+       rt_rq->avg.load_sum = rt_rq->avg.load_avg * LOAD_AVG_MAX;
+
+       /*
+        * TODO: If the sched_rt_entity is already enqueued, should we also
+        * update the runnable load avg?
+        */
+}
+
+static inline int test_and_clear_tg_rt_propagate(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = rt_se->my_q;
+
+       if (!rt_rq->propagate_avg)
+               return 0;
+
+       rt_rq->propagate_avg = 0;
+       return 1;
+}
+
+/* Update a group rt entity and its parent rt_rq load average */
+static inline int propagate_entity_rt_load_avg(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq;
+
+       if (rt_entity_is_task(rt_se))
+               return 0;
+
+       if (!test_and_clear_tg_rt_propagate(rt_se))
+               return 0;
+
+       rt_rq = rt_rq_of_se(rt_se);
+
+       rt_rq->propagate_avg = 1;
+
+       update_tg_rt_util(rt_rq, rt_se);
+       update_tg_rt_load(rt_rq, rt_se);
+
+       return 1;
+}
+#else
+static inline int propagate_entity_rt_load_avg(struct sched_rt_entity *rt_se) { return 0; }
+#endif
+
 void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
@@ -1967,10 +2056,7 @@ void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
                        rt_rq->curr == rt_se, NULL);
 
        update_rt_rq_load_avg(now, cpu, rt_rq, true);
-       /* TODO
-        * propagate the rt entity load average ()
-        * if (rt_entity_is_task(rt_se)) tracing the rt average
-        */
+       propagate_entity_rt_load_avg(rt_se);
 }
 
 /* Only try algorithms three times */
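Taken together, the new propagation path moves a change up one level of the task-group tree per update: a child group rt_rq that changed is marked with propagate_avg; the next update_rt_load_avg() of its owning sched_rt_entity clears the flag, copies the child's average into the entity, folds the delta into the parent rt_rq and marks the parent in turn. The toy below illustrates a single step of that walk; the two-level layout, toy_* names and the numbers are illustrative only, not the kernel structures.

#include <stdio.h>

struct toy_rq {
	long util_avg;
	int propagate_avg;
};

struct toy_entity {
	long util_avg;
	struct toy_rq *my_q;      /* group rq owned by this entity     */
	struct toy_rq *parent_q;  /* rq the entity itself is queued on */
};

/* One propagation step, mirroring the shape of propagate_entity_rt_load_avg(). */
static int toy_propagate(struct toy_entity *se)
{
	struct toy_rq *child = se->my_q;
	long delta;

	if (!child->propagate_avg)
		return 0;
	child->propagate_avg = 0;

	delta = child->util_avg - se->util_avg;
	se->util_avg = child->util_avg;          /* entity tracks its group   */
	se->parent_q->util_avg += delta;         /* parent absorbs the delta  */
	se->parent_q->propagate_avg = 1;         /* and flags the next level  */
	return 1;
}

int main(void)
{
	struct toy_rq child = { .util_avg = 150, .propagate_avg = 1 };
	struct toy_rq parent = { .util_avg = 400 };
	struct toy_entity se = { .util_avg = 100, .my_q = &child,
				 .parent_q = &parent };

	toy_propagate(&se);
	printf("entity=%ld parent=%ld parent_flag=%d\n",
	       se.util_avg, parent.util_avg, parent.propagate_avg);
	/* entity=150 parent=450 parent_flag=1: the +50 change moved up a level */
	return 0;
}

Flagging the parent rather than walking the whole tree keeps each update cheap; the change reaches the root over successive updates, similar in spirit to the CFS propagation path.
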
index 6e88be5d98bec170de559c1feef614788b1aa11d..dee30713798fef74dceaad9fe03fddd89ff8679e 100644 (file)
@@ -579,6 +579,7 @@ struct rt_rq {
        struct sched_avg avg;
        struct sched_rt_entity *curr;
        atomic_long_t removed_util_avg;
+       atomic_long_t removed_load_avg;
 #endif /* CONFIG_SMP */
        int rt_queued;
 
@@ -593,6 +594,7 @@ struct rt_rq {
 
        struct rq *rq;
        struct task_group *tg;
+       unsigned long propagate_avg;
 #ifndef CONFIG_64BIT
                u64 load_last_update_time_copy;
 #endif