[COMMON] sched/rt: remove unnecessary operations for PELT
author     Soohyun Kim <soohyuni.kim@samsung.com>
           Wed, 28 Mar 2018 07:15:33 +0000 (16:15 +0900)
committer  Chungwoo Park <cww.park@samsung.com>
           Mon, 21 May 2018 08:33:17 +0000 (17:33 +0900)
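
Remove the redundant update_rt_rq_load_avg() calls from
update_blocked_averages(), put_prev_task_rt() and task_tick_rt(), and
make sync_rt_entity_load_avg() decay the rt_se load average directly
through __update_load_avg() instead of going through
update_rt_load_avg().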
Change-Id: I3579565f8025b151b303862a710374963197731e
Signed-off-by: Soohyun Kim <soohyuni.kim@samsung.com>
kernel/sched/fair.c
kernel/sched/rt.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ba6c2b1f9f6044ba49ff318a2927adb3d0132166..ec584ec4935c68bb391d328ffacbeed424a45939 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8570,7 +8570,6 @@ static void update_blocked_averages(int cpu)
                if (cfs_rq_is_decayed(cfs_rq))
                        list_del_leaf_cfs_rq(cfs_rq);
        }
-       update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
 #endif
@@ -8633,7 +8632,6 @@ static inline void update_blocked_averages(int cpu)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-       update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
 #endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5e9b2fecf01e5ca84f096e4901be4771af757f92..70bf66acf8d456da3fcdc9d9a766af910815e832 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1674,7 +1674,8 @@ void sync_rt_entity_load_avg(struct sched_rt_entity *rt_se)
        u64 last_update_time;
 
        last_update_time = rt_rq_last_update_time(rt_rq);
-       update_rt_load_avg(last_update_time, rt_se);
+       __update_load_avg(last_update_time, cpu_of(rq_of_rt_rq(rt_rq)),
+                               &rt_se->avg, 0, 0, NULL);
 }
 
 /*
@@ -1983,8 +1984,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 
        update_curr_rt(rq);
 
-       update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);
-
        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
@@ -3089,7 +3088,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        u64 now = rq_clock_task(rq);
 
        update_curr_rt(rq);
-       update_rt_rq_load_avg(now, cpu_of(rq), &rq->rt, 1);
 
        for_each_sched_rt_entity(rt_se)
                update_rt_load_avg(now, rt_se);
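
For reference, a minimal sketch of how sync_rt_entity_load_avg() reads with
this patch applied. The body follows the rt.c hunk above; rt_rq_of_se() is
assumed here as the way the rt_rq is looked up, since the opening lines of
the function are not part of this diff. Passing weight == 0, running == 0
and a NULL cfs_rq makes __update_load_avg() decay rt_se->avg up to the
rt_rq's last_update_time without accumulating any new contribution.

void sync_rt_entity_load_avg(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);	/* assumed lookup helper */
	u64 last_update_time;

	last_update_time = rt_rq_last_update_time(rt_rq);
	/* Decay-only update: weight == 0, running == 0, no cfs_rq. */
	__update_load_avg(last_update_time, cpu_of(rq_of_rt_rq(rt_rq)),
				&rt_se->avg, 0, 0, NULL);
}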