[COMMON] sched/rt: remove the useless rq lock
author    Johnlay Park <jonglae.park@samsung.com>
          Mon, 16 Apr 2018 05:53:28 +0000 (14:53 +0900)
committer Chungwoo Park <cww.park@samsung.com>
          Mon, 21 May 2018 08:35:54 +0000 (17:35 +0900)
Remove the useless rq lock handling in find_lock_lowest_rq(). In an
aging test, the rq lock became unbalanced because the lock is released
by the double-locking routine in this function when it is reached from
rto_push_irq_work_func().

Change-Id: I338bc98fd4053aefcf8fdd4a6e991ce240d649ec
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
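
For context, a minimal userspace sketch of the double-locking
discipline the message refers to. It uses pthreads in place of the
kernel's raw spinlocks; struct rq here is a hypothetical stand-in, and
the helper names only mirror the scheduler's double_lock_balance()
and double_unlock_balance(). This illustrates the locking pattern
under those assumptions, not the kernel implementation:

	#include <pthread.h>

	/* Hypothetical stand-in for the scheduler's per-CPU runqueue. */
	struct rq {
		pthread_mutex_t lock;
	};

	/*
	 * The caller already holds this_rq->lock and wants busiest->lock
	 * too.  To keep a global lock order (lower address first) it may
	 * have to drop this_rq->lock and retake both.  Returns 1 if
	 * this_rq->lock was released along the way, 0 otherwise,
	 * mirroring the return contract of the kernel helper.
	 */
	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		if (this_rq == busiest)
			return 0;

		if (pthread_mutex_trylock(&busiest->lock) == 0)
			return 0;	/* got it without dropping anything */

		/* Contended: drop our lock, then take both in order. */
		pthread_mutex_unlock(&this_rq->lock);
		if (busiest < this_rq) {
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
		} else {
			pthread_mutex_lock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
		}
		return 1;	/* this_rq->lock was released and retaken */
	}

	/* Undo only the second lock; this_rq->lock stays with the caller. */
	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	{
		if (this_rq != busiest)
			pthread_mutex_unlock(&busiest->lock);
	}

Because double_lock_balance() may drop and retake rq->lock, every path
that hands lowest_rq back to the caller must have acquired
lowest_rq->lock through it; the matching double_unlock_balance() in the
caller assumes exactly that.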
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 66155bc47aab55b95e576aacdd0d40ff7192f27f..bea7bef3666778258d7be45d63c7940ec6949ed1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2546,15 +2546,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                lowest_rq = cpu_rq(cpu);
-#ifdef CONFIG_SCHED_USE_FLUID_RT
-               if (task->rt.sync_flag == 1 && lowest_rq->rt.highest_prio.curr == task->prio) {
-                       /*
-                        * If the sync flag is set,
-                        * let the task go even though its priority is same with current.
-                        */
-                       trace_sched_fluid_stat(task, &task->rt.avg, cpu, "SYNC AGAIN");
-               } else
- #else
                if (lowest_rq->rt.highest_prio.curr <= task->prio)
                {
                        /*
@@ -2565,7 +2556,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        lowest_rq = NULL;
                        break;
                }
-#endif
 
                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
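
Read together with the hunks above, the removed block appears to be the
source of the imbalance: with CONFIG_SCHED_USE_FLUID_RT enabled, its
dangling else bound to the double_lock_balance() call that follows, so
the sync-flag path could return lowest_rq without having taken its
lock while the caller still released it. A sketch of that caller-side
pairing, reusing the hypothetical helpers from the sketch above
(push_rt_task() in the scheduler has this shape; push_one_task() below
is illustrative only):

	/*
	 * Illustrative caller pairing, in the same userspace model as
	 * above.  Every path that hands lowest_rq back to this caller
	 * must have taken lowest_rq->lock via double_lock_balance();
	 * otherwise the unlock at the end releases a lock that was
	 * never acquired.
	 */
	static void push_one_task(struct rq *rq, struct rq *lowest_rq)
	{
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * rq->lock was dropped and retaken, so conditions
			 * may have changed; the real code revalidates the
			 * task here and may retry or bail out.
			 */
		}

		/* ... migrate the task to lowest_rq ... */

		double_unlock_balance(rq, lowest_rq);
	}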