[COMMON] sched/rt: remove the useless rq lock
author	Johnlay Park <jonglae.park@samsung.com>
Mon, 16 Apr 2018 05:53:28 +0000 (14:53 +0900)
committer	Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:55 +0000 (20:24 +0300)
in find_lock_lowest_rq(). In an aging test, the rq lock count became
unbalanced, because the rq lock is released by the double-locking
routine inside this function when it is invoked from
rto_push_irq_work_func().

Change-Id: I338bc98fd4053aefcf8fdd4a6e991ce240d649ec
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
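
For context, double_lock_balance() may drop the caller's rq lock so that
both runqueue locks can be taken in a deadlock-safe order, which is why
every path through the retry loop in find_lock_lowest_rq() must either
reach it or bail out before touching a second runqueue. Below is a
minimal sketch of the non-PREEMPT variant, paraphrased from mainline
kernels of this vintage; this tree's copy may differ slightly:

	static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
		__releases(this_rq->lock)
		__acquires(busiest->lock)
		__acquires(this_rq->lock)
	{
		int ret = 0;

		if (unlikely(!raw_spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				/* Drop our lock and retake both in address
				 * order to avoid ABBA deadlock. */
				raw_spin_unlock(&this_rq->lock);
				raw_spin_lock(&busiest->lock);
				raw_spin_lock_nested(&this_rq->lock,
						      SINGLE_DEPTH_NESTING);
				ret = 1;	/* caller state may be stale */
			} else
				raw_spin_lock_nested(&busiest->lock,
						      SINGLE_DEPTH_NESTING);
		}
		return ret;
	}

A nonzero return tells the caller that this_rq->lock was momentarily
released, so anything checked before the call must be revalidated.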
kernel/sched/rt.c

index 883a5374cfd1331674df0aae8aeef69dced7612a..cc68f7e1c35f51e66b3b27d143ea14c0c29bd849 100644
@@ -2564,15 +2564,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                lowest_rq = cpu_rq(cpu);
-#ifdef CONFIG_SCHED_USE_FLUID_RT
-               if (task->rt.sync_flag == 1 && lowest_rq->rt.highest_prio.curr == task->prio) {
-                       /*
-                        * If the sync flag is set,
-                        * let the task go even though its priority is same with current.
-                        */
-                       trace_sched_fluid_stat(task, &task->rt.avg, cpu, "SYNC AGAIN");
-               } else
- #else
                if (lowest_rq->rt.highest_prio.curr <= task->prio)
                {
                        /*
@@ -2583,7 +2574,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        lowest_rq = NULL;
                        break;
                }
-#endif
 
                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
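
The branch body that follows (beyond this hunk's context) is where the
task is revalidated after the potential unlock; in mainline kernels of
this era it reads roughly as below (paraphrased, this tree may differ):

			/* rq->lock was dropped: in the meantime the task
			 * may have migrated, changed affinity, started
			 * running, or been dequeued. */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !rt_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}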