sched: clean up find_lock_lowest_rq()
author Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:15 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:15 +0000 (21:08 +0100)
clean up find_lock_lowest_rq(): join the wrapped prototype back onto
a single line, reorder the local variable declarations, and re-wrap
the overlong cpu_isset() condition. No functional change.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_rt.c

index b8435fd47f78c92aed6e676712e5a626496f296c..0749c1837b102bd840cda4f3fccba58c5d60eab6 100644
@@ -438,12 +438,11 @@ static int find_lowest_rq(struct task_struct *task)
 }
 
 /* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-                                     struct rq *rq)
+static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 {
        struct rq *lowest_rq = NULL;
-       int cpu;
        int tries;
+       int cpu;
 
        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);
@@ -462,9 +461,11 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
-                                    !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
+                                    !cpu_isset(lowest_rq->cpu,
+                                               task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {
+
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
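
For context, the function this patch tidies follows a common pattern: pick a
target runqueue, take both runqueue locks in a fixed order (which may require
dropping the source lock), then re-validate that the task did not migrate or
get scheduled while the lock was down, retrying a bounded number of times if
only the target went stale. Below is a minimal userspace sketch of that shape
using pthread mutexes; double_lock(), pick(), still_suitable() and MAX_TRIES
are illustrative stand-ins for double_lock_balance(), find_lowest_rq() and the
priority recheck, not kernel APIs.

	#include <pthread.h>
	#include <stddef.h>

	#define MAX_TRIES 3

	struct rq {
		pthread_mutex_t lock;
	};

	struct task {
		struct rq *rq;		/* runqueue the task is on */
	};

	/*
	 * Take both runqueue locks in address order. Returns nonzero if
	 * src->lock had to be dropped to respect the ordering, in which
	 * case the caller's snapshot of the task may be stale.
	 */
	static int double_lock(struct rq *src, struct rq *dst)
	{
		if (dst < src) {
			pthread_mutex_unlock(&src->lock);
			pthread_mutex_lock(&dst->lock);
			pthread_mutex_lock(&src->lock);
			return 1;
		}
		pthread_mutex_lock(&dst->lock);
		return 0;
	}

	/*
	 * Shape of find_lock_lowest_rq(): called with src->lock held.
	 * On success both locks are held; on failure only src->lock is
	 * held and NULL is returned.
	 */
	static struct rq *
	find_lock_target(struct task *p, struct rq *src,
			 struct rq *(*pick)(struct task *),
			 int (*still_suitable)(struct rq *, struct task *))
	{
		struct rq *dst = NULL;
		int tries;

		for (tries = 0; tries < MAX_TRIES; tries++) {
			dst = pick(p);
			if (!dst || dst == src) {
				dst = NULL;
				break;
			}

			if (double_lock(src, dst) && p->rq != src) {
				/* task migrated while src->lock was dropped */
				pthread_mutex_unlock(&dst->lock);
				dst = NULL;
				break;
			}

			if (still_suitable(dst, p))
				break;	/* success: both locks held */

			/* target changed under us: drop it and try again */
			pthread_mutex_unlock(&dst->lock);
			dst = NULL;
		}

		return dst;
	}

Locking in address order is what makes the unlock/relock dance necessary: the
caller already holds src->lock, so a lower-addressed target forces a drop and
re-take, and every fact checked before that point must be re-checked after.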