sched: fair-group: de-couple load-balancing from the rb-trees
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Sat, 19 Apr 2008 17:45:00 +0000 (19:45 +0200)
committer Ingo Molnar <mingo@elte.hu>
Sat, 19 Apr 2008 17:45:00 +0000 (19:45 +0200)
De-couple load-balancing from the rb-trees, so that I can change their
organization.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/init_task.h
include/linux/sched.h
kernel/sched.c
kernel/sched_fair.c

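The patch replaces the rb-tree walk used by the load-balancer's task iterator with a plain doubly linked list: every queued sched_entity is added to cfs_rq->tasks in account_entity_enqueue(), and the iterator remembers its position in cfs_rq->balance_iterator so that load_balance_next_fair() can resume where load_balance_start_fair() left off, skipping group (non-task) entities along the way. Below is a minimal, self-contained userspace sketch of that pattern, not kernel code: the list helpers are simplified stand-ins for <linux/list.h>, and the struct fields are reduced to what the iterator needs.

/*
 * Userspace sketch of the list-based balance iterator introduced by this
 * patch.  Names mirror the kernel ones, but everything here is a
 * simplified stand-in, not the actual scheduler code.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Insert 'new' right after 'head' (prepend, like the kernel's list_add). */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct sched_entity {
	struct list_head group_node;	/* links the entity into cfs_rq->tasks */
	int is_task;			/* 0 for group entities */
	int pid;			/* toy payload */
};

struct cfs_rq {
	struct list_head tasks;			/* all queued entities */
	struct list_head *balance_iterator;	/* resume point for "next" */
};

/* Walk the list starting at 'next', skipping group (non-task) entities. */
static struct sched_entity *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct sched_entity *se;

	do {
		if (next == &cfs_rq->tasks)
			return NULL;		/* wrapped back to the head: done */
		se = list_entry(next, struct sched_entity, group_node);
		next = next->next;
	} while (!se->is_task);

	cfs_rq->balance_iterator = next;	/* remember where to resume */
	return se;
}

static struct sched_entity *load_balance_start(struct cfs_rq *cfs_rq)
{
	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct sched_entity *load_balance_next(struct cfs_rq *cfs_rq)
{
	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

int main(void)
{
	struct cfs_rq rq;
	struct sched_entity group = { .is_task = 0 };
	struct sched_entity a = { .is_task = 1, .pid = 1 };
	struct sched_entity b = { .is_task = 1, .pid = 2 };

	INIT_LIST_HEAD(&rq.tasks);
	/* "enqueue" three entities; list_add prepends, so order is group, a, b */
	list_add(&b.group_node, &rq.tasks);
	list_add(&a.group_node, &rq.tasks);
	list_add(&group.group_node, &rq.tasks);

	for (struct sched_entity *se = load_balance_start(&rq); se;
	     se = load_balance_next(&rq))
		printf("would consider pid %d for migration\n", se->pid);

	return 0;
}

Compared with the old rb_next() walk, the iterator now depends only on the tasks list, so the rb-tree's internal organization can change (the stated goal of this patch) without touching the load-balancing path.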
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f74e1d7415fe9042e2e71467b40e4a56184ffa8..37a6f5bc4a92ab5262fcb75e161b0de94114b98b 100644
@@ -151,6 +151,9 @@ extern struct group_info init_groups;
        .cpus_allowed   = CPU_MASK_ALL,                                 \
        .mm             = NULL,                                         \
        .active_mm      = &init_mm,                                     \
+       .se             = {                                             \
+               .group_node     = LIST_HEAD_INIT(tsk.se.group_node),    \
+       },                                                              \
        .rt             = {                                             \
                .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
                .time_slice     = HZ,                                   \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 887f5db8942d28ec5bc2425f938d7c8c6be56173..be6914014c7045d0faf603406bfd586f404c9968 100644
@@ -946,6 +946,7 @@ struct load_weight {
 struct sched_entity {
        struct load_weight      load;           /* for load-balancing */
        struct rb_node          run_node;
+       struct list_head        group_node;
        unsigned int            on_rq;
 
        u64                     exec_start;
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1a3e936d28ca97d4dda8a4f9d54c4524d20e5d..3202462109f509c1f9eeebc2e45ef1fd678e83d9 100644
@@ -384,8 +384,12 @@ struct cfs_rq {
 
        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;
-       struct rb_node *rb_load_balance_curr;
-       /* 'curr' points to currently running entity on this cfs_rq.
+
+       struct list_head tasks;
+       struct list_head *balance_iterator;
+
+       /*
+        * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
        struct sched_entity *curr, *next;
@@ -2525,6 +2529,7 @@ static void __sched_fork(struct task_struct *p)
 
        INIT_LIST_HEAD(&p->rt.run_list);
        p->se.on_rq = 0;
+       INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -7898,6 +7903,7 @@ int in_sched_functions(unsigned long addr)
 static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
        cfs_rq->tasks_timeline = RB_ROOT;
+       INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
        cfs_rq->rq = rq;
 #endif
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9e301a2bab6f79e0321cacbc8fb4bf2b15e809f5..ed8ce329899bcca5c7442f5c9176ac7bb125def7 100644
@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
                add_cfs_task_weight(cfs_rq, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;
+       list_add(&se->group_node, &cfs_rq->tasks);
 }
 
 static void
@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
                add_cfs_task_weight(cfs_rq, -se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;
+       list_del_init(&se->group_node);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * the current task:
  */
 static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 {
        struct task_struct *p = NULL;
        struct sched_entity *se;
 
-       if (!curr)
+       if (next == &cfs_rq->tasks)
                return NULL;
 
        /* Skip over entities that are not tasks */
        do {
-               se = rb_entry(curr, struct sched_entity, run_node);
-               curr = rb_next(curr);
-       } while (curr && !entity_is_task(se));
+               se = list_entry(next, struct sched_entity, group_node);
+               next = next->next;
+       } while (next != &cfs_rq->tasks && !entity_is_task(se));
 
-       cfs_rq->rb_load_balance_curr = curr;
+       if (next == &cfs_rq->tasks)
+               return NULL;
+
+       cfs_rq->balance_iterator = next;
 
        if (entity_is_task(se))
                p = task_of(se);
@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg)
 {
        struct cfs_rq *cfs_rq = arg;
 
-       return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+       return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
 }
 
 static struct task_struct *load_balance_next_fair(void *arg)
 {
        struct cfs_rq *cfs_rq = arg;
 
-       return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+       return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
 static unsigned long