[PATCH] sched: cleanup task_activated()
author		Con Kolivas <kernel@kolivas.org>
		Fri, 31 Mar 2006 10:31:23 +0000 (02:31 -0800)
committer	Linus Torvalds <torvalds@g5.osdl.org>
		Fri, 31 Mar 2006 20:18:58 +0000 (12:18 -0800)
The activated flag in task_struct is used to track different sleep types and
its usage is somewhat obfuscated.  Convert the variable to an enum with more
descriptive names, without altering behaviour.
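
For reference, the old integer values of p->activated map onto the new enum
as follows (this mapping is read off the hunks below; the comments are
descriptive only and are not part of the patch):

	enum sleep_type {
		SLEEP_NORMAL,		/* was p->activated == 0: default, reset in schedule() */
		SLEEP_NONINTERACTIVE,	/* was p->activated == -1: involuntary sleep           */
		SLEEP_INTERACTIVE,	/* was p->activated == 1: normal first-time wakeup     */
		SLEEP_INTERRUPTED,	/* was p->activated == 2: woken up by an interrupt     */
	};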

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/sched.h
kernel/sched.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ab84adf5bb9af8f3a36e8445880a37f0794abded..c4fd3fcd3feb1d975e9c693ba016f70e24926880 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -684,6 +684,13 @@ static inline void prefetch_stack(struct task_struct *t) { }
 struct audit_context;          /* See audit.c */
 struct mempolicy;
 
+enum sleep_type {
+       SLEEP_NORMAL,
+       SLEEP_NONINTERACTIVE,
+       SLEEP_INTERACTIVE,
+       SLEEP_INTERRUPTED,
+};
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        struct thread_info *thread_info;
@@ -706,7 +713,7 @@ struct task_struct {
        unsigned long sleep_avg;
        unsigned long long timestamp, last_ran;
        unsigned long long sched_time; /* sched_clock time spent running */
-       int activated;
+       enum sleep_type sleep_type;
 
        unsigned long policy;
        cpumask_t cpus_allowed;
diff --git a/kernel/sched.c b/kernel/sched.c
index 6e52e0adff80dfc04fa921dd88a0df7bce8a4a2a..f55ce5adac55212d8bf154e1abee7232125ad687 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -704,7 +704,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
                 * prevent them suddenly becoming cpu hogs and starving
                 * other processes.
                 */
-               if (p->mm && p->activated != -1 &&
+               if (p->mm && p->sleep_type != SLEEP_NONINTERACTIVE &&
                        sleep_time > INTERACTIVE_SLEEP(p)) {
                                p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
                                                DEF_TIMESLICE);
@@ -714,7 +714,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
                         * limited in their sleep_avg rise as they
                         * are likely to be waiting on I/O
                         */
-                       if (p->activated == -1 && p->mm) {
+                       if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
                                if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
                                        sleep_time = 0;
                                else if (p->sleep_avg + sleep_time >=
@@ -769,7 +769,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
         * This checks to make sure it's not an uninterruptible task
         * that is now waking up.
         */
-       if (!p->activated) {
+       if (p->sleep_type == SLEEP_NORMAL) {
                /*
                 * Tasks which were woken up by interrupts (ie. hw events)
                 * are most likely of interactive nature. So we give them
@@ -778,13 +778,13 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
                 * on a CPU, first time around:
                 */
                if (in_interrupt())
-                       p->activated = 2;
+                       p->sleep_type = SLEEP_INTERRUPTED;
                else {
                        /*
                         * Normal first-time wakeups get a credit too for
                         * on-runqueue time, but it will be weighted down:
                         */
-                       p->activated = 1;
+                       p->sleep_type = SLEEP_INTERACTIVE;
                }
        }
        p->timestamp = now;
@@ -1272,7 +1272,7 @@ out_activate:
                 * Tasks on involuntary sleep don't earn
                 * sleep_avg beyond just interactive state.
                 */
-               p->activated = -1;
+               p->sleep_type = SLEEP_NONINTERACTIVE;
        }
 
        /*
@@ -2875,6 +2875,12 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+       return (sleep_type == SLEEP_INTERACTIVE ||
+               sleep_type == SLEEP_INTERRUPTED);
+}
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -2998,12 +3004,12 @@ go_idle:
        queue = array->queue + idx;
        next = list_entry(queue->next, task_t, run_list);
 
-       if (!rt_task(next) && next->activated > 0) {
+       if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
                unsigned long long delta = now - next->timestamp;
                if (unlikely((long long)(now - next->timestamp) < 0))
                        delta = 0;
 
-               if (next->activated == 1)
+               if (next->sleep_type == SLEEP_INTERACTIVE)
                        delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
                array = next->array;
@@ -3016,7 +3022,7 @@ go_idle:
                } else
                        requeue_task(next, array);
        }
-       next->activated = 0;
+       next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);
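
Pulling the scattered assignments together, the classification the patch
preserves can be sketched as a single helper (illustrative only, not part of
the patch; classify_wakeup() and its parameters are hypothetical, the enum
values are the ones added above):

	static enum sleep_type classify_wakeup(int involuntary, int from_irq)
	{
		if (involuntary)	/* try_to_wake_up(): task was on involuntary sleep */
			return SLEEP_NONINTERACTIVE;
		if (from_irq)		/* activate_task(): woken from in_interrupt() context */
			return SLEEP_INTERRUPTED;
		return SLEEP_INTERACTIVE;	/* activate_task(): normal first-time wakeup */
	}

schedule() then resets the field to SLEEP_NORMAL once the task has been
selected to run, as in the final hunk above.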