sched: keep utime/stime monotonic
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 29 Oct 2007 20:18:11 +0000 (21:18 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 29 Oct 2007 20:18:11 +0000 (21:18 +0100)
keep utime/stime monotonic.

cpustats use utime/stime as a ratio against sum_exec_runtime; as a
consequence it can happen - when the ratio changes faster than time
accumulates - that either can appear to go backwards.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
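
Since the reported split is just the utime:stime ratio applied to
sum_exec_runtime, a shift in that ratio can outrun the growth of
sum_exec_runtime and make the scaled value shrink between two reads.
The stand-alone C sketch below (user-space only, hypothetical names,
simplified from the kernel logic) reproduces that regression with two
made-up samples and shows how clamping against the previously reported
value - as the fs/proc/array.c hunk does with p->prev_utime - keeps the
reported utime monotonic.

/*
 * Minimal user-space sketch, not kernel code. struct sample and
 * split_utime() are hypothetical; the clamp mirrors the
 * max(prev_utime, ...) added by this patch.
 */
#include <stdio.h>

typedef unsigned long long u64;

/* Raw accounting as seen at one sampling instant. */
struct sample {
	u64 utime;            /* tick-based user time   */
	u64 stime;            /* tick-based system time */
	u64 sum_exec_runtime; /* precise CFS runtime    */
};

/* utime scaled against the precise runtime, as described above. */
static u64 split_utime(const struct sample *s)
{
	u64 total = s->utime + s->stime;

	if (!total)
		return 0;
	return s->sum_exec_runtime * s->utime / total;
}

int main(void)
{
	/*
	 * Between the two samples sum_exec_runtime barely advances, but
	 * the utime:stime ratio shifts toward stime, so the scaled utime
	 * would appear to go backwards without the clamp.
	 */
	struct sample samples[] = {
		{ .utime = 50, .stime = 50, .sum_exec_runtime = 1000 },
		{ .utime = 50, .stime = 60, .sum_exec_runtime = 1005 },
	};
	u64 prev_utime = 0;
	int i;

	for (i = 0; i < 2; i++) {
		u64 raw = split_utime(&samples[i]);

		/* The fix: never report less than previously reported. */
		if (raw > prev_utime)
			prev_utime = raw;

		printf("sample %d: raw utime %llu, reported utime %llu\n",
		       i, raw, prev_utime);
	}
	return 0;
}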
fs/proc/array.c
include/linux/sched.h
kernel/fork.c

diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95afb561f8e8742118197b0731317ea871463..d80baaabf835e1c98911004035518bf5df7dc51d 100644
@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
        }
        utime = (clock_t)temp;
 
-       return clock_t_to_cputime(utime);
+       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+       return p->prev_utime;
 }
 
 static cputime_t task_stime(struct task_struct *p)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c07d595979fc71b4db19a65ce8e6cef7f143769..b0b1fe6e0b17a5c41eb139c66137b4b2a51634c4 100644
@@ -1009,6 +1009,7 @@ struct task_struct {
        unsigned int rt_priority;
        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
+       cputime_t prev_utime;
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
        struct timespec real_start_time;        /* boot based time */
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac9456e151b2d8731b52475a5af4ca68e1..a65bfc47177ce7e6c3f6bc68fa7a3b3e1a70dbf2 100644
@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->gtime = cputime_zero;
        p->utimescaled = cputime_zero;
        p->stimescaled = cputime_zero;
+       p->prev_utime = cputime_zero;
 
 #ifdef CONFIG_TASK_XACCT
        p->rchar = 0;           /* I/O counter: bytes read */