sched: add rq_clock()/__rq_clock()
author	Ingo Molnar <mingo@elte.hu>
	Mon, 9 Jul 2007 16:51:58 +0000 (18:51 +0200)
committer	Ingo Molnar <mingo@elte.hu>
	Mon, 9 Jul 2007 16:51:58 +0000 (18:51 +0200)
Add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(),
used by CFS. It protects against common types of hardware-caused
sched_clock() problems: time warping forwards and backwards.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

index 085418bedccd02dd25a53fa7a0df850d4f170c15..29eb227e33f77a7154220dec0e355f3f147b2fde 100644
@@ -388,6 +388,52 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+/*
+ * Per-runqueue clock, as finegrained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+       u64 prev_raw = rq->prev_clock_raw;
+       u64 now = sched_clock();
+       s64 delta = now - prev_raw;
+       u64 clock = rq->clock;
+
+       /*
+        * Protect against sched_clock() occasionally going backwards:
+        */
+       if (unlikely(delta < 0)) {
+               clock++;
+               rq->clock_warps++;
+       } else {
+               /*
+                * Catch too large forward jumps too:
+                */
+               if (unlikely(delta > 2*TICK_NSEC)) {
+                       clock++;
+                       rq->clock_overflows++;
+               } else {
+                       if (unlikely(delta > rq->clock_max_delta))
+                               rq->clock_max_delta = delta;
+                       clock += delta;
+               }
+       }
+
+       rq->prev_clock_raw = now;
+       rq->clock = clock;
+
+       return clock;
+}
+
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+       int this_cpu = smp_processor_id();
+
+       if (this_cpu == cpu_of(rq))
+               return __rq_clock(rq);
+
+       return rq->clock;
+}
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
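For illustration, the clamping policy can be exercised in isolation. The
sketch below is a hypothetical user-space rendition of the same idea, not
part of the patch: clock_state, monotonic_clock and the TICK_NSEC stand-in
are made-up names. A backward warp or a forward jump larger than 2*TICK_NSEC
advances the clock by only 1 ns; sane deltas pass through unchanged.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* stand-in for the kernel constant (1 ms tick) */

struct clock_state {
	uint64_t prev_raw;	/* last raw reading of the underlying clock */
	uint64_t clock;		/* filtered, monotonically increasing value */
};

/*
 * Same policy as __rq_clock(): backward warps and jumps larger than
 * 2*TICK_NSEC advance the clock by 1 ns only; normal deltas are added.
 */
static uint64_t monotonic_clock(struct clock_state *s, uint64_t raw)
{
	int64_t delta = (int64_t)(raw - s->prev_raw);

	if (delta < 0 || delta > 2 * TICK_NSEC)
		s->clock += 1;		/* clamp the warp or oversized jump */
	else
		s->clock += delta;	/* normal forward progress */

	s->prev_raw = raw;
	return s->clock;
}

int main(void)
{
	struct clock_state s = { .prev_raw = 1000, .clock = 1000 };
	uint64_t samples[] = { 1500, 900 /* warp */, 5000000 /* jump */, 5000600 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("raw=%llu -> clock=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)monotonic_clock(&s, samples[i]));

	return 0;
}

Feeding it the backward sample and the oversized jump shows the clock
advancing by only 1 ns in both cases, while the ordinary deltas accumulate
normally.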