/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as a base and the unstable clock deltas. The deltas are
 * filtered, making the clock monotonic and keeping it within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy difference).
 */
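/*
 * Illustrative numbers (not from the code itself): with HZ=1000, TICK_NSEC
 * is roughly 1,000,000 ns. If the last tick on a cpu was 2 jiffies ago
 * (delta_jiffies == 2), __update_sched_clock() below clips the raw clock to
 * the window [tick_gtod + 1 * TICK_NSEC, tick_gtod + 4 * TICK_NSEC].
 */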
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>


#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;
	u64			prev_raw;
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
#ifdef CONFIG_NO_HZ
	int			check_max;
#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}
#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the jiffies delta inaccurate, which
 * prevents us from checking the maximum time update.
 * Disable the maximum check while the tick is stopped.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */
/*
 * update the percpu scd from the raw @now value
 *
 * - filter out backward motion
 * - use jiffies to generate a min/max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	min_clock = scd->tick_gtod +
		(delta_jiffies ? delta_jiffies - 1 : 0) * TICK_NSEC;

	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	if (time)
		*time = clock;
	else {
		scd->prev_raw = now;
		scd->clock = clock;
	}
}
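/*
 * Worked example for the clipping above (illustrative values, assuming
 * HZ=100 so TICK_NSEC is roughly 10,000,000 ns): with tick_gtod =
 * 50,000,000 and delta_jiffies = 1 we get min_clock = 50,000,000 and
 * max_clock = 80,000,000. A raw delta that would push clock to 95,000,000
 * is clipped to max_clock; a negative raw delta merely bumps clock by one,
 * after which the min_clock clamp pulls it forward if gtod has moved on.
 */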
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
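/*
 * Note: taking the two locks in pointer order gives every path a globally
 * consistent lock order, so two cpus concurrently reading each other's
 * clock via sched_clock_cpu() cannot ABBA-deadlock.
 */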
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *   tick_raw	(offset between raw counters)
		 *   tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}
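/*
 * Sketch of the remote rebase above (not a separate code path): it computes
 *   now' = now - my_scd->tick_raw + scd->tick_raw
 *	      + my_scd->tick_gtod - scd->tick_gtod
 * i.e. (now - my_scd->tick_raw), the raw time elapsed since our last tick,
 * is re-expressed against the remote cpu's raw counter, and the gtod terms
 * compensate for the tick offset between the two cpus.
 */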
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	scd->tick_jiffies = now_jiffies;
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
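/*
 * Note: this fallback only has tick resolution; e.g. with HZ=250,
 * NSEC_PER_SEC / HZ = 4,000,000 ns, so the clock advances in 4 ms steps,
 * which is why architectures with a usable cycle counter override it.
 */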