/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as base and the unstable clock deltas. The deltas are filtered,
 * making the clock monotonic and keeping it within an expected window. This
 * window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>


#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;
	u64			prev_raw;
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

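	/*
	 * Lower bound: gtod as of the last tick, advanced by the whole
	 * jiffies elapsed since, less one jiffy of slack (the current
	 * jiffy may only just have begun).
	 */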
	min_clock = scd->tick_gtod +
		(delta_jiffies ? delta_jiffies - 1 : 0) * TICK_NSEC;

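	/*
	 * Filter out backward motion of the raw clock: when the raw
	 * value went backwards, advance by a single nanosecond instead
	 * so the result stays strictly monotonic.
	 */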
	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

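	/*
	 * Clip to max_clock; if the clock is already at or past the
	 * window edge, advance by a single nanosecond so it still
	 * moves forward.
	 */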
	if (unlikely(clock + delta > max_clock)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	scd->prev_raw = now;
	scd->clock = clock;
}

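/*
 * Take both per-cpu clock locks in a fixed (lowest-address-first) order
 * so that two cpus cross-reading each other's clock cannot deadlock.
 */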
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

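/*
 * Return the (filtered) per-cpu clock for @cpu, in nanoseconds;
 * the caller must have irqs disabled.
 */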
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *   tick_raw	(offset between raw counters)
		 *   tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now -= my_scd->tick_gtod;
		now += scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
	}

	__update_sched_clock(scd, now);
	clock = scd->clock;

	__raw_spin_unlock(&scd->lock);

	return clock;
}

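/*
 * Called from the periodic scheduler tick, with irqs disabled:
 * resynchronize this cpu's raw and gtod tick offsets.
 */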
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now = sched_clock();
	now_gtod = ktime_to_ns(ktime_get());

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * Update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod on top of
	 * that would advance the clock by 2 jiffies.
	 */
	scd->tick_jiffies = now_jiffies;
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
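	/* catch the clock up to now, before the TSC potentially stops */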
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}