/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

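/*
 * epoch_cyc and epoch_cyc_copy bracket epoch_ns: the writer updates all
 * three in a fixed order, and readers retry until the two cyc values
 * match, so the (epoch_cyc, epoch_ns) pair can be read consistently
 * without a lock (see update_sched_clock() and cyc_to_sched_clock()).
 */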
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult = NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

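/*
 * Until a platform registers a real counter via setup_sched_clock(),
 * sched_clock() falls back to a jiffy-based clock with only 1/HZ
 * resolution (hence cd.mult defaulting to NSEC_PER_SEC / HZ above).
 */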
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

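/*
 * Fixed-point conversion: ns = (cyc * mult) >> shift, where mult / 2^shift
 * approximates NSEC_PER_SEC / rate.  Illustrative values only (the real
 * ones come from clocks_calc_mult_shift()): a 1MHz counter could use
 * mult = 4000, shift = 2, so cyc_to_ns(3, 4000, 2) = (3 * 4000) >> 2
 * = 3000ns; in practice a much larger shift is chosen for precision.
 */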
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}

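/*
 * Called from the poll timer: refresh the epoch so that the counter can
 * never wrap more than once between two updates.  The interval is set
 * by setup_sched_clock() to comfortably less than the wrap period.
 */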
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
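
/*
 * Usage sketch (hypothetical platform code, not part of this file): a
 * free-running 32-bit counter at 24MHz would be registered from the
 * platform's timer init, while IRQs are still disabled, roughly as:
 *
 *	static u32 notrace my_counter_read(void)
 *	{
 *		return readl_relaxed(my_counter_base + MY_COUNTER_VALUE);
 *	}
 *
 *	setup_sched_clock(my_counter_read, 32, 24000000);
 *
 * my_counter_read, my_counter_base and MY_COUNTER_VALUE are made-up
 * names.  At 24MHz a 32-bit counter wraps about every 179 seconds, so
 * the poll timer set up above refires well within that.
 */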

static unsigned long long notrace sched_clock_32(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

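/*
 * sched_clock() goes through a function pointer so that platform code
 * can override the default 32-bit implementation above with its own
 * (e.g. one based on a wider hardware counter).
 */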
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at that point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

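/*
 * The underlying counter may stop or reset across suspend.  Bring the
 * epoch up to date before sleeping and report a frozen sched_clock()
 * until resume has resynchronised the epoch with the restarted counter.
 */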
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);