/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
                tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
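        /*
         * Only one broadcast device is kept. A device which stops in
         * deep C-states (CLOCK_EVT_FEAT_C3STOP) cannot take over for
         * stopped local devices, so it is not eligible either.
         */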
        if (tick_broadcast_device.evtdev ||
            (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        clockevents_exchange_device(NULL, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpus_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
        return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder which needs
 * to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                cpu_set(cpu, tick_broadcast_mask);
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        }

        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
        int ret = 0, cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check if the current cpu is in the mask
         */
        if (cpu_isset(cpu, mask)) {
                cpu_clear(cpu, mask);
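                /*
                 * The current cpu can invoke its own event handler
                 * directly; no broadcast IPI is needed for it.
                 */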
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
                ret = 1;
        }

        if (!cpus_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the
                 * devices have different broadcast functions. For now,
                 * just use that of the first device. This works as long
                 * as we have this misfeature only on x86 (lapic).
                 */
                cpu = first_cpu(mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->broadcast(mask);
                ret = 1;
        }
        return ret;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        cpumask_t mask;

        spin_lock(&tick_broadcast_lock);

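        /*
         * Deliver the tick only to online cpus which currently rely on
         * the broadcast device.
         */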
        cpus_and(mask, cpu_online_map, tick_broadcast_mask);
        tick_do_broadcast(mask);

        spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        dev->next_event.tv64 = KTIME_MAX;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have
         * periodic mode:
         */
        for (;;) {
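                /*
                 * clockevents_program_event() fails when the requested
                 * expiry already lies in the past. In that case the
                 * tick was missed: broadcast it by hand and retry with
                 * the following period.
                 */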
                ktime_t next = ktime_add(dev->next_event, tick_period);

                if (!clockevents_program_event(dev, next, ktime_get()))
                        return;
                tick_do_periodic_broadcast();
        }
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
static void tick_do_broadcast_on_off(void *why)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags, *reason = why;
        int cpu;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device in broadcast mode forever or is it not
         * affected by the powerstate?
         */
        if (!dev || !tick_device_is_functional(dev) ||
            !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
                if (!cpu_isset(cpu, tick_broadcast_mask)) {
                        cpu_set(cpu, tick_broadcast_mask);
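                        /*
                         * In periodic mode the broadcast device now
                         * delivers this cpu's ticks, so the local
                         * device can be shut down.
                         */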
                        if (td->mode == TICKDEV_MODE_PERIODIC)
                                clockevents_set_mode(dev,
                                                     CLOCK_EVT_MODE_SHUTDOWN);
                }
        } else {
                if (cpu_isset(cpu, tick_broadcast_mask)) {
                        cpu_clear(cpu, tick_broadcast_mask);
                        if (td->mode == TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
        }

        if (cpus_empty(tick_broadcast_mask))
                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
        else {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        int cpu = get_cpu();

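        /*
         * The mode switch must run on the cpu whose tick device is
         * affected: call the handler directly when we already run
         * there, otherwise via smp_call_function_single().
         */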
        if (cpu == *oncpu)
                tick_do_broadcast_on_off(&reason);
        else
                smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
                                         &reason, 1, 1);
        put_cpu();
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpu_clear(cpu, tick_broadcast_mask);

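        /*
         * In periodic mode: if no cpu relies on the broadcast device
         * any longer, it can be shut down.
         */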
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpus_empty(tick_broadcast_mask))
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
        }

        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

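/*
 * Program the broadcast device for the given expiry time. If force is
 * set, keep retrying with the minimum delta from the current time until
 * the event is accepted.
 */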
static int tick_broadcast_set_event(ktime_t expires, int force)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;
        ktime_t now = ktime_get();
        int res;

        for (;;) {
                res = clockevents_program_event(bc, expires, now);
                if (!res || !force)
                        return res;
                now = ktime_get();
                expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
        }
}

/*
 * Reprogram the broadcast device:
 *
 * Called with tick_broadcast_lock held and interrupts disabled.
 */
static int tick_broadcast_reprogram(void)
{
        ktime_t expires = { .tv64 = KTIME_MAX };
        struct tick_device *td;
        int cpu;

        /*
         * Find the event which expires next:
         */
        for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
             cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 < expires.tv64)
                        expires = td->evtdev->next_event;
        }

        if (expires.tv64 == KTIME_MAX)
                return 0;

        return tick_broadcast_set_event(expires, 0);
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        cpumask_t mask;
        ktime_t now;
        int cpu;

        spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        mask = CPU_MASK_NONE;
        now = ktime_get();
        /* Find all expired events */
        for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
             cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpu_set(cpu, mask);
        }

        /*
         * Wake up the cpus which have an expired event. The broadcast
         * device is reprogrammed in the return from idle code.
         */
        if (!tick_do_broadcast(mask)) {
                /*
                 * The global event did not expire any CPU local
                 * events. This happens in dyntick mode, as the
                 * maximum PIT delta is quite small.
                 */
                if (tick_broadcast_reprogram())
                        goto again;
        }
        spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                goto out;

        bc = tick_broadcast_device.evtdev;
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
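                /*
                 * The cpu enters a deep power state: hand its tick over
                 * to the broadcast device. Shut the local device down
                 * and pull the broadcast expiry forward if this cpu's
                 * next event is earlier than the one currently
                 * programmed.
                 */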
                if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
                        cpu_set(cpu, tick_broadcast_oneshot_mask);
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                        if (dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(dev->next_event, 1);
                }
        } else {
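                /*
                 * The cpu leaves the power state: switch the local
                 * device back to oneshot mode and reprogram its pending
                 * event, if any.
                 */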
                if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
                        cpu_clear(cpu, tick_broadcast_oneshot_mask);
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
                        if (dev->next_event.tv64 != KTIME_MAX)
                                tick_program_event(dev->next_event, 1);
                }
        }

out:
        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot mode
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
                bc->event_handler = tick_handle_oneshot_broadcast;
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                bc->next_event.tv64 = KTIME_MAX;
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);
        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpu_clear(cpu, tick_broadcast_oneshot_mask);

        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
                if (bc && cpus_empty(tick_broadcast_oneshot_mask))
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
        }

        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif