/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
static DEFINE_RAW_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

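/*
 * The periodic tick has two jobs: per-CPU bookkeeping (process times,
 * profiling), which every CPU does for itself, and the global jiffies
 * and timekeeping update, which is done by exactly one CPU - the one
 * recorded in tick_do_timer_cpu. At boot the duty is marked
 * TICK_DO_TIMER_BOOT until the first tick device is set up.
 */
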
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as tick_periodic() increments jiffies, which
		 * then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

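/*
 * A device without CLOCK_EVT_FEAT_PERIODIC (or one that is shadowed
 * by the oneshot broadcast device) cannot simply be switched to
 * periodic mode. It is run in oneshot mode instead and reprogrammed
 * one tick_period ahead on every expiry, both below at setup time
 * and in tick_handle_periodic() above.
 */
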
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

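/*
 * tick_setup_device() below is used both for the first device on a
 * CPU and when a better device replaces the current one. On first
 * setup it may also claim the do_timer duty for this CPU and
 * initialise tick_period from HZ; on replacement the old handler and
 * pending expiry are carried over when the device runs in oneshot
 * mode.
 */
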
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

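/*
 * The next two helpers implement the selection policy for newly
 * registered clock event devices: tick_check_percpu() rejects
 * devices that cannot serve this CPU at all, tick_check_preferred()
 * then decides whether the new device is actually better than the
 * one currently in use.
 */
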
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
	       newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

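/*
 * Example: a per-CPU oneshot capable timer replaces a global,
 * periodic-only device even when its rating is lower, because the
 * cpumasks differ. With identical cpumasks the higher rated device
 * wins, unless switching would trade away oneshot capability that
 * the current device has or that is already in active use.
 */
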
/*
 * Check whether the newly registered device should be used.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner)) {
		/* Don't leak tick_device_lock when the owner can't be pinned */
		raw_spin_unlock_irqrestore(&tick_device_lock, flags);
		return;
	}

	/*
	 * Replace the existing device, if any, by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

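/*
 * CPU hotplug support: when a CPU goes down, tick_notify() below
 * first moves the do_timer duty to another online CPU
 * (tick_handover_do_timer) and then detaches the dead CPU's event
 * device (tick_shutdown).
 */
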
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when another CPU is dead. So we
 * cannot access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

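/*
 * System sleep support: tick_suspend() shuts the local tick device
 * down for the suspend transition, tick_resume() brings it back.
 * On resume the device is re-armed in whatever mode the tick device
 * was in (periodic or oneshot), unless the broadcast device takes
 * over, as reported by tick_resume_broadcast().
 */
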
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

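/*
 * tick_notify() is the notification dispatcher for this file: it maps
 * the CLOCK_EVT_NOTIFY_* reasons coming from the clock events layer
 * onto the broadcast, hotplug and suspend/resume helpers above.
 */
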
void tick_notify(unsigned long reason, void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(dev);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}
}

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
}