/*
 * kernel/posix-cpu-timers.c (Linux 3.1-rc10)
 *
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>

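/*
 * A CPU-clock clockid_t encodes both a pid and a clock type, via the
 * CPUCLOCK_* macros in <linux/posix-timers.h>: the low two bits select
 * PROF (utime + stime), VIRT (utime only) or SCHED (sum_exec_runtime),
 * bit 2 distinguishes per-thread from process-wide clocks, and the
 * remaining bits carry the (complemented) pid, with pid 0 meaning the
 * calling task itself.
 */
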
/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update the
 * tsk->signal->cputime_expires expiration cache if necessary.  Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}

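/*
 * Validate a CPU-clock clockid_t: the clock type must be in range and,
 * if a pid is encoded, that task must exist and match the per-thread
 * vs. process-wide choice implied by the clock.
 */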
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}

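/*
 * A union cpu_time_count holds either a cputime_t (PROF and VIRT
 * clocks) or nanoseconds in .sched (SCHED clock).  These helpers
 * convert between that representation and a timespec.
 */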
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}

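/*
 * Clock-kind-aware comparison and arithmetic on union cpu_time_count
 * values; which member is live depends on CPUCLOCK_WHICH().
 */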
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
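/*
 * The expiry is advanced by power-of-two multiples of the increment so
 * that a timer lagging far behind catches up in O(log(delta/incr))
 * steps rather than one step per missed period; each doubled step
 * taken accounts for 1 << i overruns.
 */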
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}

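/*
 * Per-thread clock samples: profiling time is user + system, virtual
 * time is user time only.
 */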
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}

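/*
 * clock_getres() for CPU clocks: one tick (1s/HZ) for the tick-based
 * PROF and VIRT clocks, nominally 1ns for the SCHED clock.
 */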
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, its true
                         * resolution isn't exported anywhere, but it is much
                         * finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
}

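/*
 * Sum the cputime of a whole thread group: start from the totals that
 * dead threads left behind in signal_struct, then add each live
 * thread's counters.  The thread-group walk is protected by RCU, and
 * pid_alive() guards against a racing exit.
 */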
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        struct task_struct *t;

        times->utime = sig->utime;
        times->stime = sig->stime;
        times->sum_exec_runtime = sig->sum_sched_runtime;

        rcu_read_lock();
        /* make sure we can trust tsk->thread_group list */
        if (!likely(pid_alive(tsk)))
                goto out;

        t = tsk;
        do {
                times->utime = cputime_add(times->utime, t->utime);
                times->stime = cputime_add(times->stime, t->stime);
                times->sum_exec_runtime += task_sched_runtime(t);
        } while_each_thread(tsk, t);
out:
        rcu_read_unlock();
}

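/*
 * Merge sample @b into @a, keeping the maximum of each field, so that
 * the cached group cputime never moves backwards.
 */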
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
        if (cputime_gt(b->utime, a->utime))
                a->utime = b->utime;

        if (cputime_gt(b->stime, a->stime))
                a->stime = b->stime;

        if (b->sum_exec_runtime > a->sum_exec_runtime)
                a->sum_exec_runtime = b->sum_exec_runtime;
}

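/*
 * Return the group's cached cputime.  If the per-process cputimer is
 * not running yet, start it, first synchronizing the cache with a
 * fresh thread_group_cputime() sample.
 */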
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        spin_lock_irqsave(&cputimer->lock, flags);
        if (!cputimer->running) {
                cputimer->running = 1;
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start
                 * it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime, &sum);
        }
        *times = cputimer->cputime;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                cpu->sched = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}


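/*
 * clock_gettime() for CPU clocks.  Sampling our own clocks needs no
 * task lookup; for another task's clock we must find the task and
 * check that the caller is allowed to see it.
 */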
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->sighand) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct signal_struct *const sig = tsk->signal;

        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, sig->utime),
                       cputime_add(tsk->stime, sig->stime),
                       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

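/*
 * The task this timer was watching is dead: drop our reference to it
 * and record the residual expiry time for timer_gettime() to report.
 */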
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

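/*
 * Return true if the expiration cache slot @expires is unset (zero
 * means "no timer armed") or later than @new_exp, i.e. the cache needs
 * tightening to @new_exp.
 */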
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return cputime_eq(expires, cputime_zero) ||
               cputime_gt(expires, new_exp);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                union cpu_time_count *exp = &nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * need to update expiration cache. Take into account that
                 * for process timers we share expiration cache with itimers
                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                 */

                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, exp->cpu))
                                cputime_expires->prof_exp = exp->cpu;
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, exp->cpu))
                                cputime_expires->virt_exp = exp->cpu;
                        break;
                case CPUCLOCK_SCHED:
                        if (cputime_expires->sched_exp == 0 ||
                            cputime_expires->sched_exp > exp->sched)
                                cputime_expires->sched_exp = exp->sched;
                        break;
                }
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires.sched = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                               struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, old_incr, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->sighand.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->sighand == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                spin_unlock(&p->sighand->siglock);
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer);
        }

        spin_unlock(&p->sighand->siglock);
        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   old_incr, &old->it_interval);
        }
        return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        unsigned long soft;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                               "RT Watchdog Timeout: %s[%d]\n",
                               tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

static void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;
        unsigned long flags;

        spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

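/*
 * Nanoseconds per cputime tick; initialized in init_posix_cpu_timers()
 * and used below to bound the rounding error accumulated when
 * re-arming periodic itimers.
 */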
static u32 onecputick;

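/*
 * Check one of the process-wide ITIMER_PROF/ITIMER_VIRTUAL itimers:
 * if it has expired, send the signal, re-arm a periodic itimer
 * (shaving off one jiffy whenever a full tick of error has
 * accumulated), and fold the resulting expiry into *expires for the
 * caller's expiration cache.
 */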
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             cputime_t *expires, cputime_t cur_time, int signo)
{
        if (cputime_eq(it->expires, cputime_zero))
                return;

        if (cputime_ge(cur_time, it->expires)) {
                if (!cputime_eq(it->incr, cputime_zero)) {
                        it->expires = cputime_add(it->expires, it->incr);
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires = cputime_sub(it->expires,
                                                          cputime_one_jiffy);
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = cputime_zero;
                }

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (!cputime_eq(it->expires, cputime_zero) &&
            (cputime_eq(*expires, cputime_zero) ||
             cputime_lt(it->expires, *expires))) {
                *expires = it->expires;
        }
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (cputime_eq(cputime->utime, cputime_zero) &&
            cputime_eq(cputime->stime, cputime_zero) &&
            cputime->sum_exec_runtime == 0)
                return 1;
        return 0;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = secs_to_cputime(soft);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        sig->cputime_expires.prof_exp = prof_expires;
        sig->cputime_expires.virt_exp = virt_expires;
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
                spin_lock(&p->sighand->siglock);
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                spin_lock(&p->sighand->siglock);
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        BUG_ON(!irqs_disabled());
        arm_timer(timer);
        spin_unlock(&p->sighand->siglock);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample has reached the corresponding nonzero
 * field of @expires.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (!cputime_eq(expires->utime, cputime_zero) &&
            cputime_ge(sample->utime, expires->utime))
                return 1;
        if (!cputime_eq(expires->stime, cputime_zero) &&
            cputime_ge(cputime_add(sample->utime, sample->stime),
                       expires->stime))
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (sig->cputimer.running) {
                struct task_cputime group_sample;

                spin_lock(&sig->cputimer.lock);
                group_sample = sig->cputimer.cputime;
                spin_unlock(&sig->cputimer.lock);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take all the firing timers off the
         * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        /*
         * If there are any active process wide timers (POSIX 1.b, itimers,
         * RLIMIT_CPU) cputimer must be running.
         */
        if (tsk->signal->cputimer.running)
                check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting itimer.  The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the new RLIMIT_CPU limit expires earlier than the cached prof_exp
         * cpu timer.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }
}

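/*
 * Guts of clock_nanosleep() on a CPU clock: arm a temporary timer for
 * the request and block until it fires or a signal arrives.  When
 * interrupted, *it is filled in with the time remaining.
 */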
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec t;
        struct itimerspec it;
        int error;

        t = ns_to_timespec(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
}

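/*
 * The static CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
 * clockids are simply the caller's own SCHED CPU clocks; the wrappers
 * below rebind the clockid and reuse the generic implementations above.
 */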
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

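/*
 * Operations backing the dynamic, pid-encoding CPU clockids (such as
 * those produced by clock_getcpuclockid() and pthread_getcpuclockid());
 * the posix-timers core dispatches those negative clockids here.
 */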
struct k_clock clock_posix_cpu = {
        .clock_getres	= posix_cpu_clock_getres,
        .clock_set	= posix_cpu_clock_set,
        .clock_get	= posix_cpu_clock_get,
        .timer_create	= posix_cpu_timer_create,
        .nsleep		= posix_cpu_nsleep,
        .nsleep_restart	= posix_cpu_nsleep_restart,
        .timer_set	= posix_cpu_timer_set,
        .timer_del	= posix_cpu_timer_del,
        .timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres	= process_cpu_clock_getres,
                .clock_get	= process_cpu_clock_get,
                .timer_create	= process_cpu_timer_create,
                .nsleep		= process_cpu_nsleep,
                .nsleep_restart	= process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres	= thread_cpu_clock_getres,
                .clock_get	= thread_cpu_clock_get,
                .timer_create	= thread_cpu_timer_create,
        };
        struct timespec ts;

        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
        WARN_ON(ts.tv_sec != 0);

        return 0;
}
__initcall(init_posix_cpu_timers);