itimers: Add tracepoints for itimer
/*
 * linux/kernel/itimer.c
 *
 * Copyright (C) 1992 Darren Senn
 */

/* These are all the functions necessary to implement itimers */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/posix-timers.h>
#include <linux/hrtimer.h>
#include <trace/events/timer.h>

#include <asm/uaccess.h>

/**
 * itimer_get_remtime - get remaining time for the timer
 *
 * @timer: the timer to read
 *
 * Returns the delta between the expiry time and now. The raw delta can
 * be less than zero; for a pending timer which has already expired,
 * 1usec is returned instead.
 */
static struct timeval itimer_get_remtime(struct hrtimer *timer)
{
	ktime_t rem = hrtimer_get_remaining(timer);

	/*
	 * Racy but safe: if the itimer expires after the above
	 * hrtimer_get_remaining() call but before this condition
	 * then we return 0 - which is correct.
	 */
	if (hrtimer_active(timer)) {
		if (rem.tv64 <= 0)
			rem.tv64 = NSEC_PER_USEC;
	} else
		rem.tv64 = 0;

	return ktime_to_timeval(rem);
}

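/*
 * Read back a CPU-time interval timer (ITIMER_VIRTUAL or ITIMER_PROF):
 * under siglock, convert the absolute expiry into the time remaining
 * relative to the thread group's accumulated CPU time and report it
 * together with the reload interval.
 */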
static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			struct itimerval *const value)
{
	cputime_t cval, cinterval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero)) {
		struct task_cputime cputime;
		cputime_t t;

		thread_group_cputimer(tsk, &cputime);
		if (clock_id == CPUCLOCK_PROF)
			t = cputime_add(cputime.utime, cputime.stime);
		else
			/* CPUCLOCK_VIRT */
			t = cputime.utime;

		if (cputime_le(cval, t))
			/* about to fire */
			cval = cputime_one_jiffy;
		else
			cval = cputime_sub(cval, t);
	}

	spin_unlock_irq(&tsk->sighand->siglock);

	cputime_to_timeval(cval, &value->it_value);
	cputime_to_timeval(cinterval, &value->it_interval);
}

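/*
 * Fetch the current value of one of current's three interval timers
 * into @value; the caller provides kernel memory.
 */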
int do_getitimer(int which, struct itimerval *value)
{
	struct task_struct *tsk = current;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
		value->it_interval =
			ktime_to_timeval(tsk->signal->it_real_incr);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
		break;
	case ITIMER_PROF:
		get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
		break;
	default:
		return(-EINVAL);
	}
	return 0;
}

SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
{
	int error = -EFAULT;
	struct itimerval get_buffer;

	if (value) {
		error = do_getitimer(which, &get_buffer);
		if (!error &&
		    copy_to_user(value, &get_buffer, sizeof(get_buffer)))
			error = -EFAULT;
	}
	return error;
}


/*
 * The timer is automagically restarted when interval != 0: SIGALRM
 * delivery rearms the hrtimer when the signal is dequeued (see
 * dequeue_signal() in kernel/signal.c), so this callback itself
 * returns HRTIMER_NORESTART.
 */
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
	struct signal_struct *sig =
		container_of(timer, struct signal_struct, real_timer);

	trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
	kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);

	return HRTIMER_NORESTART;
}

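/*
 * Error helper for the CPU itimers: return by how many nanoseconds the
 * jiffy-granular cputime value @ct overshoots the exact request
 * @real_ns, or 0 if it does not. The result is stored in it->error /
 * it->incr_error and consumed by the posix CPU timer expiry code to
 * compensate for the coarse cputime granularity.
 */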
static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
{
	struct timespec ts;
	s64 cpu_ns;

	cputime_to_timespec(ct, &ts);
	cpu_ns = timespec_to_ns(&ts);

	return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
}

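/*
 * Arm or disarm a CPU-time interval timer for the current thread group.
 * The new expiry and increment are converted to cputime_t, the rounding
 * error versus the requested nanosecond values is recorded, and the
 * previous state is optionally returned through @ovalue.
 */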
static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   const struct itimerval *const value,
			   struct itimerval *const ovalue)
{
	cputime_t cval, nval, cinterval, ninterval;
	s64 ns_ninterval, ns_nval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	nval = timeval_to_cputime(&value->it_value);
	ns_nval = timeval_to_ns(&value->it_value);
	ninterval = timeval_to_cputime(&value->it_interval);
	ns_ninterval = timeval_to_ns(&value->it_interval);

	it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
	it->error = cputime_sub_ns(nval, ns_nval);

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero) ||
	    !cputime_eq(nval, cputime_zero)) {
		if (cputime_gt(nval, cputime_zero))
			nval = cputime_add(nval, cputime_one_jiffy);
		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
	}
	it->expires = nval;
	it->incr = ninterval;
	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);

	spin_unlock_irq(&tsk->sighand->siglock);

	if (ovalue) {
		cputime_to_timeval(cval, &ovalue->it_value);
		cputime_to_timeval(cinterval, &ovalue->it_interval);
	}
}

/*
 * Returns true if the timeval is in canonical form
 */
#define timeval_valid(t) \
	(((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))

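/*
 * Set one of current's interval timers from @value after validating it,
 * optionally returning the previous setting through @ovalue. Returns 0
 * on success or -EINVAL for a bad @which or non-canonical timevals.
 */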
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;

	/*
	 * Validate the timevals in value.
	 */
	if (!timeval_valid(&value->it_value) ||
	    !timeval_valid(&value->it_interval))
		return -EINVAL;

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval
				= ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0) {
			tsk->signal->it_real_incr =
				timeval_to_ktime(value->it_interval);
			hrtimer_start(timer, expires, HRTIMER_MODE_REL);
		} else
			tsk->signal->it_real_incr.tv64 = 0;

		trace_itimer_state(ITIMER_REAL, value, 0);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
		break;
	case ITIMER_PROF:
		set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * alarm_setitimer - set alarm in seconds
 *
 * @seconds:	number of seconds until alarm
 *		0 disables the alarm
 *
 * Returns the remaining time in seconds of a pending timer or 0 when
 * the timer is not active.
 *
 * On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid
 * negative timeval settings which would cause immediate expiry.
 */
unsigned int alarm_setitimer(unsigned int seconds)
{
	struct itimerval it_new, it_old;

#if BITS_PER_LONG < 64
	if (seconds > INT_MAX)
		seconds = INT_MAX;
#endif
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;

	do_setitimer(ITIMER_REAL, &it_new, &it_old);

	/*
	 * We can't return 0 if we have an alarm pending ... And we'd
	 * better return too much than too little anyway
	 */
	if ((!it_old.it_value.tv_sec && it_old.it_value.tv_usec) ||
	    it_old.it_value.tv_usec >= 500000)
		it_old.it_value.tv_sec++;

	return it_old.it_value.tv_sec;
}

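/*
 * setitimer() system call: copy the new setting in from user space,
 * apply it via do_setitimer() and, if requested, copy the previous
 * setting back out to @ovalue.
 */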
SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
		struct itimerval __user *, ovalue)
{
	struct itimerval set_buffer, get_buffer;
	int error;

	if (value) {
		if (copy_from_user(&set_buffer, value, sizeof(set_buffer)))
			return -EFAULT;
	} else
		memset((char *) &set_buffer, 0, sizeof(set_buffer));

	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
	if (error || !ovalue)
		return error;

	if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
		return -EFAULT;
	return 0;
}
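
/*
 * Illustrative user-space sketch (not part of this file) showing how the
 * setitimer()/getitimer() syscalls implemented above are typically used:
 * a 250ms periodic ITIMER_REAL whose expiry is delivered as SIGALRM.
 *
 *	#include <signal.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static void on_alarm(int sig) { (void)sig; }
 *
 *	int main(void)
 *	{
 *		struct itimerval it = {
 *			.it_interval = { .tv_sec = 0, .tv_usec = 250000 },
 *			.it_value    = { .tv_sec = 0, .tv_usec = 250000 },
 *		};
 *
 *		signal(SIGALRM, on_alarm);
 *		setitimer(ITIMER_REAL, &it, NULL);
 *		for (;;)
 *			pause();	// each SIGALRM interrupts pause()
 *	}
 */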