[PATCH] cleanup __exit_signal()
1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18#include <linux/sched.h>
19#include <linux/fs.h>
20#include <linux/tty.h>
21#include <linux/binfmts.h>
22#include <linux/security.h>
23#include <linux/syscalls.h>
24#include <linux/ptrace.h>
25#include <linux/posix-timers.h>
7ed20e1a 26#include <linux/signal.h>
c2f0c7c3 27#include <linux/audit.h>
c59ede7b 28#include <linux/capability.h>
29#include <asm/param.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32#include <asm/siginfo.h>
33
34/*
35 * SLAB caches for signal bits.
36 */
37
38static kmem_cache_t *sigqueue_cachep;
39
40/*
41 * In POSIX a signal is sent either to a specific thread (Linux task)
42 * or to the process as a whole (Linux thread group). How the signal
43 * is sent determines whether it's to one thread or the whole group,
44 * which determines which signal mask(s) are involved in blocking it
45 * from being delivered until later. When the signal is delivered,
46 * either it's caught or ignored by a user handler or it has a default
47 * effect that applies to the whole thread group (POSIX process).
48 *
49 * The possible effects an unblocked signal set to SIG_DFL can have are:
50 * ignore - Nothing Happens
51 * terminate - kill the process, i.e. all threads in the group,
52 * similar to exit_group. The group leader (only) reports
53 * WIFSIGNALED status to its parent.
54 * coredump - write a core dump file describing all threads using
55 * the same mm and then kill all those threads
56 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
57 *
58 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 59 * Other signals when not blocked and set to SIG_DFL behave as follows.
60 * The job control signals also have other special effects.
61 *
62 * +--------------------+------------------+
63 * | POSIX signal | default action |
64 * +--------------------+------------------+
65 * | SIGHUP | terminate |
66 * | SIGINT | terminate |
67 * | SIGQUIT | coredump |
68 * | SIGILL | coredump |
69 * | SIGTRAP | coredump |
70 * | SIGABRT/SIGIOT | coredump |
71 * | SIGBUS | coredump |
72 * | SIGFPE | coredump |
73 * | SIGKILL | terminate(+) |
74 * | SIGUSR1 | terminate |
75 * | SIGSEGV | coredump |
76 * | SIGUSR2 | terminate |
77 * | SIGPIPE | terminate |
78 * | SIGALRM | terminate |
79 * | SIGTERM | terminate |
80 * | SIGCHLD | ignore |
81 * | SIGCONT | ignore(*) |
82 * | SIGSTOP | stop(*)(+) |
83 * | SIGTSTP | stop(*) |
84 * | SIGTTIN | stop(*) |
85 * | SIGTTOU | stop(*) |
86 * | SIGURG | ignore |
87 * | SIGXCPU | coredump |
88 * | SIGXFSZ | coredump |
89 * | SIGVTALRM | terminate |
90 * | SIGPROF | terminate |
91 * | SIGPOLL/SIGIO | terminate |
92 * | SIGSYS/SIGUNUSED | coredump |
93 * | SIGSTKFLT | terminate |
94 * | SIGWINCH | ignore |
95 * | SIGPWR | terminate |
96 * | SIGRTMIN-SIGRTMAX | terminate |
97 * +--------------------+------------------+
98 * | non-POSIX signal | default action |
99 * +--------------------+------------------+
100 * | SIGEMT | coredump |
101 * +--------------------+------------------+
102 *
103 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104 * (*) Special job control effects:
105 * When SIGCONT is sent, it resumes the process (all threads in the group)
106 * from TASK_STOPPED state and also clears any pending/queued stop signals
107 * (any of those marked with "stop(*)"). This happens regardless of blocking,
108 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
109 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 110 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
111 * default action of stopping the process may happen later or never.
112 */
113
114#ifdef SIGEMT
115#define M_SIGEMT M(SIGEMT)
116#else
117#define M_SIGEMT 0
118#endif
119
120#if SIGRTMIN > BITS_PER_LONG
121#define M(sig) (1ULL << ((sig)-1))
122#else
123#define M(sig) (1UL << ((sig)-1))
124#endif
125#define T(sig, mask) (M(sig) & (mask))
126
127#define SIG_KERNEL_ONLY_MASK (\
128 M(SIGKILL) | M(SIGSTOP) )
129
130#define SIG_KERNEL_STOP_MASK (\
131 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
132
133#define SIG_KERNEL_COREDUMP_MASK (\
134 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
135 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
136 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
137
138#define SIG_KERNEL_IGNORE_MASK (\
139 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
140
141#define sig_kernel_only(sig) \
142 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
143#define sig_kernel_coredump(sig) \
144 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
145#define sig_kernel_ignore(sig) \
146 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
147#define sig_kernel_stop(sig) \
148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
149
150#define sig_needs_tasklist(sig) \
151 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
152
153#define sig_user_defined(t, signr) \
154 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
155 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
156
157#define sig_fatal(t, signr) \
158 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
159 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
160
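/*
 * Worked example (illustrative, assuming the usual Linux signal numbers,
 * e.g. SIGKILL == 9, SIGTSTP == 20):
 *
 *	M(SIGKILL)                is 1UL << 8, i.e. bit 8 set
 *	sig_kernel_only(SIGKILL)  is nonzero (SIGKILL is in SIG_KERNEL_ONLY_MASK)
 *	sig_kernel_stop(SIGTSTP)  is nonzero (job control stop signal)
 *	sig_kernel_stop(SIGRTMIN) is 0, because the (sig) < SIGRTMIN test
 *	                          excludes all realtime signals from these masks
 *	sig_fatal(t, SIGTERM)     is nonzero exactly when t's SIGTERM handler is
 *	                          SIG_DFL, since SIGTERM is in neither the
 *	                          ignore mask nor the stop mask
 */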
161static int sig_ignored(struct task_struct *t, int sig)
162{
163 void __user * handler;
164
165 /*
166 * Tracers always want to know about signals..
167 */
168 if (t->ptrace & PT_PTRACED)
169 return 0;
170
171 /*
172 * Blocked signals are never ignored, since the
173 * signal handler may change by the time it is
174 * unblocked.
175 */
176 if (sigismember(&t->blocked, sig))
177 return 0;
178
179 /* Is it explicitly or implicitly ignored? */
180 handler = t->sighand->action[sig-1].sa.sa_handler;
181 return handler == SIG_IGN ||
182 (handler == SIG_DFL && sig_kernel_ignore(sig));
183}
184
185/*
186 * Re-calculate pending state from the set of locally pending
187 * signals, globally pending signals, and blocked signals.
188 */
189static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
190{
191 unsigned long ready;
192 long i;
193
194 switch (_NSIG_WORDS) {
195 default:
196 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
197 ready |= signal->sig[i] &~ blocked->sig[i];
198 break;
199
200 case 4: ready = signal->sig[3] &~ blocked->sig[3];
201 ready |= signal->sig[2] &~ blocked->sig[2];
202 ready |= signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
205
206 case 2: ready = signal->sig[1] &~ blocked->sig[1];
207 ready |= signal->sig[0] &~ blocked->sig[0];
208 break;
209
210 case 1: ready = signal->sig[0] &~ blocked->sig[0];
211 }
212 return ready != 0;
213}
214
215#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
216
217fastcall void recalc_sigpending_tsk(struct task_struct *t)
218{
219 if (t->signal->group_stop_count > 0 ||
3e1d1d28 220 (freezing(t)) ||
221 PENDING(&t->pending, &t->blocked) ||
222 PENDING(&t->signal->shared_pending, &t->blocked))
223 set_tsk_thread_flag(t, TIF_SIGPENDING);
224 else
225 clear_tsk_thread_flag(t, TIF_SIGPENDING);
226}
227
228void recalc_sigpending(void)
229{
230 recalc_sigpending_tsk(current);
231}
232
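/*
 * Informal example: a SIGTERM queued while SIGTERM is blocked leaves
 * TIF_SIGPENDING clear (PENDING() masks it out); once the task unblocks
 * SIGTERM, e.g. via sigprocmask, recalc_sigpending() sets the flag again so
 * the signal is noticed on the next return to user mode.
 */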
233/* Given the mask, find the first available signal that should be serviced. */
234
235static int
236next_signal(struct sigpending *pending, sigset_t *mask)
237{
238 unsigned long i, *s, *m, x;
239 int sig = 0;
240
241 s = pending->signal.sig;
242 m = mask->sig;
243 switch (_NSIG_WORDS) {
244 default:
245 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
246 if ((x = *s &~ *m) != 0) {
247 sig = ffz(~x) + i*_NSIG_BPW + 1;
248 break;
249 }
250 break;
251
252 case 2: if ((x = s[0] &~ m[0]) != 0)
253 sig = 1;
254 else if ((x = s[1] &~ m[1]) != 0)
255 sig = _NSIG_BPW + 1;
256 else
257 break;
258 sig += ffz(~x);
259 break;
260
261 case 1: if ((x = *s &~ *m) != 0)
262 sig = ffz(~x) + 1;
263 break;
264 }
265
266 return sig;
267}
268
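/*
 * Worked example (illustrative): if SIGINT (2) and SIGTERM (15) are pending
 * and neither is blocked, word 0 of pending & ~mask has bits 1 and 14 set.
 * ffz(~x) yields the position of the lowest set bit of x, here 1, so
 * next_signal() returns sig = 2: the lowest-numbered deliverable signal wins.
 */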
dd0fc66f 269static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
270 int override_rlimit)
271{
272 struct sigqueue *q = NULL;
273
274 atomic_inc(&t->user->sigpending);
275 if (override_rlimit ||
276 atomic_read(&t->user->sigpending) <=
277 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
278 q = kmem_cache_alloc(sigqueue_cachep, flags);
279 if (unlikely(q == NULL)) {
280 atomic_dec(&t->user->sigpending);
281 } else {
282 INIT_LIST_HEAD(&q->list);
283 q->flags = 0;
284 q->user = get_uid(t->user);
285 }
286 return(q);
287}
288
514a01b8 289static void __sigqueue_free(struct sigqueue *q)
290{
291 if (q->flags & SIGQUEUE_PREALLOC)
292 return;
293 atomic_dec(&q->user->sigpending);
294 free_uid(q->user);
295 kmem_cache_free(sigqueue_cachep, q);
296}
297
298static void flush_sigqueue(struct sigpending *queue)
299{
300 struct sigqueue *q;
301
302 sigemptyset(&queue->signal);
303 while (!list_empty(&queue->list)) {
304 q = list_entry(queue->list.next, struct sigqueue , list);
305 list_del_init(&q->list);
306 __sigqueue_free(q);
307 }
308}
309
310/*
311 * Flush all pending signals for a task.
312 */
313
314void
315flush_signals(struct task_struct *t)
316{
317 unsigned long flags;
318
319 spin_lock_irqsave(&t->sighand->siglock, flags);
320 clear_tsk_thread_flag(t,TIF_SIGPENDING);
321 flush_sigqueue(&t->pending);
322 flush_sigqueue(&t->signal->shared_pending);
323 spin_unlock_irqrestore(&t->sighand->siglock, flags);
324}
325
326/*
327 * This function expects the tasklist_lock write-locked.
328 */
329void __exit_sighand(struct task_struct *tsk)
330{
331 struct sighand_struct * sighand = tsk->sighand;
332
333 /* Ok, we're done with the signal handlers */
334 tsk->sighand = NULL;
335 if (atomic_dec_and_test(&sighand->count))
aa1757f9 336 kmem_cache_free(sighand_cachep, sighand);
337}
338
339/*
340 * This function expects the tasklist_lock write-locked.
341 */
342void __exit_signal(struct task_struct *tsk)
343{
 344 struct signal_struct *sig = tsk->signal;
 345 struct sighand_struct *sighand;
 346
 347 BUG_ON(!sig);
 348 BUG_ON(!atomic_read(&sig->count));
 349
 350 rcu_read_lock();
 351 sighand = rcu_dereference(tsk->sighand);
 352 spin_lock(&sighand->siglock);
 353
 354 posix_cpu_timers_exit(tsk);
 355 if (atomic_dec_and_test(&sig->count))
 356 posix_cpu_timers_exit_group(tsk);
 357 else {
358 /*
359 * If there is any task waiting for the group exit
360 * then notify it:
361 */
362 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
363 wake_up_process(sig->group_exit_task);
364 sig->group_exit_task = NULL;
365 }
366 if (tsk == sig->curr_target)
367 sig->curr_target = next_thread(tsk);
368 /*
369 * Accumulate here the counters for all threads but the
370 * group leader as they die, so they can be added into
371 * the process-wide totals when those are taken.
372 * The group leader stays around as a zombie as long
373 * as there are other threads. When it gets reaped,
374 * the exit.c code will add its counts into these totals.
375 * We won't ever get here for the group leader, since it
376 * will have been the last reference on the signal_struct.
377 */
378 sig->utime = cputime_add(sig->utime, tsk->utime);
379 sig->stime = cputime_add(sig->stime, tsk->stime);
380 sig->min_flt += tsk->min_flt;
381 sig->maj_flt += tsk->maj_flt;
382 sig->nvcsw += tsk->nvcsw;
383 sig->nivcsw += tsk->nivcsw;
384 sig->sched_time += tsk->sched_time;
 385 sig = NULL; /* Marker for below. */
 386 }
 387
 388 tsk->signal = NULL;
 389 __exit_sighand(tsk);
 390 spin_unlock(&sighand->siglock);
 391 rcu_read_unlock();
 392
 393 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 394 flush_sigqueue(&tsk->pending);
 395 if (sig) {
 396 flush_sigqueue(&sig->shared_pending);
 397 __cleanup_signal(sig);
 398 }
399}
400
401/*
402 * Flush all handlers for a task.
403 */
404
405void
406flush_signal_handlers(struct task_struct *t, int force_default)
407{
408 int i;
409 struct k_sigaction *ka = &t->sighand->action[0];
410 for (i = _NSIG ; i != 0 ; i--) {
411 if (force_default || ka->sa.sa_handler != SIG_IGN)
412 ka->sa.sa_handler = SIG_DFL;
413 ka->sa.sa_flags = 0;
414 sigemptyset(&ka->sa.sa_mask);
415 ka++;
416 }
417}
418
419
420/* Notify the system that a driver wants to block all signals for this
421 * process, and wants to be notified if any signals at all were to be
422 * sent/acted upon. If the notifier routine returns non-zero, then the
423 * signal will be acted upon after all. If the notifier routine returns 0,
424 * then then signal will be blocked. Only one block per process is
425 * allowed. priv is a pointer to private data that the notifier routine
426 * can use to determine if the signal should be blocked or not. */
427
428void
429block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
430{
431 unsigned long flags;
432
433 spin_lock_irqsave(&current->sighand->siglock, flags);
434 current->notifier_mask = mask;
435 current->notifier_data = priv;
436 current->notifier = notifier;
437 spin_unlock_irqrestore(&current->sighand->siglock, flags);
438}
439
440/* Notify the system that blocking has ended. */
441
442void
443unblock_all_signals(void)
444{
445 unsigned long flags;
446
447 spin_lock_irqsave(&current->sighand->siglock, flags);
448 current->notifier = NULL;
449 current->notifier_data = NULL;
450 recalc_sigpending();
451 spin_unlock_irqrestore(&current->sighand->siglock, flags);
452}
453
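/*
 * Usage sketch (hypothetical driver code, all names made up): the notifier
 * is consulted before a signal in the given mask is dequeued; returning 0
 * holds the signal back, returning nonzero lets delivery go ahead.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_device *dev = priv;
 *		return dev->allow_signals;
 *	}
 *
 *	sigset_t all;
 *	sigfillset(&all);
 *	block_all_signals(my_notifier, dev, &all);
 *	... section that must not be disturbed ...
 *	unblock_all_signals();
 */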
858119e1 454static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
455{
456 struct sigqueue *q, *first = NULL;
457 int still_pending = 0;
458
459 if (unlikely(!sigismember(&list->signal, sig)))
460 return 0;
461
462 /*
463 * Collect the siginfo appropriate to this signal. Check if
464 * there is another siginfo for the same signal.
465 */
466 list_for_each_entry(q, &list->list, list) {
467 if (q->info.si_signo == sig) {
468 if (first) {
469 still_pending = 1;
470 break;
471 }
472 first = q;
473 }
474 }
475 if (first) {
476 list_del_init(&first->list);
477 copy_siginfo(info, &first->info);
478 __sigqueue_free(first);
479 if (!still_pending)
480 sigdelset(&list->signal, sig);
481 } else {
482
483 /* Ok, it wasn't in the queue. This must be
484 a fast-pathed signal or we must have been
485 out of queue space. So zero out the info.
486 */
487 sigdelset(&list->signal, sig);
488 info->si_signo = sig;
489 info->si_errno = 0;
490 info->si_code = 0;
491 info->si_pid = 0;
492 info->si_uid = 0;
493 }
494 return 1;
495}
496
497static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
498 siginfo_t *info)
499{
500 int sig = 0;
501
b17b0421 502 sig = next_signal(pending, mask);
503 if (sig) {
504 if (current->notifier) {
505 if (sigismember(current->notifier_mask, sig)) {
506 if (!(current->notifier)(current->notifier_data)) {
507 clear_thread_flag(TIF_SIGPENDING);
508 return 0;
509 }
510 }
511 }
512
513 if (!collect_signal(sig, pending, info))
514 sig = 0;
515
516 }
517 recalc_sigpending();
518
519 return sig;
520}
521
522/*
523 * Dequeue a signal and return the element to the caller, which is
524 * expected to free it.
525 *
526 * All callers have to hold the siglock.
527 */
528int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
529{
530 int signr = __dequeue_signal(&tsk->pending, mask, info);
531 if (!signr)
532 signr = __dequeue_signal(&tsk->signal->shared_pending,
533 mask, info);
534 if (signr && unlikely(sig_kernel_stop(signr))) {
535 /*
536 * Set a marker that we have dequeued a stop signal. Our
537 * caller might release the siglock and then the pending
538 * stop signal it is about to process is no longer in the
539 * pending bitmasks, but must still be cleared by a SIGCONT
540 * (and overruled by a SIGKILL). So those cases clear this
541 * shared flag after we've set it. Note that this flag may
542 * remain set after the signal we return is ignored or
543 * handled. That doesn't matter because its only purpose
544 * is to alert stop-signal processing code when another
545 * processor has come along and cleared the flag.
546 */
547 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
548 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
549 }
550 if ( signr &&
551 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
552 info->si_sys_private){
553 /*
554 * Release the siglock to ensure proper locking order
555 * of timer locks outside of siglocks. Note, we leave
556 * irqs disabled here, since the posix-timers code is
557 * about to disable them again anyway.
558 */
559 spin_unlock(&tsk->sighand->siglock);
560 do_schedule_next_timer(info);
561 spin_lock(&tsk->sighand->siglock);
562 }
563 return signr;
564}
565
566/*
567 * Tell a process that it has a new active signal..
568 *
569 * NOTE! we rely on the previous spin_lock to
570 * lock interrupts for us! We can only be called with
571 * "siglock" held, and the local interrupt must
572 * have been disabled when that got acquired!
573 *
574 * No need to set need_resched since signal event passing
575 * goes through ->blocked
576 */
577void signal_wake_up(struct task_struct *t, int resume)
578{
579 unsigned int mask;
580
581 set_tsk_thread_flag(t, TIF_SIGPENDING);
582
583 /*
584 * For SIGKILL, we want to wake it up in the stopped/traced case.
585 * We don't check t->state here because there is a race with it
586 * executing another processor and just now entering stopped state.
587 * By using wake_up_state, we ensure the process will wake up and
588 * handle its death signal.
589 */
590 mask = TASK_INTERRUPTIBLE;
591 if (resume)
592 mask |= TASK_STOPPED | TASK_TRACED;
593 if (!wake_up_state(t, mask))
594 kick_process(t);
595}
596
597/*
598 * Remove signals in mask from the pending set and queue.
599 * Returns 1 if any signals were found.
600 *
601 * All callers must be holding the siglock.
602 *
603 * This version takes a sigset mask and looks at all signals,
604 * not just those in the first mask word.
605 */
606static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
607{
608 struct sigqueue *q, *n;
609 sigset_t m;
610
611 sigandsets(&m, mask, &s->signal);
612 if (sigisemptyset(&m))
613 return 0;
614
615 signandsets(&s->signal, &s->signal, mask);
616 list_for_each_entry_safe(q, n, &s->list, list) {
617 if (sigismember(mask, q->info.si_signo)) {
618 list_del_init(&q->list);
619 __sigqueue_free(q);
620 }
621 }
622 return 1;
623}
624/*
625 * Remove signals in mask from the pending set and queue.
626 * Returns 1 if any signals were found.
627 *
628 * All callers must be holding the siglock.
629 */
630static int rm_from_queue(unsigned long mask, struct sigpending *s)
631{
632 struct sigqueue *q, *n;
633
634 if (!sigtestsetmask(&s->signal, mask))
635 return 0;
636
637 sigdelsetmask(&s->signal, mask);
638 list_for_each_entry_safe(q, n, &s->list, list) {
639 if (q->info.si_signo < SIGRTMIN &&
640 (mask & sigmask(q->info.si_signo))) {
641 list_del_init(&q->list);
642 __sigqueue_free(q);
643 }
644 }
645 return 1;
646}
647
648/*
649 * Bad permissions for sending the signal
650 */
651static int check_kill_permission(int sig, struct siginfo *info,
652 struct task_struct *t)
653{
654 int error = -EINVAL;
7ed20e1a 655 if (!valid_signal(sig))
656 return error;
657 error = -EPERM;
621d3121 658 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
659 && ((sig != SIGCONT) ||
660 (current->signal->session != t->signal->session))
661 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
662 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
663 && !capable(CAP_KILL))
664 return error;
665
666 error = security_task_kill(t, info, sig);
667 if (!error)
668 audit_signal_info(sig, t); /* Let audit system see the signal */
669 return error;
670}
671
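/*
 * Quick reference for the user-originated case checked above:
 *
 *	sender's uid/euid matches target's uid/suid ... allowed
 *	sender has CAP_KILL ........................... allowed
 *	SIGCONT within the sender's own session ....... allowed
 *	anything else ................................. -EPERM
 */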
672/* forward decl */
673static void do_notify_parent_cldstop(struct task_struct *tsk,
bc505a47 674 int to_self,
675 int why);
676
677/*
678 * Handle magic process-wide effects of stop/continue signals.
679 * Unlike the signal actions, these happen immediately at signal-generation
680 * time regardless of blocking, ignoring, or handling. This does the
681 * actual continuing for SIGCONT, but not the actual stopping for stop
682 * signals. The process stop is done as a signal action for SIG_DFL.
683 */
684static void handle_stop_signal(int sig, struct task_struct *p)
685{
686 struct task_struct *t;
687
dd12f48d 688 if (p->signal->flags & SIGNAL_GROUP_EXIT)
689 /*
690 * The process is in the middle of dying already.
691 */
692 return;
693
694 if (sig_kernel_stop(sig)) {
695 /*
696 * This is a stop signal. Remove SIGCONT from all queues.
697 */
698 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
699 t = p;
700 do {
701 rm_from_queue(sigmask(SIGCONT), &t->pending);
702 t = next_thread(t);
703 } while (t != p);
704 } else if (sig == SIGCONT) {
705 /*
706 * Remove all stop signals from all queues,
707 * and wake all threads.
708 */
709 if (unlikely(p->signal->group_stop_count > 0)) {
710 /*
711 * There was a group stop in progress. We'll
712 * pretend it finished before we got here. We are
713 * obliged to report it to the parent: if the
714 * SIGSTOP happened "after" this SIGCONT, then it
715 * would have cleared this pending SIGCONT. If it
716 * happened "before" this SIGCONT, then the parent
717 * got the SIGCHLD about the stop finishing before
718 * the continue happened. We do the notification
719 * now, and it's as if the stop had finished and
720 * the SIGCHLD was pending on entry to this kill.
721 */
722 p->signal->group_stop_count = 0;
723 p->signal->flags = SIGNAL_STOP_CONTINUED;
724 spin_unlock(&p->sighand->siglock);
bc505a47 725 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
726 spin_lock(&p->sighand->siglock);
727 }
728 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
729 t = p;
730 do {
731 unsigned int state;
732 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
733
734 /*
735 * If there is a handler for SIGCONT, we must make
736 * sure that no thread returns to user mode before
737 * we post the signal, in case it was the only
738 * thread eligible to run the signal handler--then
739 * it must not do anything between resuming and
740 * running the handler. With the TIF_SIGPENDING
741 * flag set, the thread will pause and acquire the
742 * siglock that we hold now and until we've queued
743 * the pending signal.
744 *
745 * Wake up the stopped thread _after_ setting
746 * TIF_SIGPENDING
747 */
748 state = TASK_STOPPED;
749 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
750 set_tsk_thread_flag(t, TIF_SIGPENDING);
751 state |= TASK_INTERRUPTIBLE;
752 }
753 wake_up_state(t, state);
754
755 t = next_thread(t);
756 } while (t != p);
757
758 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
759 /*
760 * We were in fact stopped, and are now continued.
761 * Notify the parent with CLD_CONTINUED.
762 */
763 p->signal->flags = SIGNAL_STOP_CONTINUED;
764 p->signal->group_exit_code = 0;
765 spin_unlock(&p->sighand->siglock);
bc505a47 766 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
767 spin_lock(&p->sighand->siglock);
768 } else {
769 /*
770 * We are not stopped, but there could be a stop
771 * signal in the middle of being processed after
772 * being removed from the queue. Clear that too.
773 */
774 p->signal->flags = 0;
775 }
776 } else if (sig == SIGKILL) {
777 /*
778 * Make sure that any pending stop signal already dequeued
779 * is undone by the wakeup for SIGKILL.
780 */
781 p->signal->flags = 0;
782 }
783}
784
785static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
786 struct sigpending *signals)
787{
788 struct sigqueue * q = NULL;
789 int ret = 0;
790
791 /*
792 * fast-pathed signals for kernel-internal things like SIGSTOP
793 * or SIGKILL.
794 */
b67a1b9e 795 if (info == SEND_SIG_FORCED)
796 goto out_set;
797
798 /* Real-time signals must be queued if sent by sigqueue, or
799 some other real-time mechanism. It is implementation
800 defined whether kill() does so. We attempt to do so, on
801 the principle of least surprise, but since kill is not
802 allowed to fail with EAGAIN when low on memory we just
803 make sure at least one signal gets delivered and don't
804 pass on the info struct. */
805
806 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
621d3121 807 (is_si_special(info) ||
808 info->si_code >= 0)));
809 if (q) {
810 list_add_tail(&q->list, &signals->list);
811 switch ((unsigned long) info) {
b67a1b9e 812 case (unsigned long) SEND_SIG_NOINFO:
813 q->info.si_signo = sig;
814 q->info.si_errno = 0;
815 q->info.si_code = SI_USER;
816 q->info.si_pid = current->pid;
817 q->info.si_uid = current->uid;
818 break;
b67a1b9e 819 case (unsigned long) SEND_SIG_PRIV:
820 q->info.si_signo = sig;
821 q->info.si_errno = 0;
822 q->info.si_code = SI_KERNEL;
823 q->info.si_pid = 0;
824 q->info.si_uid = 0;
825 break;
826 default:
827 copy_siginfo(&q->info, info);
828 break;
829 }
830 } else if (!is_si_special(info)) {
831 if (sig >= SIGRTMIN && info->si_code != SI_USER)
832 /*
833 * Queue overflow, abort. We may abort if the signal was rt
834 * and sent by user using something other than kill().
835 */
836 return -EAGAIN;
837 }
838
839out_set:
840 sigaddset(&signals->signal, sig);
841 return ret;
842}
843
844#define LEGACY_QUEUE(sigptr, sig) \
845 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
846
847
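/*
 * Example of the LEGACY_QUEUE() rule: a second SIGCHLD sent while one is
 * already pending for the task is simply dropped (the bit in the pending set
 * is already there), whereas signals >= SIGRTMIN never take this shortcut
 * and are queued once per send.
 */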
848static int
849specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
850{
851 int ret = 0;
852
853 if (!irqs_disabled())
854 BUG();
855 assert_spin_locked(&t->sighand->siglock);
856
857 /* Short-circuit ignored signals. */
858 if (sig_ignored(t, sig))
859 goto out;
860
861 /* Support queueing exactly one non-rt signal, so that we
862 can get more detailed information about the cause of
863 the signal. */
864 if (LEGACY_QUEUE(&t->pending, sig))
865 goto out;
866
867 ret = send_signal(sig, info, t, &t->pending);
868 if (!ret && !sigismember(&t->blocked, sig))
869 signal_wake_up(t, sig == SIGKILL);
870out:
871 return ret;
872}
873
874/*
875 * Force a signal that the process can't ignore: if necessary
876 * we unblock the signal and change any SIG_IGN to SIG_DFL.
877 */
878
879int
880force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
881{
882 unsigned long int flags;
883 int ret;
884
885 spin_lock_irqsave(&t->sighand->siglock, flags);
b0423a0d 886 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
1da177e4 887 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
888 }
889 if (sigismember(&t->blocked, sig)) {
1da177e4 890 sigdelset(&t->blocked, sig);
1da177e4 891 }
b0423a0d 892 recalc_sigpending_tsk(t);
893 ret = specific_send_sig_info(sig, info, t);
894 spin_unlock_irqrestore(&t->sighand->siglock, flags);
895
896 return ret;
897}
898
899void
900force_sig_specific(int sig, struct task_struct *t)
901{
b0423a0d 902 force_sig_info(sig, SEND_SIG_FORCED, t);
903}
904
905/*
906 * Test if P wants to take SIG. After we've checked all threads with this,
907 * it's equivalent to finding no threads not blocking SIG. Any threads not
908 * blocking SIG were ruled out because they are not running and already
909 * have pending signals. Such threads will dequeue from the shared queue
910 * as soon as they're available, so putting the signal on the shared queue
911 * will be equivalent to sending it to one such thread.
912 */
913static inline int wants_signal(int sig, struct task_struct *p)
914{
915 if (sigismember(&p->blocked, sig))
916 return 0;
917 if (p->flags & PF_EXITING)
918 return 0;
919 if (sig == SIGKILL)
920 return 1;
921 if (p->state & (TASK_STOPPED | TASK_TRACED))
922 return 0;
923 return task_curr(p) || !signal_pending(p);
924}
925
926static void
927__group_complete_signal(int sig, struct task_struct *p)
928{
929 struct task_struct *t;
930
931 /*
932 * Now find a thread we can wake up to take the signal off the queue.
933 *
934 * If the main thread wants the signal, it gets first crack.
935 * Probably the least surprising to the average bear.
936 */
188a1eaf 937 if (wants_signal(sig, p))
938 t = p;
939 else if (thread_group_empty(p))
940 /*
941 * There is just one thread and it does not need to be woken.
942 * It will dequeue unblocked signals before it runs again.
943 */
944 return;
945 else {
946 /*
947 * Otherwise try to find a suitable thread.
948 */
949 t = p->signal->curr_target;
950 if (t == NULL)
951 /* restart balancing at this thread */
952 t = p->signal->curr_target = p;
953 BUG_ON(t->tgid != p->tgid);
954
188a1eaf 955 while (!wants_signal(sig, t)) {
956 t = next_thread(t);
957 if (t == p->signal->curr_target)
958 /*
959 * No thread needs to be woken.
960 * Any eligible threads will see
961 * the signal in the queue soon.
962 */
963 return;
964 }
965 p->signal->curr_target = t;
966 }
967
968 /*
969 * Found a killable thread. If the signal will be fatal,
970 * then start taking the whole group down immediately.
971 */
972 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
973 !sigismember(&t->real_blocked, sig) &&
974 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
975 /*
976 * This signal will be fatal to the whole group.
977 */
978 if (!sig_kernel_coredump(sig)) {
979 /*
980 * Start a group exit and wake everybody up.
981 * This way we don't have other threads
982 * running and doing things after a slower
983 * thread has the fatal signal pending.
984 */
985 p->signal->flags = SIGNAL_GROUP_EXIT;
986 p->signal->group_exit_code = sig;
987 p->signal->group_stop_count = 0;
988 t = p;
989 do {
990 sigaddset(&t->pending.signal, SIGKILL);
991 signal_wake_up(t, 1);
992 t = next_thread(t);
993 } while (t != p);
994 return;
995 }
996
997 /*
998 * There will be a core dump. We make all threads other
999 * than the chosen one go into a group stop so that nothing
1000 * happens until it gets scheduled, takes the signal off
1001 * the shared queue, and does the core dump. This is a
1002 * little more complicated than strictly necessary, but it
1003 * keeps the signal state that winds up in the core dump
1004 * unchanged from the death state, e.g. which thread had
1005 * the core-dump signal unblocked.
1006 */
1007 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1008 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1009 p->signal->group_stop_count = 0;
1010 p->signal->group_exit_task = t;
1011 t = p;
1012 do {
1013 p->signal->group_stop_count++;
1014 signal_wake_up(t, 0);
1015 t = next_thread(t);
1016 } while (t != p);
1017 wake_up_process(p->signal->group_exit_task);
1018 return;
1019 }
1020
1021 /*
1022 * The signal is already in the shared-pending queue.
1023 * Tell the chosen thread to wake up and dequeue it.
1024 */
1025 signal_wake_up(t, sig == SIGKILL);
1026 return;
1027}
1028
1029int
1030__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1031{
1032 int ret = 0;
1033
1034 assert_spin_locked(&p->sighand->siglock);
1035 handle_stop_signal(sig, p);
1036
1037 /* Short-circuit ignored signals. */
1038 if (sig_ignored(p, sig))
1039 return ret;
1040
1041 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1042 /* This is a non-RT signal and we already have one queued. */
1043 return ret;
1044
1045 /*
1046 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1047 * We always use the shared queue for process-wide signals,
1048 * to avoid several races.
1049 */
1050 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1051 if (unlikely(ret))
1052 return ret;
1053
1054 __group_complete_signal(sig, p);
1055 return 0;
1056}
1057
1058/*
1059 * Nuke all other threads in the group.
1060 */
1061void zap_other_threads(struct task_struct *p)
1062{
1063 struct task_struct *t;
1064
1065 p->signal->flags = SIGNAL_GROUP_EXIT;
1066 p->signal->group_stop_count = 0;
1067
1068 if (thread_group_empty(p))
1069 return;
1070
1071 for (t = next_thread(p); t != p; t = next_thread(t)) {
1072 /*
1073 * Don't bother with already dead threads
1074 */
1075 if (t->exit_state)
1076 continue;
1077
1078 /*
1079 * We don't want to notify the parent, since we are
1080 * killed as part of a thread group due to another
1081 * thread doing an execve() or similar. So set the
1082 * exit signal to -1 to allow immediate reaping of
1083 * the process. But don't detach the thread group
1084 * leader.
1085 */
1086 if (t != p->group_leader)
1087 t->exit_signal = -1;
1088
30e0fca6 1089 /* SIGKILL will be handled before any pending SIGSTOP */
1da177e4 1090 sigaddset(&t->pending.signal, SIGKILL);
1da177e4
LT
1091 signal_wake_up(t, 1);
1092 }
1093}
1094
1095/*
e56d0903 1096 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1da177e4 1097 */
1098struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1099{
1100 struct sighand_struct *sighand;
1101
1102 for (;;) {
1103 sighand = rcu_dereference(tsk->sighand);
1104 if (unlikely(sighand == NULL))
1105 break;
1106
1107 spin_lock_irqsave(&sighand->siglock, *flags);
1108 if (likely(sighand == tsk->sighand))
1109 break;
1110 spin_unlock_irqrestore(&sighand->siglock, *flags);
1111 }
1112
1113 return sighand;
1114}
1115
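/*
 * Typical calling pattern, the same one group_send_sig_info() below uses
 * (sketch; per the comment above it must run under rcu_read_lock() or with
 * tasklist_lock read-held, and unlock_task_sighand() is the matching
 * unlock helper):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is pinned and ->siglock is held ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */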
1116int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1117{
1118 unsigned long flags;
1119 int ret;
1120
1121 ret = check_kill_permission(sig, info, p);
1122
1123 if (!ret && sig) {
1124 ret = -ESRCH;
1125 if (lock_task_sighand(p, &flags)) {
1126 ret = __group_send_sig_info(sig, info, p);
1127 unlock_task_sighand(p, &flags);
2d89c929 1128 }
1129 }
1130
1131 return ret;
1132}
1133
1134/*
1135 * kill_pg_info() sends a signal to a process group: this is what the tty
1136 * control characters do (^C, ^Z etc)
1137 */
1138
1139int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1140{
1141 struct task_struct *p = NULL;
1142 int retval, success;
1143
1144 if (pgrp <= 0)
1145 return -EINVAL;
1146
1147 success = 0;
1148 retval = -ESRCH;
1149 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1150 int err = group_send_sig_info(sig, info, p);
1151 success |= !err;
1152 retval = err;
1153 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1154 return success ? 0 : retval;
1155}
1156
1157int
1158kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1159{
1160 int retval;
1161
1162 read_lock(&tasklist_lock);
1163 retval = __kill_pg_info(sig, info, pgrp);
1164 read_unlock(&tasklist_lock);
1165
1166 return retval;
1167}
1168
1169int
1170kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1171{
1172 int error;
e56d0903 1173 int acquired_tasklist_lock = 0;
1174 struct task_struct *p;
1175
e56d0903 1176 rcu_read_lock();
a9e88e84 1177 if (unlikely(sig_needs_tasklist(sig))) {
1178 read_lock(&tasklist_lock);
1179 acquired_tasklist_lock = 1;
1180 }
1181 p = find_task_by_pid(pid);
1182 error = -ESRCH;
1183 if (p)
1184 error = group_send_sig_info(sig, info, p);
1185 if (unlikely(acquired_tasklist_lock))
1186 read_unlock(&tasklist_lock);
1187 rcu_read_unlock();
1188 return error;
1189}
1190
1191/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1192int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1193 uid_t uid, uid_t euid)
1194{
1195 int ret = -EINVAL;
1196 struct task_struct *p;
1197
1198 if (!valid_signal(sig))
1199 return ret;
1200
1201 read_lock(&tasklist_lock);
1202 p = find_task_by_pid(pid);
1203 if (!p) {
1204 ret = -ESRCH;
1205 goto out_unlock;
1206 }
0811af28 1207 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1208 && (euid != p->suid) && (euid != p->uid)
1209 && (uid != p->suid) && (uid != p->uid)) {
1210 ret = -EPERM;
1211 goto out_unlock;
1212 }
1213 if (sig && p->sighand) {
1214 unsigned long flags;
1215 spin_lock_irqsave(&p->sighand->siglock, flags);
1216 ret = __group_send_sig_info(sig, info, p);
1217 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1218 }
1219out_unlock:
1220 read_unlock(&tasklist_lock);
1221 return ret;
1222}
1223EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1224
1225/*
1226 * kill_something_info() interprets pid in interesting ways just like kill(2).
1227 *
1228 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1229 * is probably wrong. Should make it like BSD or SYSV.
1230 */
1231
1232static int kill_something_info(int sig, struct siginfo *info, int pid)
1233{
1234 if (!pid) {
1235 return kill_pg_info(sig, info, process_group(current));
1236 } else if (pid == -1) {
1237 int retval = 0, count = 0;
1238 struct task_struct * p;
1239
1240 read_lock(&tasklist_lock);
1241 for_each_process(p) {
1242 if (p->pid > 1 && p->tgid != current->tgid) {
1243 int err = group_send_sig_info(sig, info, p);
1244 ++count;
1245 if (err != -EPERM)
1246 retval = err;
1247 }
1248 }
1249 read_unlock(&tasklist_lock);
1250 return count ? retval : -ESRCH;
1251 } else if (pid < 0) {
1252 return kill_pg_info(sig, info, -pid);
1253 } else {
1254 return kill_proc_info(sig, info, pid);
1255 }
1256}
1257
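/*
 * Summary of the pid encoding handled above (mirrors kill(2)):
 *
 *	pid == 0	signal the caller's process group
 *	pid == -1	signal every process except init (pid 1) and the
 *			caller's own thread group
 *	pid <  -1	signal the process group -pid
 *	pid >   0	signal the single process pid
 */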
1258/*
1259 * These are for backward compatibility with the rest of the kernel source.
1260 */
1261
1262/*
1263 * These two are the most common entry points. They send a signal
1264 * just to the specific thread.
1265 */
1266int
1267send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1268{
1269 int ret;
1270 unsigned long flags;
1271
1272 /*
1273 * Make sure legacy kernel users don't send in bad values
1274 * (normal paths check this in check_kill_permission).
1275 */
7ed20e1a 1276 if (!valid_signal(sig))
1277 return -EINVAL;
1278
1279 /*
1280 * We need the tasklist lock even for the specific
1281 * thread case (when we don't need to follow the group
1282 * lists) in order to avoid races with "p->sighand"
1283 * going away or changing from under us.
1284 */
1285 read_lock(&tasklist_lock);
1286 spin_lock_irqsave(&p->sighand->siglock, flags);
1287 ret = specific_send_sig_info(sig, info, p);
1288 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1289 read_unlock(&tasklist_lock);
1290 return ret;
1291}
1292
1293#define __si_special(priv) \
1294 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1295
1296int
1297send_sig(int sig, struct task_struct *p, int priv)
1298{
b67a1b9e 1299 return send_sig_info(sig, __si_special(priv), p);
1300}
1301
1302/*
1303 * This is the entry point for "process-wide" signals.
1304 * They will go to an appropriate thread in the thread group.
1305 */
1306int
1307send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1308{
1309 int ret;
1310 read_lock(&tasklist_lock);
1311 ret = group_send_sig_info(sig, info, p);
1312 read_unlock(&tasklist_lock);
1313 return ret;
1314}
1315
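/*
 * Illustration of the thread/group split: send_sig(SIGTERM, t, 0) queues on
 * t->pending and only that thread will dequeue it, while
 * send_group_sig_info(SIGTERM, info, p) queues on p->signal->shared_pending
 * and __group_complete_signal() wakes whichever thread wants_signal() picks.
 */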
1316void
1317force_sig(int sig, struct task_struct *p)
1318{
b67a1b9e 1319 force_sig_info(sig, SEND_SIG_PRIV, p);
1320}
1321
1322/*
1323 * When things go south during signal handling, we
1324 * will force a SIGSEGV. And if the signal that caused
1325 * the problem was already a SIGSEGV, we'll want to
1326 * make sure we don't even try to deliver the signal..
1327 */
1328int
1329force_sigsegv(int sig, struct task_struct *p)
1330{
1331 if (sig == SIGSEGV) {
1332 unsigned long flags;
1333 spin_lock_irqsave(&p->sighand->siglock, flags);
1334 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1335 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1336 }
1337 force_sig(SIGSEGV, p);
1338 return 0;
1339}
1340
1341int
1342kill_pg(pid_t pgrp, int sig, int priv)
1343{
b67a1b9e 1344 return kill_pg_info(sig, __si_special(priv), pgrp);
1345}
1346
1347int
1348kill_proc(pid_t pid, int sig, int priv)
1349{
b67a1b9e 1350 return kill_proc_info(sig, __si_special(priv), pid);
1351}
1352
1353/*
1354 * These functions support sending signals using preallocated sigqueue
1355 * structures. This is needed "because realtime applications cannot
1356 * afford to lose notifications of asynchronous events, like timer
 1357 * expirations or I/O completions". In the case of POSIX timers
 1358 * we allocate the sigqueue structure in timer_create. If this
1359 * allocation fails we are able to report the failure to the application
1360 * with an EAGAIN error.
1361 */
1362
1363struct sigqueue *sigqueue_alloc(void)
1364{
1365 struct sigqueue *q;
1366
1367 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1368 q->flags |= SIGQUEUE_PREALLOC;
1369 return(q);
1370}
1371
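/*
 * Rough lifetime, in the spirit of the posix-timers usage described above
 * (sketch only, error handling omitted):
 *
 *	q = sigqueue_alloc();			at timer_create(); NULL means -EAGAIN
 *	...
 *	send_sigqueue(sig, q, task);		on expiry, thread-directed, or
 *	send_group_sigqueue(sig, q, leader);	for process-wide delivery
 *	...
 *	sigqueue_free(q);			when the timer is deleted
 */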
1372void sigqueue_free(struct sigqueue *q)
1373{
1374 unsigned long flags;
1375 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1376 /*
1377 * If the signal is still pending remove it from the
1378 * pending queue.
1379 */
1380 if (unlikely(!list_empty(&q->list))) {
1381 spinlock_t *lock = &current->sighand->siglock;
1382 read_lock(&tasklist_lock);
1383 spin_lock_irqsave(lock, flags);
1384 if (!list_empty(&q->list))
1385 list_del_init(&q->list);
19a4fcb5 1386 spin_unlock_irqrestore(lock, flags);
1387 read_unlock(&tasklist_lock);
1388 }
1389 q->flags &= ~SIGQUEUE_PREALLOC;
1390 __sigqueue_free(q);
1391}
1392
1393int
1394send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1395{
1396 unsigned long flags;
1397 int ret = 0;
 1398 struct sighand_struct *sh;
 1399
 1400 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1401
1402 /*
1403 * The rcu based delayed sighand destroy makes it possible to
1404 * run this without tasklist lock held. The task struct itself
1405 * cannot go away as create_timer did get_task_struct().
1406 *
1407 * We return -1, when the task is marked exiting, so
1408 * posix_timer_event can redirect it to the group leader
1409 */
1410 rcu_read_lock();
1411
1412 if (unlikely(p->flags & PF_EXITING)) {
1413 ret = -1;
1414 goto out_err;
1415 }
1416
1417retry:
1418 sh = rcu_dereference(p->sighand);
1419
1420 spin_lock_irqsave(&sh->siglock, flags);
1421 if (p->sighand != sh) {
1422 /* We raced with exec() in a multithreaded process... */
1423 spin_unlock_irqrestore(&sh->siglock, flags);
1424 goto retry;
1425 }
1426
1427 /*
1428 * We do the check here again to handle the following scenario:
1429 *
1430 * CPU 0 CPU 1
1431 * send_sigqueue
1432 * check PF_EXITING
1433 * interrupt exit code running
1434 * __exit_signal
1435 * lock sighand->siglock
1436 * unlock sighand->siglock
1437 * lock sh->siglock
1438 * add(tsk->pending) flush_sigqueue(tsk->pending)
1439 *
1440 */
1441
1442 if (unlikely(p->flags & PF_EXITING)) {
1443 ret = -1;
1444 goto out;
1445 }
 1446
1447 if (unlikely(!list_empty(&q->list))) {
1448 /*
 1449 * If an SI_TIMER entry is already queued, just increment
1450 * the overrun count.
1451 */
1452 if (q->info.si_code != SI_TIMER)
1453 BUG();
1454 q->info.si_overrun++;
1455 goto out;
e752dd6c 1456 }
1457 /* Short-circuit ignored signals. */
1458 if (sig_ignored(p, sig)) {
1459 ret = 1;
1460 goto out;
1461 }
1462
1463 list_add_tail(&q->list, &p->pending.list);
1464 sigaddset(&p->pending.signal, sig);
1465 if (!sigismember(&p->blocked, sig))
1466 signal_wake_up(p, sig == SIGKILL);
1467
1468out:
e56d0903 1469 spin_unlock_irqrestore(&sh->siglock, flags);
e752dd6c 1470out_err:
e56d0903 1471 rcu_read_unlock();
1472
1473 return ret;
1474}
1475
1476int
1477send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1478{
1479 unsigned long flags;
1480 int ret = 0;
1481
1482 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 1483
1da177e4 1484 read_lock(&tasklist_lock);
e56d0903 1485 /* Since it_lock is held, p->sighand cannot be NULL. */
1486 spin_lock_irqsave(&p->sighand->siglock, flags);
1487 handle_stop_signal(sig, p);
1488
1489 /* Short-circuit ignored signals. */
1490 if (sig_ignored(p, sig)) {
1491 ret = 1;
1492 goto out;
1493 }
1494
1495 if (unlikely(!list_empty(&q->list))) {
1496 /*
 1497 * If an SI_TIMER entry is already queued, just increment
1498 * the overrun count. Other uses should not try to
1499 * send the signal multiple times.
1500 */
1501 if (q->info.si_code != SI_TIMER)
1502 BUG();
1503 q->info.si_overrun++;
1504 goto out;
1505 }
1506
1507 /*
1508 * Put this signal on the shared-pending queue.
1509 * We always use the shared queue for process-wide signals,
1510 * to avoid several races.
1511 */
1512 list_add_tail(&q->list, &p->signal->shared_pending.list);
1513 sigaddset(&p->signal->shared_pending.signal, sig);
1514
1515 __group_complete_signal(sig, p);
1516out:
1517 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1518 read_unlock(&tasklist_lock);
e56d0903 1519 return ret;
1520}
1521
1522/*
1523 * Wake up any threads in the parent blocked in wait* syscalls.
1524 */
1525static inline void __wake_up_parent(struct task_struct *p,
1526 struct task_struct *parent)
1527{
1528 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1529}
1530
1531/*
1532 * Let a parent know about the death of a child.
1533 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1534 */
1535
1536void do_notify_parent(struct task_struct *tsk, int sig)
1537{
1538 struct siginfo info;
1539 unsigned long flags;
1540 struct sighand_struct *psig;
1541
1542 BUG_ON(sig == -1);
1543
1544 /* do_notify_parent_cldstop should have been called instead. */
1545 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1546
1547 BUG_ON(!tsk->ptrace &&
1548 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1549
1550 info.si_signo = sig;
1551 info.si_errno = 0;
1552 info.si_pid = tsk->pid;
1553 info.si_uid = tsk->uid;
1554
1555 /* FIXME: find out whether or not this is supposed to be c*time. */
1556 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1557 tsk->signal->utime));
1558 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1559 tsk->signal->stime));
1560
1561 info.si_status = tsk->exit_code & 0x7f;
1562 if (tsk->exit_code & 0x80)
1563 info.si_code = CLD_DUMPED;
1564 else if (tsk->exit_code & 0x7f)
1565 info.si_code = CLD_KILLED;
1566 else {
1567 info.si_code = CLD_EXITED;
1568 info.si_status = tsk->exit_code >> 8;
1569 }
1570
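/*
 * Worked example of the decoding above: a child killed by SIGSEGV that also
 * dumped core has exit_code == 0x80 | SIGSEGV, so si_code becomes CLD_DUMPED
 * and si_status == SIGSEGV; a child that called exit(1) has
 * exit_code == 0x100, giving CLD_EXITED with si_status == 1.
 */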
1571 psig = tsk->parent->sighand;
1572 spin_lock_irqsave(&psig->siglock, flags);
7ed0175a 1573 if (!tsk->ptrace && sig == SIGCHLD &&
1574 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1575 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1576 /*
1577 * We are exiting and our parent doesn't care. POSIX.1
1578 * defines special semantics for setting SIGCHLD to SIG_IGN
1579 * or setting the SA_NOCLDWAIT flag: we should be reaped
1580 * automatically and not left for our parent's wait4 call.
1581 * Rather than having the parent do it as a magic kind of
1582 * signal handler, we just set this to tell do_exit that we
1583 * can be cleaned up without becoming a zombie. Note that
1584 * we still call __wake_up_parent in this case, because a
1585 * blocked sys_wait4 might now return -ECHILD.
1586 *
1587 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1588 * is implementation-defined: we do (if you don't want
1589 * it, just use SIG_IGN instead).
1590 */
1591 tsk->exit_signal = -1;
1592 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1593 sig = 0;
1594 }
7ed20e1a 1595 if (valid_signal(sig) && sig > 0)
1596 __group_send_sig_info(sig, &info, tsk->parent);
1597 __wake_up_parent(tsk, tsk->parent);
1598 spin_unlock_irqrestore(&psig->siglock, flags);
1599}
1600
bc505a47 1601static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1602{
1603 struct siginfo info;
1604 unsigned long flags;
bc505a47 1605 struct task_struct *parent;
1606 struct sighand_struct *sighand;
1607
1608 if (to_self)
1609 parent = tsk->parent;
1610 else {
1611 tsk = tsk->group_leader;
1612 parent = tsk->real_parent;
1613 }
1614
1615 info.si_signo = SIGCHLD;
1616 info.si_errno = 0;
1617 info.si_pid = tsk->pid;
1618 info.si_uid = tsk->uid;
1619
1620 /* FIXME: find out whether or not this is supposed to be c*time. */
1621 info.si_utime = cputime_to_jiffies(tsk->utime);
1622 info.si_stime = cputime_to_jiffies(tsk->stime);
1623
1624 info.si_code = why;
1625 switch (why) {
1626 case CLD_CONTINUED:
1627 info.si_status = SIGCONT;
1628 break;
1629 case CLD_STOPPED:
1630 info.si_status = tsk->signal->group_exit_code & 0x7f;
1631 break;
1632 case CLD_TRAPPED:
1633 info.si_status = tsk->exit_code & 0x7f;
1634 break;
1635 default:
1636 BUG();
1637 }
1638
1639 sighand = parent->sighand;
1640 spin_lock_irqsave(&sighand->siglock, flags);
1641 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1642 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1643 __group_send_sig_info(SIGCHLD, &info, parent);
1644 /*
1645 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1646 */
1647 __wake_up_parent(tsk, parent);
1648 spin_unlock_irqrestore(&sighand->siglock, flags);
1649}
1650
1651/*
1652 * This must be called with current->sighand->siglock held.
1653 *
1654 * This should be the path for all ptrace stops.
1655 * We always set current->last_siginfo while stopped here.
1656 * That makes it a way to test a stopped process for
1657 * being ptrace-stopped vs being job-control-stopped.
1658 *
1659 * If we actually decide not to stop at all because the tracer is gone,
1660 * we leave nostop_code in current->exit_code.
1661 */
1662static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1663{
1664 /*
1665 * If there is a group stop in progress,
1666 * we must participate in the bookkeeping.
1667 */
1668 if (current->signal->group_stop_count > 0)
1669 --current->signal->group_stop_count;
1670
1671 current->last_siginfo = info;
1672 current->exit_code = exit_code;
1673
1674 /* Let the debugger run. */
1675 set_current_state(TASK_TRACED);
1676 spin_unlock_irq(&current->sighand->siglock);
1677 read_lock(&tasklist_lock);
1678 if (likely(current->ptrace & PT_PTRACED) &&
1679 likely(current->parent != current->real_parent ||
1680 !(current->ptrace & PT_ATTACHED)) &&
1681 (likely(current->parent->signal != current->signal) ||
1682 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
bc505a47 1683 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1684 read_unlock(&tasklist_lock);
1685 schedule();
1686 } else {
1687 /*
1688 * By the time we got the lock, our tracer went away.
1689 * Don't stop here.
1690 */
1691 read_unlock(&tasklist_lock);
1692 set_current_state(TASK_RUNNING);
1693 current->exit_code = nostop_code;
1694 }
1695
1696 /*
1697 * We are back. Now reacquire the siglock before touching
1698 * last_siginfo, so that we are sure to have synchronized with
1699 * any signal-sending on another CPU that wants to examine it.
1700 */
1701 spin_lock_irq(&current->sighand->siglock);
1702 current->last_siginfo = NULL;
1703
1704 /*
1705 * Queued signals ignored us while we were stopped for tracing.
1706 * So check for any that we should take before resuming user mode.
1707 */
1708 recalc_sigpending();
1709}
1710
1711void ptrace_notify(int exit_code)
1712{
1713 siginfo_t info;
1714
1715 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1716
1717 memset(&info, 0, sizeof info);
1718 info.si_signo = SIGTRAP;
1719 info.si_code = exit_code;
1720 info.si_pid = current->pid;
1721 info.si_uid = current->uid;
1722
1723 /* Let the debugger run. */
1724 spin_lock_irq(&current->sighand->siglock);
1725 ptrace_stop(exit_code, 0, &info);
1726 spin_unlock_irq(&current->sighand->siglock);
1727}
1728
1729static void
1730finish_stop(int stop_count)
1731{
1732 int to_self;
1733
1734 /*
1735 * If there are no other threads in the group, or if there is
1736 * a group stop in progress and we are the last to stop,
1737 * report to the parent. When ptraced, every thread reports itself.
1738 */
1739 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1740 to_self = 1;
1741 else if (stop_count == 0)
1742 to_self = 0;
1743 else
1744 goto out;
 1745
1746 read_lock(&tasklist_lock);
1747 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1748 read_unlock(&tasklist_lock);
1749
1750out:
1751 schedule();
1752 /*
1753 * Now we don't run again until continued.
1754 */
1755 current->exit_code = 0;
1756}
1757
1758/*
1759 * This performs the stopping for SIGSTOP and other stop signals.
1760 * We have to stop all threads in the thread group.
1761 * Returns nonzero if we've actually stopped and released the siglock.
1762 * Returns zero if we didn't stop and still hold the siglock.
1763 */
1764static int
1765do_signal_stop(int signr)
1766{
1767 struct signal_struct *sig = current->signal;
1768 struct sighand_struct *sighand = current->sighand;
1769 int stop_count = -1;
1770
1771 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1772 return 0;
1773
1774 if (sig->group_stop_count > 0) {
1775 /*
1776 * There is a group stop in progress. We don't need to
1777 * start another one.
1778 */
1779 signr = sig->group_exit_code;
1780 stop_count = --sig->group_stop_count;
1781 current->exit_code = signr;
1782 set_current_state(TASK_STOPPED);
1783 if (stop_count == 0)
1784 sig->flags = SIGNAL_STOP_STOPPED;
1785 spin_unlock_irq(&sighand->siglock);
1786 }
1787 else if (thread_group_empty(current)) {
1788 /*
1789 * Lock must be held through transition to stopped state.
1790 */
1791 current->exit_code = current->signal->group_exit_code = signr;
1792 set_current_state(TASK_STOPPED);
1793 sig->flags = SIGNAL_STOP_STOPPED;
1794 spin_unlock_irq(&sighand->siglock);
1795 }
1796 else {
1797 /*
1798 * There is no group stop already in progress.
1799 * We must initiate one now, but that requires
1800 * dropping siglock to get both the tasklist lock
1801 * and siglock again in the proper order. Note that
1802 * this allows an intervening SIGCONT to be posted.
1803 * We need to check for that and bail out if necessary.
1804 */
1805 struct task_struct *t;
1806
1807 spin_unlock_irq(&sighand->siglock);
1808
1809 /* signals can be posted during this window */
1810
1811 read_lock(&tasklist_lock);
1812 spin_lock_irq(&sighand->siglock);
1813
1814 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1815 /*
1816 * Another stop or continue happened while we
1817 * didn't have the lock. We can just swallow this
1818 * signal now. If we raced with a SIGCONT, that
1819 * should have just cleared it now. If we raced
1820 * with another processor delivering a stop signal,
1821 * then the SIGCONT that wakes us up should clear it.
1822 */
1823 read_unlock(&tasklist_lock);
1824 return 0;
1825 }
1826
1827 if (sig->group_stop_count == 0) {
1828 sig->group_exit_code = signr;
1829 stop_count = 0;
1830 for (t = next_thread(current); t != current;
1831 t = next_thread(t))
1832 /*
1833 * Setting state to TASK_STOPPED for a group
1834 * stop is always done with the siglock held,
1835 * so this check has no races.
1836 */
1837 if (!t->exit_state &&
1838 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1839 stop_count++;
1840 signal_wake_up(t, 0);
1841 }
1842 sig->group_stop_count = stop_count;
1843 }
1844 else {
1845 /* A race with another thread while unlocked. */
1846 signr = sig->group_exit_code;
1847 stop_count = --sig->group_stop_count;
1848 }
1849
1850 current->exit_code = signr;
1851 set_current_state(TASK_STOPPED);
1852 if (stop_count == 0)
1853 sig->flags = SIGNAL_STOP_STOPPED;
1854
1855 spin_unlock_irq(&sighand->siglock);
1856 read_unlock(&tasklist_lock);
1857 }
1858
1859 finish_stop(stop_count);
1860 return 1;
1861}
1862
1863/*
1864 * Do appropriate magic when group_stop_count > 0.
1865 * We return nonzero if we stopped, after releasing the siglock.
1866 * We return zero if we still hold the siglock and should look
1867 * for another signal without checking group_stop_count again.
1868 */
858119e1 1869static int handle_group_stop(void)
1870{
1871 int stop_count;
1872
1873 if (current->signal->group_exit_task == current) {
1874 /*
 1875	 * Group stop is so we can do a core dump.
1876 * We are the initiating thread, so get on with it.
1877 */
1878 current->signal->group_exit_task = NULL;
1879 return 0;
1880 }
1881
1882 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1883 /*
1884 * Group stop is so another thread can do a core dump,
1885 * or else we are racing against a death signal.
1886 * Just punt the stop so we can get the next signal.
1887 */
1888 return 0;
1889
1890 /*
1891 * There is a group stop in progress. We stop
1892 * without any associated signal being in our queue.
1893 */
1894 stop_count = --current->signal->group_stop_count;
1895 if (stop_count == 0)
1896 current->signal->flags = SIGNAL_STOP_STOPPED;
1897 current->exit_code = current->signal->group_exit_code;
1898 set_current_state(TASK_STOPPED);
1899 spin_unlock_irq(&current->sighand->siglock);
1900 finish_stop(stop_count);
1901 return 1;
1902}
1903
1904int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1905 struct pt_regs *regs, void *cookie)
1906{
1907 sigset_t *mask = &current->blocked;
1908 int signr = 0;
1909
1910 try_to_freeze();
1911
1912relock:
1913 spin_lock_irq(&current->sighand->siglock);
1914 for (;;) {
1915 struct k_sigaction *ka;
1916
1917 if (unlikely(current->signal->group_stop_count > 0) &&
1918 handle_group_stop())
1919 goto relock;
1920
1921 signr = dequeue_signal(current, mask, info);
1922
1923 if (!signr)
1924 break; /* will return 0 */
1925
1926 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1927 ptrace_signal_deliver(regs, cookie);
1928
1929 /* Let the debugger run. */
1930 ptrace_stop(signr, signr, info);
1931
30e0fca6 1932 /* We're back. Did the debugger cancel the sig or group_exit? */
1da177e4 1933 signr = current->exit_code;
30e0fca6 1934 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1935 continue;
1936
1937 current->exit_code = 0;
1938
1939 /* Update the siginfo structure if the signal has
1940 changed. If the debugger wanted something
1941 specific in the siginfo structure then it should
1942 have updated *info via PTRACE_SETSIGINFO. */
1943 if (signr != info->si_signo) {
1944 info->si_signo = signr;
1945 info->si_errno = 0;
1946 info->si_code = SI_USER;
1947 info->si_pid = current->parent->pid;
1948 info->si_uid = current->parent->uid;
1949 }
1950
1951 /* If the (new) signal is now blocked, requeue it. */
1952 if (sigismember(&current->blocked, signr)) {
1953 specific_send_sig_info(signr, info, current);
1954 continue;
1955 }
1956 }
1957
1958 ka = &current->sighand->action[signr-1];
1959 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1960 continue;
1961 if (ka->sa.sa_handler != SIG_DFL) {
1962 /* Run the handler. */
1963 *return_ka = *ka;
1964
1965 if (ka->sa.sa_flags & SA_ONESHOT)
1966 ka->sa.sa_handler = SIG_DFL;
1967
1968 break; /* will return non-zero "signr" value */
1969 }
1970
1971 /*
1972 * Now we are doing the default action for this signal.
1973 */
1974 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1975 continue;
1976
1977 /* Init gets no signals it doesn't want. */
fef23e7f 1978 if (current == child_reaper)
1979 continue;
1980
1981 if (sig_kernel_stop(signr)) {
1982 /*
1983 * The default action is to stop all threads in
1984 * the thread group. The job control signals
1985 * do nothing in an orphaned pgrp, but SIGSTOP
1986 * always works. Note that siglock needs to be
1987 * dropped during the call to is_orphaned_pgrp()
1988 * because of lock ordering with tasklist_lock.
1989 * This allows an intervening SIGCONT to be posted.
1990 * We need to check for that and bail out if necessary.
1991 */
1992 if (signr != SIGSTOP) {
1993 spin_unlock_irq(&current->sighand->siglock);
1994
1995 /* signals can be posted during this window */
1996
1997 if (is_orphaned_pgrp(process_group(current)))
1998 goto relock;
1999
2000 spin_lock_irq(&current->sighand->siglock);
2001 }
2002
2003 if (likely(do_signal_stop(signr))) {
2004 /* It released the siglock. */
2005 goto relock;
2006 }
2007
2008 /*
2009 * We didn't actually stop, due to a race
2010 * with SIGCONT or something like that.
2011 */
2012 continue;
2013 }
2014
2015 spin_unlock_irq(&current->sighand->siglock);
2016
2017 /*
2018 * Anything else is fatal, maybe with a core dump.
2019 */
2020 current->flags |= PF_SIGNALED;
2021 if (sig_kernel_coredump(signr)) {
2022 /*
2023 * If it was able to dump core, this kills all
2024 * other threads in the group and synchronizes with
2025 * their demise. If we lost the race with another
2026 * thread getting here, it set group_exit_code
2027 * first and our do_group_exit call below will use
2028 * that value and ignore the one we pass it.
2029 */
2030 do_coredump((long)signr, signr, regs);
2031 }
2032
2033 /*
2034 * Death signals, no core dump.
2035 */
2036 do_group_exit(signr);
2037 /* NOTREACHED */
2038 }
2039 spin_unlock_irq(&current->sighand->siglock);
2040 return signr;
2041}
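
/*
 * Editorial illustration (not part of kernel/signal.c): a hedged user-space
 * sketch of the ptrace path above. The tracee stops in get_signal_to_deliver()
 * when SIGUSR1 is dequeued; the tracer then resumes it with "no signal"
 * (data == 0 to PTRACE_CONT), which is the "debugger cancelled the sig" case.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	int status;

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);			/* unhandled, would normally be fatal */
		printf("child survived: the tracer cancelled SIGUSR1\n");
		_exit(0);
	}
	waitpid(child, &status, 0);		/* signal-delivery stop */
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		ptrace(PTRACE_CONT, child, NULL, NULL);	/* data 0: deliver nothing */
	waitpid(child, &status, 0);
	return 0;
}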
2042
2043EXPORT_SYMBOL(recalc_sigpending);
2044EXPORT_SYMBOL_GPL(dequeue_signal);
2045EXPORT_SYMBOL(flush_signals);
2046EXPORT_SYMBOL(force_sig);
2047EXPORT_SYMBOL(kill_pg);
2048EXPORT_SYMBOL(kill_proc);
2049EXPORT_SYMBOL(ptrace_notify);
2050EXPORT_SYMBOL(send_sig);
2051EXPORT_SYMBOL(send_sig_info);
2052EXPORT_SYMBOL(sigprocmask);
2053EXPORT_SYMBOL(block_all_signals);
2054EXPORT_SYMBOL(unblock_all_signals);
2055
2056
2057/*
2058 * System call entry points.
2059 */
2060
2061asmlinkage long sys_restart_syscall(void)
2062{
2063 struct restart_block *restart = &current_thread_info()->restart_block;
2064 return restart->fn(restart);
2065}
2066
2067long do_no_restart_syscall(struct restart_block *param)
2068{
2069 return -EINTR;
2070}
2071
2072/*
2073 * We don't need to get the kernel lock - this is all local to this
 2074 * particular thread (and that's good, because this is _heavily_
 2075 * used by various programs).
2076 */
2077
2078/*
2079 * This is also useful for kernel threads that want to temporarily
2080 * (or permanently) block certain signals.
2081 *
2082 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2083 * interface happily blocks "unblockable" signals like SIGKILL
2084 * and friends.
2085 */
2086int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2087{
2088 int error;
2089
2090 spin_lock_irq(&current->sighand->siglock);
2091 if (oldset)
2092 *oldset = current->blocked;
2093
2094 error = 0;
2095 switch (how) {
2096 case SIG_BLOCK:
2097 sigorsets(&current->blocked, &current->blocked, set);
2098 break;
2099 case SIG_UNBLOCK:
2100 signandsets(&current->blocked, &current->blocked, set);
2101 break;
2102 case SIG_SETMASK:
2103 current->blocked = *set;
2104 break;
2105 default:
2106 error = -EINVAL;
2107 }
2108 recalc_sigpending();
2109 spin_unlock_irq(&current->sighand->siglock);
 2110
2111 return error;
2112}
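
/*
 * Editorial illustration (not part of kernel/signal.c): from user space the
 * syscall path (sys_rt_sigprocmask below) strips SIGKILL and SIGSTOP from the
 * requested set before calling this function, so they can never be blocked.
 * A minimal sketch:
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, cur;

	sigemptyset(&set);
	sigaddset(&set, SIGKILL);		/* silently dropped by the kernel */
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read back the effective mask */
	printf("SIGTERM blocked: %d, SIGKILL blocked: %d\n",
	       sigismember(&cur, SIGTERM), sigismember(&cur, SIGKILL));
	return 0;
}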
2113
2114asmlinkage long
2115sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2116{
2117 int error = -EINVAL;
2118 sigset_t old_set, new_set;
2119
2120 /* XXX: Don't preclude handling different sized sigset_t's. */
2121 if (sigsetsize != sizeof(sigset_t))
2122 goto out;
2123
2124 if (set) {
2125 error = -EFAULT;
2126 if (copy_from_user(&new_set, set, sizeof(*set)))
2127 goto out;
2128 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2129
2130 error = sigprocmask(how, &new_set, &old_set);
2131 if (error)
2132 goto out;
2133 if (oset)
2134 goto set_old;
2135 } else if (oset) {
2136 spin_lock_irq(&current->sighand->siglock);
2137 old_set = current->blocked;
2138 spin_unlock_irq(&current->sighand->siglock);
2139
2140 set_old:
2141 error = -EFAULT;
2142 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2143 goto out;
2144 }
2145 error = 0;
2146out:
2147 return error;
2148}
2149
2150long do_sigpending(void __user *set, unsigned long sigsetsize)
2151{
2152 long error = -EINVAL;
2153 sigset_t pending;
2154
2155 if (sigsetsize > sizeof(sigset_t))
2156 goto out;
2157
2158 spin_lock_irq(&current->sighand->siglock);
2159 sigorsets(&pending, &current->pending.signal,
2160 &current->signal->shared_pending.signal);
2161 spin_unlock_irq(&current->sighand->siglock);
2162
2163 /* Outside the lock because only this thread touches it. */
2164 sigandsets(&pending, &current->blocked, &pending);
2165
2166 error = -EFAULT;
2167 if (!copy_to_user(set, &pending, sigsetsize))
2168 error = 0;
2169
2170out:
2171 return error;
2172}
2173
2174asmlinkage long
2175sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2176{
2177 return do_sigpending(set, sigsetsize);
2178}
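
/*
 * Editorial illustration (not part of kernel/signal.c): do_sigpending()
 * reports signals that are both pending (private or shared) and blocked by
 * the caller. A minimal user-space sketch:
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);				/* stays pending while blocked */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}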
2179
2180#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2181
2182int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2183{
2184 int err;
2185
2186 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2187 return -EFAULT;
2188 if (from->si_code < 0)
2189 return __copy_to_user(to, from, sizeof(siginfo_t))
2190 ? -EFAULT : 0;
2191 /*
2192 * If you change siginfo_t structure, please be sure
2193 * this code is fixed accordingly.
2194 * It should never copy any pad contained in the structure
2195 * to avoid security leaks, but must copy the generic
2196 * 3 ints plus the relevant union member.
2197 */
2198 err = __put_user(from->si_signo, &to->si_signo);
2199 err |= __put_user(from->si_errno, &to->si_errno);
2200 err |= __put_user((short)from->si_code, &to->si_code);
2201 switch (from->si_code & __SI_MASK) {
2202 case __SI_KILL:
2203 err |= __put_user(from->si_pid, &to->si_pid);
2204 err |= __put_user(from->si_uid, &to->si_uid);
2205 break;
2206 case __SI_TIMER:
2207 err |= __put_user(from->si_tid, &to->si_tid);
2208 err |= __put_user(from->si_overrun, &to->si_overrun);
2209 err |= __put_user(from->si_ptr, &to->si_ptr);
2210 break;
2211 case __SI_POLL:
2212 err |= __put_user(from->si_band, &to->si_band);
2213 err |= __put_user(from->si_fd, &to->si_fd);
2214 break;
2215 case __SI_FAULT:
2216 err |= __put_user(from->si_addr, &to->si_addr);
2217#ifdef __ARCH_SI_TRAPNO
2218 err |= __put_user(from->si_trapno, &to->si_trapno);
2219#endif
2220 break;
2221 case __SI_CHLD:
2222 err |= __put_user(from->si_pid, &to->si_pid);
2223 err |= __put_user(from->si_uid, &to->si_uid);
2224 err |= __put_user(from->si_status, &to->si_status);
2225 err |= __put_user(from->si_utime, &to->si_utime);
2226 err |= __put_user(from->si_stime, &to->si_stime);
2227 break;
2228 case __SI_RT: /* This is not generated by the kernel as of now. */
2229 case __SI_MESGQ: /* But this is */
2230 err |= __put_user(from->si_pid, &to->si_pid);
2231 err |= __put_user(from->si_uid, &to->si_uid);
2232 err |= __put_user(from->si_ptr, &to->si_ptr);
2233 break;
2234 default: /* this is just in case for now ... */
2235 err |= __put_user(from->si_pid, &to->si_pid);
2236 err |= __put_user(from->si_uid, &to->si_uid);
2237 break;
2238 }
2239 return err;
2240}
2241
2242#endif
2243
2244asmlinkage long
2245sys_rt_sigtimedwait(const sigset_t __user *uthese,
2246 siginfo_t __user *uinfo,
2247 const struct timespec __user *uts,
2248 size_t sigsetsize)
2249{
2250 int ret, sig;
2251 sigset_t these;
2252 struct timespec ts;
2253 siginfo_t info;
2254 long timeout = 0;
2255
2256 /* XXX: Don't preclude handling different sized sigset_t's. */
2257 if (sigsetsize != sizeof(sigset_t))
2258 return -EINVAL;
2259
2260 if (copy_from_user(&these, uthese, sizeof(these)))
2261 return -EFAULT;
2262
2263 /*
2264 * Invert the set of allowed signals to get those we
2265 * want to block.
2266 */
2267 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2268 signotset(&these);
2269
2270 if (uts) {
2271 if (copy_from_user(&ts, uts, sizeof(ts)))
2272 return -EFAULT;
2273 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2274 || ts.tv_sec < 0)
2275 return -EINVAL;
2276 }
2277
2278 spin_lock_irq(&current->sighand->siglock);
2279 sig = dequeue_signal(current, &these, &info);
2280 if (!sig) {
2281 timeout = MAX_SCHEDULE_TIMEOUT;
2282 if (uts)
2283 timeout = (timespec_to_jiffies(&ts)
2284 + (ts.tv_sec || ts.tv_nsec));
2285
2286 if (timeout) {
2287 /* None ready -- temporarily unblock those we're
 2288			 * interested in while we are sleeping so that we'll
2289 * be awakened when they arrive. */
2290 current->real_blocked = current->blocked;
2291 sigandsets(&current->blocked, &current->blocked, &these);
2292 recalc_sigpending();
2293 spin_unlock_irq(&current->sighand->siglock);
2294
75bcc8c5 2295 timeout = schedule_timeout_interruptible(timeout);
 2296
2297 spin_lock_irq(&current->sighand->siglock);
2298 sig = dequeue_signal(current, &these, &info);
2299 current->blocked = current->real_blocked;
2300 siginitset(&current->real_blocked, 0);
2301 recalc_sigpending();
2302 }
2303 }
2304 spin_unlock_irq(&current->sighand->siglock);
2305
2306 if (sig) {
2307 ret = sig;
2308 if (uinfo) {
2309 if (copy_siginfo_to_user(uinfo, &info))
2310 ret = -EFAULT;
2311 }
2312 } else {
2313 ret = -EAGAIN;
2314 if (timeout)
2315 ret = -EINTR;
2316 }
2317
2318 return ret;
2319}
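
/*
 * Editorial illustration (not part of kernel/signal.c): a user-space sketch of
 * sigtimedwait(), which reaches the code above. The caller blocks the signals
 * it waits for so they queue instead of being delivered asynchronously.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* queue rather than deliver */

	raise(SIGUSR1);
	sig = sigtimedwait(&set, &info, &ts);
	if (sig > 0)
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	else if (errno == EAGAIN)
		printf("timed out\n");
	return 0;
}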
2320
2321asmlinkage long
2322sys_kill(int pid, int sig)
2323{
2324 struct siginfo info;
2325
2326 info.si_signo = sig;
2327 info.si_errno = 0;
2328 info.si_code = SI_USER;
2329 info.si_pid = current->tgid;
2330 info.si_uid = current->uid;
2331
2332 return kill_something_info(sig, &info, pid);
2333}
2334
6dd69f10 2335static int do_tkill(int tgid, int pid, int sig)
1da177e4 2336{
1da177e4 2337 int error;
6dd69f10 2338 struct siginfo info;
2339 struct task_struct *p;
2340
6dd69f10 2341 error = -ESRCH;
2342 info.si_signo = sig;
2343 info.si_errno = 0;
2344 info.si_code = SI_TKILL;
2345 info.si_pid = current->tgid;
2346 info.si_uid = current->uid;
2347
2348 read_lock(&tasklist_lock);
2349 p = find_task_by_pid(pid);
6dd69f10 2350 if (p && (tgid <= 0 || p->tgid == tgid)) {
2351 error = check_kill_permission(sig, &info, p);
2352 /*
2353 * The null signal is a permissions and process existence
2354 * probe. No signal is actually delivered.
2355 */
2356 if (!error && sig && p->sighand) {
2357 spin_lock_irq(&p->sighand->siglock);
2358 handle_stop_signal(sig, p);
2359 error = specific_send_sig_info(sig, &info, p);
2360 spin_unlock_irq(&p->sighand->siglock);
2361 }
2362 }
2363 read_unlock(&tasklist_lock);
 2364
2365 return error;
2366}
2367
2368/**
2369 * sys_tgkill - send signal to one specific thread
2370 * @tgid: the thread group ID of the thread
2371 * @pid: the PID of the thread
2372 * @sig: signal to be sent
2373 *
2374 * This syscall also checks the tgid and returns -ESRCH even if the PID
 2375 * exists but no longer belongs to the target process. This
2376 * method solves the problem of threads exiting and PIDs getting reused.
2377 */
2378asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2379{
2380 /* This is only valid for single tasks */
2381 if (pid <= 0 || tgid <= 0)
2382 return -EINVAL;
2383
2384 return do_tkill(tgid, pid, sig);
2385}
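
/*
 * Editorial illustration (not part of kernel/signal.c): tgkill typically has
 * no dedicated libc wrapper in this era, so user space invokes it via
 * syscall(); pthread_kill() is the usual front end. A minimal sketch:
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);

	/* Signal 0 is a pure existence/permission probe, as with kill(). */
	if (syscall(SYS_tgkill, (int)tgid, (int)tid, 0) == 0)
		printf("thread %d in process %d exists\n", (int)tid, (int)tgid);
	return 0;
}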
2386
2387/*
2388 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2389 */
2390asmlinkage long
2391sys_tkill(int pid, int sig)
2392{
2393 /* This is only valid for single tasks */
2394 if (pid <= 0)
2395 return -EINVAL;
2396
6dd69f10 2397 return do_tkill(0, pid, sig);
2398}
2399
2400asmlinkage long
2401sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2402{
2403 siginfo_t info;
2404
2405 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2406 return -EFAULT;
2407
2408 /* Not even root can pretend to send signals from the kernel.
2409 Nor can they impersonate a kill(), which adds source info. */
2410 if (info.si_code >= 0)
2411 return -EPERM;
2412 info.si_signo = sig;
2413
2414 /* POSIX.1b doesn't mention process groups. */
2415 return kill_proc_info(sig, &info, pid);
2416}
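
/*
 * Editorial illustration (not part of kernel/signal.c): glibc's sigqueue() is
 * typically built on rt_sigqueueinfo with si_code = SI_QUEUE (negative), which
 * is what the si_code >= 0 check above permits. Demo only -- printf() in a
 * signal handler is not async-signal-safe.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	printf("sig %d, si_code %d, value %d\n",
	       sig, info->si_code, info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
	union sigval v = { .sival_int = 42 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigqueue(getpid(), SIGUSR1, v);
	return 0;
}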
2417
2418int
9ac95f2f 2419do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2420{
2421 struct k_sigaction *k;
71fabd5e 2422 sigset_t mask;
 2423
7ed20e1a 2424 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2425 return -EINVAL;
2426
2427 k = &current->sighand->action[sig-1];
2428
2429 spin_lock_irq(&current->sighand->siglock);
2430 if (signal_pending(current)) {
2431 /*
2432 * If there might be a fatal signal pending on multiple
2433 * threads, make sure we take it before changing the action.
2434 */
2435 spin_unlock_irq(&current->sighand->siglock);
2436 return -ERESTARTNOINTR;
2437 }
2438
2439 if (oact)
2440 *oact = *k;
2441
2442 if (act) {
2443 sigdelsetmask(&act->sa.sa_mask,
2444 sigmask(SIGKILL) | sigmask(SIGSTOP));
2445 /*
2446 * POSIX 3.3.1.3:
2447 * "Setting a signal action to SIG_IGN for a signal that is
2448 * pending shall cause the pending signal to be discarded,
2449 * whether or not it is blocked."
2450 *
2451 * "Setting a signal action to SIG_DFL for a signal that is
2452 * pending and whose default action is to ignore the signal
2453 * (for example, SIGCHLD), shall cause the pending signal to
2454 * be discarded, whether or not it is blocked"
2455 */
2456 if (act->sa.sa_handler == SIG_IGN ||
2457 (act->sa.sa_handler == SIG_DFL &&
2458 sig_kernel_ignore(sig))) {
2459 /*
2460 * This is a fairly rare case, so we only take the
2461 * tasklist_lock once we're sure we'll need it.
2462 * Now we must do this little unlock and relock
2463 * dance to maintain the lock hierarchy.
2464 */
2465 struct task_struct *t = current;
2466 spin_unlock_irq(&t->sighand->siglock);
2467 read_lock(&tasklist_lock);
2468 spin_lock_irq(&t->sighand->siglock);
2469 *k = *act;
2470 sigemptyset(&mask);
2471 sigaddset(&mask, sig);
2472 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2473 do {
71fabd5e 2474 rm_from_queue_full(&mask, &t->pending);
2475 recalc_sigpending_tsk(t);
2476 t = next_thread(t);
2477 } while (t != current);
2478 spin_unlock_irq(&current->sighand->siglock);
2479 read_unlock(&tasklist_lock);
2480 return 0;
2481 }
2482
2483 *k = *act;
2484 }
2485
2486 spin_unlock_irq(&current->sighand->siglock);
2487 return 0;
2488}
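
/*
 * Editorial illustration (not part of kernel/signal.c): the POSIX 3.3.1.3
 * behaviour handled above -- setting SIG_IGN discards an already-pending
 * signal, even one that is blocked. A minimal user-space sketch:
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);				/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);		/* discards the pending instance */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));	/* prints 0 */
	return 0;
}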
2489
2490int
2491do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2492{
2493 stack_t oss;
2494 int error;
2495
2496 if (uoss) {
2497 oss.ss_sp = (void __user *) current->sas_ss_sp;
2498 oss.ss_size = current->sas_ss_size;
2499 oss.ss_flags = sas_ss_flags(sp);
2500 }
2501
2502 if (uss) {
2503 void __user *ss_sp;
2504 size_t ss_size;
2505 int ss_flags;
2506
2507 error = -EFAULT;
2508 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2509 || __get_user(ss_sp, &uss->ss_sp)
2510 || __get_user(ss_flags, &uss->ss_flags)
2511 || __get_user(ss_size, &uss->ss_size))
2512 goto out;
2513
2514 error = -EPERM;
2515 if (on_sig_stack(sp))
2516 goto out;
2517
2518 error = -EINVAL;
2519 /*
2520 *
 2521	 * Note: this code used to test ss_flags incorrectly;
 2522	 * old code may have been written using ss_flags==0
 2523	 * to mean ss_flags==SS_ONSTACK (as this was the only
 2524	 * way that worked), so this fix preserves that older
 2525	 * mechanism.
2526 */
2527 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2528 goto out;
2529
2530 if (ss_flags == SS_DISABLE) {
2531 ss_size = 0;
2532 ss_sp = NULL;
2533 } else {
2534 error = -ENOMEM;
2535 if (ss_size < MINSIGSTKSZ)
2536 goto out;
2537 }
2538
2539 current->sas_ss_sp = (unsigned long) ss_sp;
2540 current->sas_ss_size = ss_size;
2541 }
2542
2543 if (uoss) {
2544 error = -EFAULT;
2545 if (copy_to_user(uoss, &oss, sizeof(oss)))
2546 goto out;
2547 }
2548
2549 error = 0;
2550out:
2551 return error;
2552}
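
/*
 * Editorial illustration (not part of kernel/signal.c): user-space setup of an
 * alternate signal stack matching the checks above (ss_size >= MINSIGSTKSZ,
 * ss_flags of 0, SS_ONSTACK, or SS_DISABLE). The handler runs on the alternate
 * stack because SA_ONSTACK is set; write() is used as it is async-signal-safe.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	write(2, "SIGSEGV on altstack\n", 20);
	_exit(1);
}

int main(void)
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),
		.ss_size  = SIGSTKSZ,		/* must be >= MINSIGSTKSZ */
		.ss_flags = 0,
	};
	struct sigaction sa = { .sa_handler = on_segv, .sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);				/* handler runs on the alternate stack */
	return 0;
}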
2553
2554#ifdef __ARCH_WANT_SYS_SIGPENDING
2555
2556asmlinkage long
2557sys_sigpending(old_sigset_t __user *set)
2558{
2559 return do_sigpending(set, sizeof(*set));
2560}
2561
2562#endif
2563
2564#ifdef __ARCH_WANT_SYS_SIGPROCMASK
 2565/* Some platforms have their own version with special arguments; others
2566 support only sys_rt_sigprocmask. */
2567
2568asmlinkage long
2569sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2570{
2571 int error;
2572 old_sigset_t old_set, new_set;
2573
2574 if (set) {
2575 error = -EFAULT;
2576 if (copy_from_user(&new_set, set, sizeof(*set)))
2577 goto out;
2578 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2579
2580 spin_lock_irq(&current->sighand->siglock);
2581 old_set = current->blocked.sig[0];
2582
2583 error = 0;
2584 switch (how) {
2585 default:
2586 error = -EINVAL;
2587 break;
2588 case SIG_BLOCK:
2589 sigaddsetmask(&current->blocked, new_set);
2590 break;
2591 case SIG_UNBLOCK:
2592 sigdelsetmask(&current->blocked, new_set);
2593 break;
2594 case SIG_SETMASK:
2595 current->blocked.sig[0] = new_set;
2596 break;
2597 }
2598
2599 recalc_sigpending();
2600 spin_unlock_irq(&current->sighand->siglock);
2601 if (error)
2602 goto out;
2603 if (oset)
2604 goto set_old;
2605 } else if (oset) {
2606 old_set = current->blocked.sig[0];
2607 set_old:
2608 error = -EFAULT;
2609 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2610 goto out;
2611 }
2612 error = 0;
2613out:
2614 return error;
2615}
2616#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2617
2618#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2619asmlinkage long
2620sys_rt_sigaction(int sig,
2621 const struct sigaction __user *act,
2622 struct sigaction __user *oact,
2623 size_t sigsetsize)
2624{
2625 struct k_sigaction new_sa, old_sa;
2626 int ret = -EINVAL;
2627
2628 /* XXX: Don't preclude handling different sized sigset_t's. */
2629 if (sigsetsize != sizeof(sigset_t))
2630 goto out;
2631
2632 if (act) {
2633 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2634 return -EFAULT;
2635 }
2636
2637 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2638
2639 if (!ret && oact) {
2640 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2641 return -EFAULT;
2642 }
2643out:
2644 return ret;
2645}
2646#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2647
2648#ifdef __ARCH_WANT_SYS_SGETMASK
2649
2650/*
2651 * For backwards compatibility. Functionality superseded by sigprocmask.
2652 */
2653asmlinkage long
2654sys_sgetmask(void)
2655{
2656 /* SMP safe */
2657 return current->blocked.sig[0];
2658}
2659
2660asmlinkage long
2661sys_ssetmask(int newmask)
2662{
2663 int old;
2664
2665 spin_lock_irq(&current->sighand->siglock);
2666 old = current->blocked.sig[0];
2667
2668 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2669 sigmask(SIGSTOP)));
2670 recalc_sigpending();
2671 spin_unlock_irq(&current->sighand->siglock);
2672
2673 return old;
2674}
 2675#endif /* __ARCH_WANT_SYS_SGETMASK */
2676
2677#ifdef __ARCH_WANT_SYS_SIGNAL
2678/*
2679 * For backwards compatibility. Functionality superseded by sigaction.
2680 */
2681asmlinkage unsigned long
2682sys_signal(int sig, __sighandler_t handler)
2683{
2684 struct k_sigaction new_sa, old_sa;
2685 int ret;
2686
2687 new_sa.sa.sa_handler = handler;
2688 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2689 sigemptyset(&new_sa.sa.sa_mask);
2690
2691 ret = do_sigaction(sig, &new_sa, &old_sa);
2692
2693 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2694}
2695#endif /* __ARCH_WANT_SYS_SIGNAL */
2696
2697#ifdef __ARCH_WANT_SYS_PAUSE
2698
2699asmlinkage long
2700sys_pause(void)
2701{
2702 current->state = TASK_INTERRUPTIBLE;
2703 schedule();
2704 return -ERESTARTNOHAND;
2705}
2706
2707#endif
2708
2709#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2710asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2711{
2712 sigset_t newset;
2713
2714 /* XXX: Don't preclude handling different sized sigset_t's. */
2715 if (sigsetsize != sizeof(sigset_t))
2716 return -EINVAL;
2717
2718 if (copy_from_user(&newset, unewset, sizeof(newset)))
2719 return -EFAULT;
2720 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2721
2722 spin_lock_irq(&current->sighand->siglock);
2723 current->saved_sigmask = current->blocked;
2724 current->blocked = newset;
2725 recalc_sigpending();
2726 spin_unlock_irq(&current->sighand->siglock);
2727
2728 current->state = TASK_INTERRUPTIBLE;
2729 schedule();
2730 set_thread_flag(TIF_RESTORE_SIGMASK);
2731 return -ERESTARTNOHAND;
2732}
2733#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
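
/*
 * Editorial illustration (not part of kernel/signal.c): the classic user-space
 * use of sigsuspend(), which relies on the saved_sigmask/TIF_RESTORE_SIGMASK
 * mechanism above to restore the old mask after the handler has run.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig) { got_usr1 = 1; }

int main(void)
{
	sigset_t block, waitmask;

	signal(SIGUSR1, handler);

	/* Block SIGUSR1 so there is no window between the check and the wait. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &waitmask);	/* waitmask: old mask */

	if (fork() == 0) {			/* child wakes the parent up */
		kill(getppid(), SIGUSR1);
		_exit(0);
	}

	while (!got_usr1)
		sigsuspend(&waitmask);		/* atomically unblock and sleep */

	printf("woken by SIGUSR1\n");
	return 0;
}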
2734
2735void __init signals_init(void)
2736{
2737 sigqueue_cachep =
2738 kmem_cache_create("sigqueue",
2739 sizeof(struct sigqueue),
2740 __alignof__(struct sigqueue),
2741 SLAB_PANIC, NULL, NULL);
2742}