/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
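
/*
 * Illustrative example (not part of the original source): with 64-bit
 * words, a pending set holding SIGINT (2) and SIGTERM (15) while only
 * SIGINT is blocked reduces to
 *
 *	signal->sig[0]  = (1UL << 1) | (1UL << 14);	// bit N-1 for signal N
 *	blocked->sig[0] = (1UL << 1);
 *	ready = signal->sig[0] &~ blocked->sig[0];	// SIGTERM bit survives
 *
 * so has_pending_signals() returns non-zero and TIF_SIGPENDING must stay
 * set until SIGTERM is dequeued.
 */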

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
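
/*
 * Sketch of the usual calling pattern (illustrative only, loosely modeled
 * on the sigprocmask() path): callers that change ->blocked must hold
 * ->siglock and recalculate TIF_SIGPENDING themselves afterwards.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = *newset;		// hypothetical new mask
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */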

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
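
/*
 * Worked example (illustrative, not in the original source): if both
 * SIGTERM and SIGSEGV are pending and unblocked, the first word has bits
 * set for both, (x & SYNCHRONOUS_MASK) is non-zero, so x is narrowed to
 * the synchronous bits and ffz(~x) picks SIGSEGV. Fault signals therefore
 * win over asynchronous ones queued in the same word.
 */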

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
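
/*
 * Example caller (sketch): the exec path resets handlers so a fresh
 * binary never runs with stale ones, while inherited SIG_IGN settings
 * survive as POSIX requires; the exec code in fs/exec.c does roughly:
 *
 *	flush_signal_handlers(current, 0);	// keep SIG_IGN, reset the rest
 */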

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
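
/*
 * Usage sketch (illustrative): the classic user of this interface was the
 * DRM lock code, which blocked signals while userspace held the hardware
 * lock. A hypothetical driver would pair the calls like so:
 *
 *	static int my_notifier(void *priv)	// hypothetical callback
 *	{
 *		return 0;			// 0 == keep the signal blocked
 *	}
 *
 *	block_all_signals(my_notifier, dev, &dev->sigmask);
 *	... critical section ...
 *	unblock_all_signals();
 */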

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
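
/*
 * For reference, the wrappers most callers use (defined in
 * <linux/sched.h> in this kernel generation) are thin shims over
 * signal_wake_up_state(); shown here as a sketch:
 *
 *	static inline void signal_wake_up(struct task_struct *t, bool resume)
 *	{
 *		signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
 *	}
 *
 *	static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 *	{
 *		signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 *	}
 */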

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			printk(KERN_DEBUG "[%d:%s] is in the middle of dying so skip sig %d\n",
			       p->pid, p->comm, sig);
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
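
/*
 * Userspace-visible consequence (illustrative): because legacy_queue()
 * refuses a second instance of a pending non-RT signal, two back-to-back
 * kill() calls may be observed as a single delivery, while RT signals
 * queue individually, e.g.:
 *
 *	kill(pid, SIGUSR1);		// pending
 *	kill(pid, SIGUSR1);		// coalesced, dropped here
 *	sigqueue(pid, SIGRTMIN, val);	// queued
 *	sigqueue(pid, SIGRTMIN, val);	// queued again
 */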

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;
	unsigned long state;

	state = t->state ? __ffs(t->state) + 1 : 0;
	printk(KERN_DEBUG "[%d:%s] sig %d to [%d:%s] stat=%c\n",
	       current->pid, current->comm, sig, t->pid, t->comm,
	       state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
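
/*
 * Typical caller pattern (sketch): lock_task_sighand() is the wrapper
 * around __lock_task_sighand() and fails when the task is already being
 * released, so callers must handle a NULL sighand, e.g.:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */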

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
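
/*
 * Example caller (sketch): architecture signal-delivery code uses this
 * when building a signal frame fails, so the task dies with SIGSEGV
 * instead of looping on an undeliverable handler; arch handle_signal()
 * implementations do roughly (helper names here are hypothetical):
 *
 *	if (setup_rt_frame(sig, ka, info, oldset, regs) < 0)
 *		force_sigsegv(sig, current);
 */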

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
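
/*
 * Usage sketch (illustrative): a driver holding a struct pid reference
 * can signal the registered process without ever storing a raw pid_t,
 * which stays correct across pid reuse:
 *
 *	struct pid *owner = get_pid(task_tgid(current));	// at open()
 *	...
 *	kill_pid(owner, SIGIO, 1);	// priv=1: SEND_SIG_PRIV, from kernel
 *	...
 *	put_pid(owner);			// at release()
 */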

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
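
/*
 * Usage sketch (illustrative): the POSIX timer code is the canonical
 * user of this pair. Allocation happens once at timer_create() time so
 * expiry can never fail with -EAGAIN:
 *
 *	// at timer_create():
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;			// reported to the application
 *
 *	// at each expiry (roughly what posix-timers does):
 *	q->info.si_signo = sig;
 *	q->info.si_code = SI_TIMER;
 *	send_sigqueue(q, task, group);		// may just bump si_overrun
 *
 *	// at timer_delete():
 *	sigqueue_free(q);
 */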

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
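
/*
 * Example caller (sketch): ptrace event reports funnel through here with
 * the event number in the high bits, as the ptrace_event() helper in
 * <linux/ptrace.h> does, roughly:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * which satisfies the BUG_ON() above: the low seven bits are SIGTRAP and
 * the value fits in sixteen bits.
 */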

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(info->si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
/**
 * signal_delivered -
 * @sig:		number of signal being delivered
 * @info:		siginfo_t of signal being delivered
 * @ka:			sigaction setting that chose the handler
 * @regs:		user register state
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ka->sa.sa_flags).  Tracing is notified.
 */
void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
			struct pt_regs *regs, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(sig, info, ka, regs, stepping);
}
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
			signal_pt_regs(), stepping);
}
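
/*
 * Illustrative sketch (not from this file): an architecture's signal
 * delivery path typically pairs get_signal_to_deliver() with the helpers
 * above.  arch_setup_rt_frame() is a hypothetical stand-in for the arch's
 * real frame-setup routine, and the final stepping argument is shown as 0
 * for brevity.
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct k_sigaction ka;
 *		siginfo_t info;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0) {
 *			if (arch_setup_rt_frame(signr, &ka, &info, regs))
 *				force_sigsegv(signr, current);
 *			else
 *				signal_delivered(signr, &info, &ka, regs, 0);
 *		}
 *	}
 */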
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
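
/*
 * Illustrative sketch (an assumption, not from this file): a kernel thread
 * using the kernel-side sigprocmask() above to block everything except
 * SIGKILL before entering its main loop.
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &all, NULL);
 */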
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: stores pending signals
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
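
/*
 * Illustrative user-space sketch (not kernel code): glibc's sigprocmask()
 * wrapper lands in the syscall above.  Blocking SIGINT around a critical
 * region, then restoring the previous mask; do_critical_work() is a
 * hypothetical placeholder.
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	do_critical_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */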
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif

static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
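
/*
 * Illustrative user-space sketch (not kernel code): checking whether a
 * blocked SIGTERM has arrived, via the sigpending() wrapper for the
 * syscall above.  handle_deferred_sigterm() is a hypothetical helper.
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
 *		handle_deferred_sigterm();
 */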
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping, so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = freezable_schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}

/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
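
/*
 * Illustrative user-space sketch (not kernel code): synchronously waiting
 * up to five seconds for a SIGUSR1 that was blocked beforehand, via the
 * sigtimedwait() wrapper for the syscall above.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		printf("sent by pid %d\n", info.si_pid);
 */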
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
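
/*
 * Illustrative user-space sketch (not kernel code): signal 0 exercises the
 * permission and existence checks without delivering anything, the classic
 * way to probe whether a process is alive.
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		printf("process %d exists\n", pid);
 *	else if (errno == ESRCH)
 *		printf("no process %d\n", pid);
 */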
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
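
/*
 * Illustrative user-space sketch (not kernel code): directing a signal at
 * one thread of a process.  Glibc of this era shipped no wrapper, so the
 * raw syscall is used; tid would be obtained via gettid() in the target
 * thread.
 *
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */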
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_sigqueueinfo(pid, sig, &info);
}
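
/*
 * Illustrative user-space sketch (not kernel code): sigqueue() reaches the
 * syscall above with si_code set to SI_QUEUE and carries a caller-chosen
 * payload in the siginfo.
 *
 *	union sigval value = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, value);
 */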
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
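
/*
 * Illustrative user-space sketch (not kernel code): installing a handler
 * with sigaction(2), which funnels into do_sigaction() above.  on_usr1 is
 * a hypothetical async-signal-safe handler.
 *
 *	struct sigaction sa;
 *
 *	sa.sa_handler = on_usr1;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */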
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}
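
/*
 * Illustrative user-space sketch (not kernel code): installing an alternate
 * stack so a SIGSEGV handler can still run after the main stack overflows;
 * SA_ONSTACK makes the handler use it.  segv_handler is a hypothetical
 * handler and error checks are elided.
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */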
int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
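
/*
 * Illustrative user-space sketch (not kernel code): the canonical
 * block-check-suspend pattern.  sigsuspend() swaps the mask and sleeps
 * atomically, closing the wakeup race that an unblock followed by pause()
 * would leave open.  usr1_seen is a hypothetical volatile sig_atomic_t
 * flag set by the SIGUSR1 handler.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *
 *	while (!usr1_seen)
 *		sigsuspend(&old);
 *
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */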
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */