BACKPORT: signal: add pidfd_send_signal() syscall
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] kernel/signal.c
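The syscall this backport adds lets userspace signal a process through a file descriptor that refers to it, avoiding the PID-reuse races of kill(2). Below is a minimal userspace sketch, not part of this file: it assumes the upstream syscall number 424 and the original /proc/<pid> directory-fd form of a pidfd accepted by the first version of the patch; a vendor backport may wire a different number.

/* Hypothetical usage sketch for pidfd_send_signal(); not part of kernel/signal.c. */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424	/* assumption: upstream syscall number */
#endif

int main(void)
{
	/* 1234 is an example pid; a /proc/<pid> directory fd serves as the pidfd. */
	int pidfd = open("/proc/1234", O_DIRECTORY | O_CLOEXEC);

	if (pidfd < 0) {
		perror("open");
		return 1;
	}
	/* info == NULL behaves like kill(2); flags must currently be 0. */
	if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	close(pidfd);
	return 0;
}
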
1 /*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/user.h>
18 #include <linux/sched/debug.h>
19 #include <linux/sched/task.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sched/cputime.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/proc_fs.h>
25 #include <linux/tty.h>
26 #include <linux/binfmts.h>
27 #include <linux/coredump.h>
28 #include <linux/security.h>
29 #include <linux/syscalls.h>
30 #include <linux/ptrace.h>
31 #include <linux/signal.h>
32 #include <linux/signalfd.h>
33 #include <linux/ratelimit.h>
34 #include <linux/tracehook.h>
35 #include <linux/capability.h>
36 #include <linux/freezer.h>
37 #include <linux/pid_namespace.h>
38 #include <linux/nsproxy.h>
39 #include <linux/user_namespace.h>
40 #include <linux/uprobes.h>
41 #include <linux/compat.h>
42 #include <linux/cn_proc.h>
43 #include <linux/compiler.h>
44 #include <linux/posix-timers.h>
45 #include <linux/oom.h>
46 #include <linux/capability.h>
47
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/signal.h>
50
51 #include <asm/param.h>
52 #include <linux/uaccess.h>
53 #include <asm/unistd.h>
54 #include <asm/siginfo.h>
55 #include <asm/cacheflush.h>
56 #include "audit.h" /* audit_signal_info() */
57
58 /*
59 * SLAB caches for signal bits.
60 */
61
62 static struct kmem_cache *sigqueue_cachep;
63
64 int print_fatal_signals __read_mostly;
65
66 static void __user *sig_handler(struct task_struct *t, int sig)
67 {
68 return t->sighand->action[sig - 1].sa.sa_handler;
69 }
70
71 static int sig_handler_ignored(void __user *handler, int sig)
72 {
73 /* Is it explicitly or implicitly ignored? */
74 return handler == SIG_IGN ||
75 (handler == SIG_DFL && sig_kernel_ignore(sig));
76 }
77
78 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
79 {
80 void __user *handler;
81
82 handler = sig_handler(t, sig);
83
84 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
85 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
86 return 1;
87
88 return sig_handler_ignored(handler, sig);
89 }
90
91 static int sig_ignored(struct task_struct *t, int sig, bool force)
92 {
93 /*
94 * Blocked signals are never ignored, since the
95 * signal handler may change by the time it is
96 * unblocked.
97 */
98 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
99 return 0;
100
101 /*
102 * Tracers may want to know about even ignored signals, unless it
103 * is SIGKILL, which can't be reported anyway but can be ignored
104 * by a SIGNAL_UNKILLABLE task.
105 */
106 if (t->ptrace && sig != SIGKILL)
107 return 0;
108
109 return sig_task_ignored(t, sig, force);
110 }
111
112 /*
113 * Re-calculate pending state from the set of locally pending
114 * signals, globally pending signals, and blocked signals.
115 */
116 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
117 {
118 unsigned long ready;
119 long i;
120
121 switch (_NSIG_WORDS) {
122 default:
123 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
124 ready |= signal->sig[i] &~ blocked->sig[i];
125 break;
126
127 case 4: ready = signal->sig[3] &~ blocked->sig[3];
128 ready |= signal->sig[2] &~ blocked->sig[2];
129 ready |= signal->sig[1] &~ blocked->sig[1];
130 ready |= signal->sig[0] &~ blocked->sig[0];
131 break;
132
133 case 2: ready = signal->sig[1] &~ blocked->sig[1];
134 ready |= signal->sig[0] &~ blocked->sig[0];
135 break;
136
137 case 1: ready = signal->sig[0] &~ blocked->sig[0];
138 }
139 return ready != 0;
140 }
141
142 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
143
144 static int recalc_sigpending_tsk(struct task_struct *t)
145 {
146 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
147 PENDING(&t->pending, &t->blocked) ||
148 PENDING(&t->signal->shared_pending, &t->blocked)) {
149 set_tsk_thread_flag(t, TIF_SIGPENDING);
150 return 1;
151 }
152 /*
153 * We must never clear the flag in another thread, or in current
154 * when it's possible the current syscall is returning -ERESTART*.
155 * So we don't clear it here; only callers who know they should clear it do so.
156 */
157 return 0;
158 }
159
160 /*
161 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
162 * This is superfluous when called on current; the wakeup is a harmless no-op.
163 */
164 void recalc_sigpending_and_wake(struct task_struct *t)
165 {
166 if (recalc_sigpending_tsk(t))
167 signal_wake_up(t, 0);
168 }
169
170 void recalc_sigpending(void)
171 {
172 if (!recalc_sigpending_tsk(current) && !freezing(current))
173 clear_thread_flag(TIF_SIGPENDING);
174
175 }
176
177 /* Given the mask, find the first available signal that should be serviced. */
178
179 #define SYNCHRONOUS_MASK \
180 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
181 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
182
183 int next_signal(struct sigpending *pending, sigset_t *mask)
184 {
185 unsigned long i, *s, *m, x;
186 int sig = 0;
187
188 s = pending->signal.sig;
189 m = mask->sig;
190
191 /*
192 * Handle the first word specially: it contains the
193 * synchronous signals that need to be dequeued first.
194 */
195 x = *s &~ *m;
196 if (x) {
197 if (x & SYNCHRONOUS_MASK)
198 x &= SYNCHRONOUS_MASK;
199 sig = ffz(~x) + 1;
200 return sig;
201 }
202
203 switch (_NSIG_WORDS) {
204 default:
205 for (i = 1; i < _NSIG_WORDS; ++i) {
206 x = *++s &~ *++m;
207 if (!x)
208 continue;
209 sig = ffz(~x) + i*_NSIG_BPW + 1;
210 break;
211 }
212 break;
213
214 case 2:
215 x = s[1] &~ m[1];
216 if (!x)
217 break;
218 sig = ffz(~x) + _NSIG_BPW + 1;
219 break;
220
221 case 1:
222 /* Nothing to do */
223 break;
224 }
225
226 return sig;
227 }
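
/*
 * [Editor's note, illustrative only; not part of the original file]
 * Worked example of the first-word special case above: if SIGSEGV (11) and
 * SIGTERM (15) are both pending and unblocked, x has bits 10 and 14 set;
 * masking with SYNCHRONOUS_MASK leaves only bit 10, so ffz(~x) + 1 returns
 * 11 and the synchronous fault signal is dequeued before the asynchronous
 * SIGTERM.
 */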
228
229 static inline void print_dropped_signal(int sig)
230 {
231 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
232
233 if (!print_fatal_signals)
234 return;
235
236 if (!__ratelimit(&ratelimit_state))
237 return;
238
239 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
240 current->comm, current->pid, sig);
241 }
242
243 /**
244 * task_set_jobctl_pending - set jobctl pending bits
245 * @task: target task
246 * @mask: pending bits to set
247 *
248 * Set @mask in @task->jobctl. @mask must be a subset of
249 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
250 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
251 * cleared. If @task is already being killed or exiting, this function
252 * becomes noop.
253 *
254 * CONTEXT:
255 * Must be called with @task->sighand->siglock held.
256 *
257 * RETURNS:
258 * %true if @mask is set, %false if made noop because @task was dying.
259 */
260 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
261 {
262 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
263 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
264 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
265
266 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
267 return false;
268
269 if (mask & JOBCTL_STOP_SIGMASK)
270 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
271
272 task->jobctl |= mask;
273 return true;
274 }
275
276 /**
277 * task_clear_jobctl_trapping - clear jobctl trapping bit
278 * @task: target task
279 *
280 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
281 * Clear it and wake up the ptracer. Note that we don't need any further
282 * locking. @task->siglock guarantees that @task->parent points to the
283 * ptracer.
284 *
285 * CONTEXT:
286 * Must be called with @task->sighand->siglock held.
287 */
288 void task_clear_jobctl_trapping(struct task_struct *task)
289 {
290 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
291 task->jobctl &= ~JOBCTL_TRAPPING;
292 smp_mb(); /* advised by wake_up_bit() */
293 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
294 }
295 }
296
297 /**
298 * task_clear_jobctl_pending - clear jobctl pending bits
299 * @task: target task
300 * @mask: pending bits to clear
301 *
302 * Clear @mask from @task->jobctl. @mask must be subset of
303 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
304 * STOP bits are cleared together.
305 *
306 * If clearing of @mask leaves no stop or trap pending, this function calls
307 * task_clear_jobctl_trapping().
308 *
309 * CONTEXT:
310 * Must be called with @task->sighand->siglock held.
311 */
312 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
313 {
314 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
315
316 if (mask & JOBCTL_STOP_PENDING)
317 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
318
319 task->jobctl &= ~mask;
320
321 if (!(task->jobctl & JOBCTL_PENDING_MASK))
322 task_clear_jobctl_trapping(task);
323 }
324
325 /**
326 * task_participate_group_stop - participate in a group stop
327 * @task: task participating in a group stop
328 *
329 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
330 * Group stop states are cleared and the group stop count is consumed if
331 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
332 * stop, the appropriate %SIGNAL_* flags are set.
333 *
334 * CONTEXT:
335 * Must be called with @task->sighand->siglock held.
336 *
337 * RETURNS:
338 * %true if group stop completion should be notified to the parent, %false
339 * otherwise.
340 */
341 static bool task_participate_group_stop(struct task_struct *task)
342 {
343 struct signal_struct *sig = task->signal;
344 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
345
346 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
347
348 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
349
350 if (!consume)
351 return false;
352
353 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
354 sig->group_stop_count--;
355
356 /*
357 * Tell the caller to notify completion iff we are entering into a
358 * fresh group stop. Read comment in do_signal_stop() for details.
359 */
360 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
361 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
362 return true;
363 }
364 return false;
365 }
366
367 /*
368 * allocate a new signal queue record
369 * - this may be called without locks if and only if t == current, otherwise an
370 * appropriate lock must be held to stop the target task from exiting
371 */
372 static struct sigqueue *
373 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
374 {
375 struct sigqueue *q = NULL;
376 struct user_struct *user;
377
378 /*
379 * Protect access to @t credentials. This can go away when all
380 * callers hold rcu read lock.
381 */
382 rcu_read_lock();
383 user = get_uid(__task_cred(t)->user);
384 atomic_inc(&user->sigpending);
385 rcu_read_unlock();
386
387 if (override_rlimit ||
388 atomic_read(&user->sigpending) <=
389 task_rlimit(t, RLIMIT_SIGPENDING)) {
390 q = kmem_cache_alloc(sigqueue_cachep, flags);
391 } else {
392 print_dropped_signal(sig);
393 }
394
395 if (unlikely(q == NULL)) {
396 atomic_dec(&user->sigpending);
397 free_uid(user);
398 } else {
399 INIT_LIST_HEAD(&q->list);
400 q->flags = 0;
401 q->user = user;
402 }
403
404 return q;
405 }
406
407 static void __sigqueue_free(struct sigqueue *q)
408 {
409 if (q->flags & SIGQUEUE_PREALLOC)
410 return;
411 atomic_dec(&q->user->sigpending);
412 free_uid(q->user);
413 kmem_cache_free(sigqueue_cachep, q);
414 }
415
416 void flush_sigqueue(struct sigpending *queue)
417 {
418 struct sigqueue *q;
419
420 sigemptyset(&queue->signal);
421 while (!list_empty(&queue->list)) {
422 q = list_entry(queue->list.next, struct sigqueue , list);
423 list_del_init(&q->list);
424 __sigqueue_free(q);
425 }
426 }
427
428 /*
429 * Flush all pending signals for this kthread.
430 */
431 void flush_signals(struct task_struct *t)
432 {
433 unsigned long flags;
434
435 spin_lock_irqsave(&t->sighand->siglock, flags);
436 clear_tsk_thread_flag(t, TIF_SIGPENDING);
437 flush_sigqueue(&t->pending);
438 flush_sigqueue(&t->signal->shared_pending);
439 spin_unlock_irqrestore(&t->sighand->siglock, flags);
440 }
441
442 #ifdef CONFIG_POSIX_TIMERS
443 static void __flush_itimer_signals(struct sigpending *pending)
444 {
445 sigset_t signal, retain;
446 struct sigqueue *q, *n;
447
448 signal = pending->signal;
449 sigemptyset(&retain);
450
451 list_for_each_entry_safe(q, n, &pending->list, list) {
452 int sig = q->info.si_signo;
453
454 if (likely(q->info.si_code != SI_TIMER)) {
455 sigaddset(&retain, sig);
456 } else {
457 sigdelset(&signal, sig);
458 list_del_init(&q->list);
459 __sigqueue_free(q);
460 }
461 }
462
463 sigorsets(&pending->signal, &signal, &retain);
464 }
465
466 void flush_itimer_signals(void)
467 {
468 struct task_struct *tsk = current;
469 unsigned long flags;
470
471 spin_lock_irqsave(&tsk->sighand->siglock, flags);
472 __flush_itimer_signals(&tsk->pending);
473 __flush_itimer_signals(&tsk->signal->shared_pending);
474 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
475 }
476 #endif
477
478 void ignore_signals(struct task_struct *t)
479 {
480 int i;
481
482 for (i = 0; i < _NSIG; ++i)
483 t->sighand->action[i].sa.sa_handler = SIG_IGN;
484
485 flush_signals(t);
486 }
487
488 /*
489 * Flush all handlers for a task.
490 */
491
492 void
493 flush_signal_handlers(struct task_struct *t, int force_default)
494 {
495 int i;
496 struct k_sigaction *ka = &t->sighand->action[0];
497 for (i = _NSIG ; i != 0 ; i--) {
498 if (force_default || ka->sa.sa_handler != SIG_IGN)
499 ka->sa.sa_handler = SIG_DFL;
500 ka->sa.sa_flags = 0;
501 #ifdef __ARCH_HAS_SA_RESTORER
502 ka->sa.sa_restorer = NULL;
503 #endif
504 sigemptyset(&ka->sa.sa_mask);
505 ka++;
506 }
507 }
508
509 int unhandled_signal(struct task_struct *tsk, int sig)
510 {
511 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
512 if (is_global_init(tsk))
513 return 1;
514 if (handler != SIG_IGN && handler != SIG_DFL)
515 return 0;
516 /* if ptraced, let the tracer determine */
517 return !tsk->ptrace;
518 }
519
520 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
521 bool *resched_timer)
522 {
523 struct sigqueue *q, *first = NULL;
524
525 /*
526 * Collect the siginfo appropriate to this signal. Check if
527 * there is another siginfo for the same signal.
528 */
529 list_for_each_entry(q, &list->list, list) {
530 if (q->info.si_signo == sig) {
531 if (first)
532 goto still_pending;
533 first = q;
534 }
535 }
536
537 sigdelset(&list->signal, sig);
538
539 if (first) {
540 still_pending:
541 list_del_init(&first->list);
542 copy_siginfo(info, &first->info);
543
544 *resched_timer =
545 (first->flags & SIGQUEUE_PREALLOC) &&
546 (info->si_code == SI_TIMER) &&
547 (info->si_sys_private);
548
549 __sigqueue_free(first);
550 } else {
551 /*
552 * Ok, it wasn't in the queue. This must be
553 * a fast-pathed signal or we must have been
554 * out of queue space. So zero out the info.
555 */
556 info->si_signo = sig;
557 info->si_errno = 0;
558 info->si_code = SI_USER;
559 info->si_pid = 0;
560 info->si_uid = 0;
561 }
562 }
563
564 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
565 siginfo_t *info, bool *resched_timer)
566 {
567 int sig = next_signal(pending, mask);
568
569 if (sig)
570 collect_signal(sig, pending, info, resched_timer);
571 return sig;
572 }
573
574 /*
575 * Dequeue a signal and return the element to the caller, which is
576 * expected to free it.
577 *
578 * All callers have to hold the siglock.
579 */
580 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
581 {
582 bool resched_timer = false;
583 int signr;
584
585 /* We only dequeue private signals from ourselves, we don't let
586 * signalfd steal them
587 */
588 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
589 if (!signr) {
590 signr = __dequeue_signal(&tsk->signal->shared_pending,
591 mask, info, &resched_timer);
592 #ifdef CONFIG_POSIX_TIMERS
593 /*
594 * itimer signal ?
595 *
596 * itimers are process shared and we restart periodic
597 * itimers in the signal delivery path to prevent DoS
598 * attacks in the high resolution timer case. This is
599 * compliant with the old way of self-restarting
600 * itimers, as the SIGALRM is a legacy signal and only
601 * queued once. Changing the restart behaviour to
602 * restart the timer in the signal dequeue path is
603 * reducing the timer noise on heavy loaded !highres
604 * systems too.
605 */
606 if (unlikely(signr == SIGALRM)) {
607 struct hrtimer *tmr = &tsk->signal->real_timer;
608
609 if (!hrtimer_is_queued(tmr) &&
610 tsk->signal->it_real_incr != 0) {
611 hrtimer_forward(tmr, tmr->base->get_time(),
612 tsk->signal->it_real_incr);
613 hrtimer_restart(tmr);
614 }
615 }
616 #endif
617 }
618
619 recalc_sigpending();
620 if (!signr)
621 return 0;
622
623 if (unlikely(sig_kernel_stop(signr))) {
624 /*
625 * Set a marker that we have dequeued a stop signal. Our
626 * caller might release the siglock and then the pending
627 * stop signal it is about to process is no longer in the
628 * pending bitmasks, but must still be cleared by a SIGCONT
629 * (and overruled by a SIGKILL). So those cases clear this
630 * shared flag after we've set it. Note that this flag may
631 * remain set after the signal we return is ignored or
632 * handled. That doesn't matter because its only purpose
633 * is to alert stop-signal processing code when another
634 * processor has come along and cleared the flag.
635 */
636 current->jobctl |= JOBCTL_STOP_DEQUEUED;
637 }
638 #ifdef CONFIG_POSIX_TIMERS
639 if (resched_timer) {
640 /*
641 * Release the siglock to ensure proper locking order
642 * of timer locks outside of siglocks. Note, we leave
643 * irqs disabled here, since the posix-timers code is
644 * about to disable them again anyway.
645 */
646 spin_unlock(&tsk->sighand->siglock);
647 posixtimer_rearm(info);
648 spin_lock(&tsk->sighand->siglock);
649 }
650 #endif
651 return signr;
652 }
653
654 /*
655 * Tell a process that it has a new active signal.
656 *
657 * NOTE! we rely on the previous spin_lock to
658 * lock interrupts for us! We can only be called with
659 * "siglock" held, and the local interrupt must
660 * have been disabled when that got acquired!
661 *
662 * No need to set need_resched since signal event passing
663 * goes through ->blocked
664 */
665 void signal_wake_up_state(struct task_struct *t, unsigned int state)
666 {
667 set_tsk_thread_flag(t, TIF_SIGPENDING);
668 /*
669 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
670 * case. We don't check t->state here because there is a race with it
671 * executing on another processor and just now entering stopped state.
672 * By using wake_up_state, we ensure the process will wake up and
673 * handle its death signal.
674 */
675 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
676 kick_process(t);
677 }
678
679 static int dequeue_synchronous_signal(siginfo_t *info)
680 {
681 struct task_struct *tsk = current;
682 struct sigpending *pending = &tsk->pending;
683 struct sigqueue *q, *sync = NULL;
684
685 /*
686 * Might a synchronous signal be in the queue?
687 */
688 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
689 return 0;
690
691 /*
692 * Return the first synchronous signal in the queue.
693 */
694 list_for_each_entry(q, &pending->list, list) {
695 /* Synchronous signals have a positive si_code */
696 if ((q->info.si_code > SI_USER) &&
697 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
698 sync = q;
699 goto next;
700 }
701 }
702 return 0;
703 next:
704 /*
705 * Check if there is another siginfo for the same signal.
706 */
707 list_for_each_entry_continue(q, &pending->list, list) {
708 if (q->info.si_signo == sync->info.si_signo)
709 goto still_pending;
710 }
711
712 sigdelset(&pending->signal, sync->info.si_signo);
713 recalc_sigpending();
714 still_pending:
715 list_del_init(&sync->list);
716 copy_siginfo(info, &sync->info);
717 __sigqueue_free(sync);
718 return info->si_signo;
719 }
720
721 /*
722 * Remove signals in mask from the pending set and queue.
723 * Returns 1 if any signals were found.
724 *
725 * All callers must be holding the siglock.
726 */
727 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
728 {
729 struct sigqueue *q, *n;
730 sigset_t m;
731
732 sigandsets(&m, mask, &s->signal);
733 if (sigisemptyset(&m))
734 return 0;
735
736 sigandnsets(&s->signal, &s->signal, mask);
737 list_for_each_entry_safe(q, n, &s->list, list) {
738 if (sigismember(mask, q->info.si_signo)) {
739 list_del_init(&q->list);
740 __sigqueue_free(q);
741 }
742 }
743 return 1;
744 }
745
746 static inline int is_si_special(const struct siginfo *info)
747 {
748 return info <= SEND_SIG_FORCED;
749 }
750
751 static inline bool si_fromuser(const struct siginfo *info)
752 {
753 return info == SEND_SIG_NOINFO ||
754 (!is_si_special(info) && SI_FROMUSER(info));
755 }
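
/*
 * [Editor's note, illustrative only; not part of the original file]
 * is_si_special() works because the special info "pointers" are small
 * integer constants in this kernel series (assumed from the era's
 * include/linux/sched/signal.h):
 *
 *   #define SEND_SIG_NOINFO ((struct siginfo *) 0)  - from userspace, no siginfo
 *   #define SEND_SIG_PRIV   ((struct siginfo *) 1)  - from the kernel, private
 *   #define SEND_SIG_FORCED ((struct siginfo *) 2)  - from the kernel, forced
 *
 * so "info <= SEND_SIG_FORCED" is true exactly for these three sentinels,
 * and si_fromuser() treats SEND_SIG_NOINFO (and genuine user-supplied
 * siginfo with SI_FROMUSER()) as user-originated.
 */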
756
757 /*
758 * called with RCU read lock from check_kill_permission()
759 */
760 static int kill_ok_by_cred(struct task_struct *t)
761 {
762 const struct cred *cred = current_cred();
763 const struct cred *tcred = __task_cred(t);
764
765 if (uid_eq(cred->euid, tcred->suid) ||
766 uid_eq(cred->euid, tcred->uid) ||
767 uid_eq(cred->uid, tcred->suid) ||
768 uid_eq(cred->uid, tcred->uid))
769 return 1;
770
771 if (ns_capable(tcred->user_ns, CAP_KILL))
772 return 1;
773
774 return 0;
775 }
776
777 /*
778 * Bad permissions for sending the signal
779 * - the caller must hold the RCU read lock
780 */
781 static int check_kill_permission(int sig, struct siginfo *info,
782 struct task_struct *t)
783 {
784 struct pid *sid;
785 int error;
786
787 if (!valid_signal(sig))
788 return -EINVAL;
789
790 if (!si_fromuser(info))
791 return 0;
792
793 error = audit_signal_info(sig, t); /* Let audit system see the signal */
794 if (error)
795 return error;
796
797 if (!same_thread_group(current, t) &&
798 !kill_ok_by_cred(t)) {
799 switch (sig) {
800 case SIGCONT:
801 sid = task_session(t);
802 /*
803 * We don't return the error if sid == NULL. The
804 * task was unhashed, the caller must notice this.
805 */
806 if (!sid || sid == task_session(current))
807 break;
808 default:
809 return -EPERM;
810 }
811 }
812
813 return security_task_kill(t, info, sig, 0);
814 }
815
816 /**
817 * ptrace_trap_notify - schedule trap to notify ptracer
818 * @t: tracee wanting to notify tracer
819 *
820 * This function schedules sticky ptrace trap which is cleared on the next
821 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
822 * ptracer.
823 *
824 * If @t is running, STOP trap will be taken. If trapped for STOP and
825 * ptracer is listening for events, tracee is woken up so that it can
826 * re-trap for the new event. If trapped otherwise, STOP trap will be
827 * eventually taken without returning to userland after the existing traps
828 * are finished by PTRACE_CONT.
829 *
830 * CONTEXT:
831 * Must be called with @task->sighand->siglock held.
832 */
833 static void ptrace_trap_notify(struct task_struct *t)
834 {
835 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
836 assert_spin_locked(&t->sighand->siglock);
837
838 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
839 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
840 }
841
842 /*
843 * Handle magic process-wide effects of stop/continue signals. Unlike
844 * the signal actions, these happen immediately at signal-generation
845 * time regardless of blocking, ignoring, or handling. This does the
846 * actual continuing for SIGCONT, but not the actual stopping for stop
847 * signals. The process stop is done as a signal action for SIG_DFL.
848 *
849 * Returns true if the signal should be actually delivered, otherwise
850 * it should be dropped.
851 */
852 static bool prepare_signal(int sig, struct task_struct *p, bool force)
853 {
854 struct signal_struct *signal = p->signal;
855 struct task_struct *t;
856 sigset_t flush;
857
858 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
859 if (!(signal->flags & SIGNAL_GROUP_EXIT))
860 return sig == SIGKILL;
861 /*
862 * The process is in the middle of dying, nothing to do.
863 */
864 } else if (sig_kernel_stop(sig)) {
865 /*
866 * This is a stop signal. Remove SIGCONT from all queues.
867 */
868 siginitset(&flush, sigmask(SIGCONT));
869 flush_sigqueue_mask(&flush, &signal->shared_pending);
870 for_each_thread(p, t)
871 flush_sigqueue_mask(&flush, &t->pending);
872 } else if (sig == SIGCONT) {
873 unsigned int why;
874 /*
875 * Remove all stop signals from all queues, wake all threads.
876 */
877 siginitset(&flush, SIG_KERNEL_STOP_MASK);
878 flush_sigqueue_mask(&flush, &signal->shared_pending);
879 for_each_thread(p, t) {
880 flush_sigqueue_mask(&flush, &t->pending);
881 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
882 if (likely(!(t->ptrace & PT_SEIZED)))
883 wake_up_state(t, __TASK_STOPPED);
884 else
885 ptrace_trap_notify(t);
886 }
887
888 /*
889 * Notify the parent with CLD_CONTINUED if we were stopped.
890 *
891 * If we were in the middle of a group stop, we pretend it
892 * was already finished, and then continued. Since SIGCHLD
893 * doesn't queue we report only CLD_STOPPED, as if the next
894 * CLD_CONTINUED was dropped.
895 */
896 why = 0;
897 if (signal->flags & SIGNAL_STOP_STOPPED)
898 why |= SIGNAL_CLD_CONTINUED;
899 else if (signal->group_stop_count)
900 why |= SIGNAL_CLD_STOPPED;
901
902 if (why) {
903 /*
904 * The first thread which returns from do_signal_stop()
905 * will take ->siglock, notice SIGNAL_CLD_MASK, and
906 * notify its parent. See get_signal_to_deliver().
907 */
908 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
909 signal->group_stop_count = 0;
910 signal->group_exit_code = 0;
911 }
912 }
913
914 return !sig_ignored(p, sig, force);
915 }
916
917 /*
918 * Test if P wants to take SIG. After we've checked all threads with this,
919 * it's equivalent to finding no threads not blocking SIG. Any threads not
920 * blocking SIG were ruled out because they are not running and already
921 * have pending signals. Such threads will dequeue from the shared queue
922 * as soon as they're available, so putting the signal on the shared queue
923 * will be equivalent to sending it to one such thread.
924 */
925 static inline int wants_signal(int sig, struct task_struct *p)
926 {
927 if (sigismember(&p->blocked, sig))
928 return 0;
929 if (p->flags & PF_EXITING)
930 return 0;
931 if (sig == SIGKILL)
932 return 1;
933 if (task_is_stopped_or_traced(p))
934 return 0;
935 return task_curr(p) || !signal_pending(p);
936 }
937
938 static void complete_signal(int sig, struct task_struct *p, int group)
939 {
940 struct signal_struct *signal = p->signal;
941 struct task_struct *t;
942
943 /*
944 * Now find a thread we can wake up to take the signal off the queue.
945 *
946 * If the main thread wants the signal, it gets first crack.
947 * Probably the least surprising to the average bear.
948 */
949 if (wants_signal(sig, p))
950 t = p;
951 else if (!group || thread_group_empty(p))
952 /*
953 * There is just one thread and it does not need to be woken.
954 * It will dequeue unblocked signals before it runs again.
955 */
956 return;
957 else {
958 /*
959 * Otherwise try to find a suitable thread.
960 */
961 t = signal->curr_target;
962 while (!wants_signal(sig, t)) {
963 t = next_thread(t);
964 if (t == signal->curr_target)
965 /*
966 * No thread needs to be woken.
967 * Any eligible threads will see
968 * the signal in the queue soon.
969 */
970 return;
971 }
972 signal->curr_target = t;
973 }
974
975 /*
976 * Found a killable thread. If the signal will be fatal,
977 * then start taking the whole group down immediately.
978 */
979 if (sig_fatal(p, sig) &&
980 !(signal->flags & SIGNAL_GROUP_EXIT) &&
981 !sigismember(&t->real_blocked, sig) &&
982 (sig == SIGKILL || !p->ptrace)) {
983 /*
984 * This signal will be fatal to the whole group.
985 */
986 if (!sig_kernel_coredump(sig)) {
987 /*
988 * Start a group exit and wake everybody up.
989 * This way we don't have other threads
990 * running and doing things after a slower
991 * thread has the fatal signal pending.
992 */
993 signal->flags = SIGNAL_GROUP_EXIT;
994 signal->group_exit_code = sig;
995 signal->group_stop_count = 0;
996 t = p;
997 do {
998 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
999 sigaddset(&t->pending.signal, SIGKILL);
1000 signal_wake_up(t, 1);
1001 } while_each_thread(p, t);
1002 return;
1003 }
1004 }
1005
1006 /*
1007 * The signal is already in the shared-pending queue.
1008 * Tell the chosen thread to wake up and dequeue it.
1009 */
1010 signal_wake_up(t, sig == SIGKILL);
1011 return;
1012 }
1013
1014 static inline int legacy_queue(struct sigpending *signals, int sig)
1015 {
1016 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1017 }
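
/*
 * [Editor's note, illustrative only; not part of the original file]
 * Example of the legacy behaviour tested here: if a task already has SIGUSR1
 * pending and a second SIGUSR1 arrives before it is dequeued, legacy_queue()
 * is true and __send_signal() drops the second instance (classic UNIX
 * semantics). Real-time signals (sig >= SIGRTMIN) never take this path, so
 * each queued instance is preserved along with its siginfo.
 */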
1018
1019 #ifdef CONFIG_USER_NS
1020 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1021 {
1022 if (current_user_ns() == task_cred_xxx(t, user_ns))
1023 return;
1024
1025 if (SI_FROMKERNEL(info))
1026 return;
1027
1028 rcu_read_lock();
1029 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1030 make_kuid(current_user_ns(), info->si_uid));
1031 rcu_read_unlock();
1032 }
1033 #else
1034 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1035 {
1036 return;
1037 }
1038 #endif
1039
1040 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1041 int group, int from_ancestor_ns)
1042 {
1043 struct sigpending *pending;
1044 struct sigqueue *q;
1045 int override_rlimit;
1046 int ret = 0, result;
1047
1048 assert_spin_locked(&t->sighand->siglock);
1049
1050 result = TRACE_SIGNAL_IGNORED;
1051 if (!prepare_signal(sig, t,
1052 from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
1053 goto ret;
1054
1055 pending = group ? &t->signal->shared_pending : &t->pending;
1056 /*
1057 * Short-circuit ignored signals and support queuing
1058 * exactly one non-rt signal, so that we can get more
1059 * detailed information about the cause of the signal.
1060 */
1061 result = TRACE_SIGNAL_ALREADY_PENDING;
1062 if (legacy_queue(pending, sig))
1063 goto ret;
1064
1065 result = TRACE_SIGNAL_DELIVERED;
1066 /*
1067 * fast-pathed signals for kernel-internal things like SIGSTOP
1068 * or SIGKILL.
1069 */
1070 if (info == SEND_SIG_FORCED)
1071 goto out_set;
1072
1073 /*
1074 * Real-time signals must be queued if sent by sigqueue, or
1075 * some other real-time mechanism. It is implementation
1076 * defined whether kill() does so. We attempt to do so, on
1077 * the principle of least surprise, but since kill is not
1078 * allowed to fail with EAGAIN when low on memory we just
1079 * make sure at least one signal gets delivered and don't
1080 * pass on the info struct.
1081 */
1082 if (sig < SIGRTMIN)
1083 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1084 else
1085 override_rlimit = 0;
1086
1087 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1088 if (q) {
1089 list_add_tail(&q->list, &pending->list);
1090 switch ((unsigned long) info) {
1091 case (unsigned long) SEND_SIG_NOINFO:
1092 q->info.si_signo = sig;
1093 q->info.si_errno = 0;
1094 q->info.si_code = SI_USER;
1095 q->info.si_pid = task_tgid_nr_ns(current,
1096 task_active_pid_ns(t));
1097 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1098 break;
1099 case (unsigned long) SEND_SIG_PRIV:
1100 q->info.si_signo = sig;
1101 q->info.si_errno = 0;
1102 q->info.si_code = SI_KERNEL;
1103 q->info.si_pid = 0;
1104 q->info.si_uid = 0;
1105 break;
1106 default:
1107 copy_siginfo(&q->info, info);
1108 if (from_ancestor_ns)
1109 q->info.si_pid = 0;
1110 break;
1111 }
1112
1113 userns_fixup_signal_uid(&q->info, t);
1114
1115 } else if (!is_si_special(info)) {
1116 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1117 /*
1118 * Queue overflow, abort. We may abort if the
1119 * signal was rt and sent by user using something
1120 * other than kill().
1121 */
1122 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1123 ret = -EAGAIN;
1124 goto ret;
1125 } else {
1126 /*
1127 * This is a silent loss of information. We still
1128 * send the signal, but the *info bits are lost.
1129 */
1130 result = TRACE_SIGNAL_LOSE_INFO;
1131 }
1132 }
1133
1134 out_set:
1135 signalfd_notify(t, sig);
1136 sigaddset(&pending->signal, sig);
1137 complete_signal(sig, t, group);
1138 ret:
1139 trace_signal_generate(sig, info, t, group, result);
1140 return ret;
1141 }
1142
1143 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1144 int group)
1145 {
1146 int from_ancestor_ns = 0;
1147
1148 #ifdef CONFIG_PID_NS
1149 from_ancestor_ns = si_fromuser(info) &&
1150 !task_pid_nr_ns(current, task_active_pid_ns(t));
1151 #endif
1152
1153 return __send_signal(sig, info, t, group, from_ancestor_ns);
1154 }
1155
1156 static void print_fatal_signal(int signr)
1157 {
1158 struct pt_regs *regs = signal_pt_regs();
1159 pr_info("potentially unexpected fatal signal %d.\n", signr);
1160
1161 #if defined(__i386__) && !defined(__arch_um__)
1162 pr_info("code at %08lx: ", regs->ip);
1163 {
1164 int i;
1165 for (i = 0; i < 16; i++) {
1166 unsigned char insn;
1167
1168 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1169 break;
1170 pr_cont("%02x ", insn);
1171 }
1172 }
1173 pr_cont("\n");
1174 #endif
1175 preempt_disable();
1176 show_regs(regs);
1177 preempt_enable();
1178 }
1179
1180 static int __init setup_print_fatal_signals(char *str)
1181 {
1182 get_option (&str, &print_fatal_signals);
1183
1184 return 1;
1185 }
1186
1187 __setup("print-fatal-signals=", setup_print_fatal_signals);
1188
1189 int
1190 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1191 {
1192 return send_signal(sig, info, p, 1);
1193 }
1194
1195 static int
1196 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1197 {
1198 return send_signal(sig, info, t, 0);
1199 }
1200
1201 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1202 bool group)
1203 {
1204 unsigned long flags;
1205 int ret = -ESRCH;
1206
1207 if (lock_task_sighand(p, &flags)) {
1208 ret = send_signal(sig, info, p, group);
1209 unlock_task_sighand(p, &flags);
1210 }
1211
1212 return ret;
1213 }
1214
1215 /*
1216 * Force a signal that the process can't ignore: if necessary
1217 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1218 *
1219 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1220 * since we do not want to have a signal handler that was blocked
1221 * be invoked when user space had explicitly blocked it.
1222 *
1223 * We don't want to have recursive SIGSEGV's etc, for example,
1224 * that is why we also clear SIGNAL_UNKILLABLE.
1225 */
1226 int
1227 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1228 {
1229 unsigned long int flags;
1230 int ret, blocked, ignored;
1231 struct k_sigaction *action;
1232
1233 spin_lock_irqsave(&t->sighand->siglock, flags);
1234 action = &t->sighand->action[sig-1];
1235 ignored = action->sa.sa_handler == SIG_IGN;
1236 blocked = sigismember(&t->blocked, sig);
1237 if (blocked || ignored) {
1238 action->sa.sa_handler = SIG_DFL;
1239 if (blocked) {
1240 sigdelset(&t->blocked, sig);
1241 recalc_sigpending_and_wake(t);
1242 }
1243 }
1244 /*
1245 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1246 * debugging to leave init killable.
1247 */
1248 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1249 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1250 ret = specific_send_sig_info(sig, info, t);
1251 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1252
1253 return ret;
1254 }
1255
1256 /*
1257 * Nuke all other threads in the group.
1258 */
1259 int zap_other_threads(struct task_struct *p)
1260 {
1261 struct task_struct *t = p;
1262 int count = 0;
1263
1264 p->signal->group_stop_count = 0;
1265
1266 while_each_thread(p, t) {
1267 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1268 count++;
1269
1270 /* Don't bother with already dead threads */
1271 if (t->exit_state)
1272 continue;
1273 sigaddset(&t->pending.signal, SIGKILL);
1274 signal_wake_up(t, 1);
1275 }
1276
1277 return count;
1278 }
1279
1280 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1281 unsigned long *flags)
1282 {
1283 struct sighand_struct *sighand;
1284
1285 for (;;) {
1286 /*
1287 * Disable interrupts early to avoid deadlocks.
1288 * See rcu_read_unlock() comment header for details.
1289 */
1290 local_irq_save(*flags);
1291 rcu_read_lock();
1292 sighand = rcu_dereference(tsk->sighand);
1293 if (unlikely(sighand == NULL)) {
1294 rcu_read_unlock();
1295 local_irq_restore(*flags);
1296 break;
1297 }
1298 /*
1299 * This sighand can be already freed and even reused, but
1300 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1301 * initializes ->siglock: this slab can't go away, it has
1302 * the same object type, ->siglock can't be reinitialized.
1303 *
1304 * We need to ensure that tsk->sighand is still the same
1305 * after we take the lock, we can race with de_thread() or
1306 * __exit_signal(). In the latter case the next iteration
1307 * must see ->sighand == NULL.
1308 */
1309 spin_lock(&sighand->siglock);
1310 if (likely(sighand == tsk->sighand)) {
1311 rcu_read_unlock();
1312 break;
1313 }
1314 spin_unlock(&sighand->siglock);
1315 rcu_read_unlock();
1316 local_irq_restore(*flags);
1317 }
1318
1319 return sighand;
1320 }
1321
1322 /*
1323 * send signal info to all the members of a group
1324 */
1325 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1326 {
1327 int ret;
1328
1329 rcu_read_lock();
1330 ret = check_kill_permission(sig, info, p);
1331 rcu_read_unlock();
1332
1333 if (!ret && sig) {
1334 ret = do_send_sig_info(sig, info, p, true);
1335 if (capable(CAP_KILL) && sig == SIGKILL)
1336 add_to_oom_reaper(p);
1337 }
1338
1339 return ret;
1340 }
1341
1342 /*
1343 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1344 * control characters do (^C, ^Z etc)
1345 * - the caller must hold at least a readlock on tasklist_lock
1346 */
1347 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1348 {
1349 struct task_struct *p = NULL;
1350 int retval, success;
1351
1352 success = 0;
1353 retval = -ESRCH;
1354 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1355 int err = group_send_sig_info(sig, info, p);
1356 success |= !err;
1357 retval = err;
1358 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1359 return success ? 0 : retval;
1360 }
1361
1362 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1363 {
1364 int error = -ESRCH;
1365 struct task_struct *p;
1366
1367 for (;;) {
1368 rcu_read_lock();
1369 p = pid_task(pid, PIDTYPE_PID);
1370 if (p)
1371 error = group_send_sig_info(sig, info, p);
1372 rcu_read_unlock();
1373 if (likely(!p || error != -ESRCH))
1374 return error;
1375
1376 /*
1377 * The task was unhashed in between, try again. If it
1378 * is dead, pid_task() will return NULL, if we race with
1379 * de_thread() it will find the new leader.
1380 */
1381 }
1382 }
1383
1384 static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1385 {
1386 int error;
1387 rcu_read_lock();
1388 error = kill_pid_info(sig, info, find_vpid(pid));
1389 rcu_read_unlock();
1390 return error;
1391 }
1392
1393 static int kill_as_cred_perm(const struct cred *cred,
1394 struct task_struct *target)
1395 {
1396 const struct cred *pcred = __task_cred(target);
1397 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1398 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1399 return 0;
1400 return 1;
1401 }
1402
1403 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1404 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1405 const struct cred *cred, u32 secid)
1406 {
1407 int ret = -EINVAL;
1408 struct task_struct *p;
1409 unsigned long flags;
1410
1411 if (!valid_signal(sig))
1412 return ret;
1413
1414 rcu_read_lock();
1415 p = pid_task(pid, PIDTYPE_PID);
1416 if (!p) {
1417 ret = -ESRCH;
1418 goto out_unlock;
1419 }
1420 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1421 ret = -EPERM;
1422 goto out_unlock;
1423 }
1424 ret = security_task_kill(p, info, sig, secid);
1425 if (ret)
1426 goto out_unlock;
1427
1428 if (sig) {
1429 if (lock_task_sighand(p, &flags)) {
1430 ret = __send_signal(sig, info, p, 1, 0);
1431 unlock_task_sighand(p, &flags);
1432 } else
1433 ret = -ESRCH;
1434 }
1435 out_unlock:
1436 rcu_read_unlock();
1437 return ret;
1438 }
1439 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1440
1441 /*
1442 * kill_something_info() interprets pid in interesting ways just like kill(2).
1443 *
1444 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1445 * is probably wrong. Should make it like BSD or SYSV.
1446 */
1447
1448 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1449 {
1450 int ret;
1451
1452 if (pid > 0) {
1453 rcu_read_lock();
1454 ret = kill_pid_info(sig, info, find_vpid(pid));
1455 rcu_read_unlock();
1456 return ret;
1457 }
1458
1459 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1460 if (pid == INT_MIN)
1461 return -ESRCH;
1462
1463 read_lock(&tasklist_lock);
1464 if (pid != -1) {
1465 ret = __kill_pgrp_info(sig, info,
1466 pid ? find_vpid(-pid) : task_pgrp(current));
1467 } else {
1468 int retval = 0, count = 0;
1469 struct task_struct * p;
1470
1471 for_each_process(p) {
1472 if (task_pid_vnr(p) > 1 &&
1473 !same_thread_group(p, current)) {
1474 int err = group_send_sig_info(sig, info, p);
1475 ++count;
1476 if (err != -EPERM)
1477 retval = err;
1478 }
1479 }
1480 ret = count ? retval : -ESRCH;
1481 }
1482 read_unlock(&tasklist_lock);
1483
1484 return ret;
1485 }
1486
1487 /*
1488 * These are for backward compatibility with the rest of the kernel source.
1489 */
1490
1491 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1492 {
1493 /*
1494 * Make sure legacy kernel users don't send in bad values
1495 * (normal paths check this in check_kill_permission).
1496 */
1497 if (!valid_signal(sig))
1498 return -EINVAL;
1499
1500 return do_send_sig_info(sig, info, p, false);
1501 }
1502
1503 #define __si_special(priv) \
1504 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1505
1506 int
1507 send_sig(int sig, struct task_struct *p, int priv)
1508 {
1509 return send_sig_info(sig, __si_special(priv), p);
1510 }
1511
1512 void
1513 force_sig(int sig, struct task_struct *p)
1514 {
1515 force_sig_info(sig, SEND_SIG_PRIV, p);
1516 }
1517
1518 /*
1519 * When things go south during signal handling, we
1520 * will force a SIGSEGV. And if the signal that caused
1521 * the problem was already a SIGSEGV, we'll want to
1522 * make sure we don't even try to deliver the signal..
1523 */
1524 int
1525 force_sigsegv(int sig, struct task_struct *p)
1526 {
1527 if (sig == SIGSEGV) {
1528 unsigned long flags;
1529 spin_lock_irqsave(&p->sighand->siglock, flags);
1530 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1531 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1532 }
1533 force_sig(SIGSEGV, p);
1534 return 0;
1535 }
1536
1537 int kill_pgrp(struct pid *pid, int sig, int priv)
1538 {
1539 int ret;
1540
1541 read_lock(&tasklist_lock);
1542 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1543 read_unlock(&tasklist_lock);
1544
1545 return ret;
1546 }
1547 EXPORT_SYMBOL(kill_pgrp);
1548
1549 int kill_pid(struct pid *pid, int sig, int priv)
1550 {
1551 return kill_pid_info(sig, __si_special(priv), pid);
1552 }
1553 EXPORT_SYMBOL(kill_pid);
1554
1555 /*
1556 * These functions support sending signals using preallocated sigqueue
1557 * structures. This is needed "because realtime applications cannot
1558 * afford to lose notifications of asynchronous events, like timer
1559 * expirations or I/O completions". In the case of POSIX Timers
1560 * we allocate the sigqueue structure from the timer_create. If this
1561 * allocation fails we are able to report the failure to the application
1562 * with an EAGAIN error.
1563 */
1564 struct sigqueue *sigqueue_alloc(void)
1565 {
1566 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1567
1568 if (q)
1569 q->flags |= SIGQUEUE_PREALLOC;
1570
1571 return q;
1572 }
1573
1574 void sigqueue_free(struct sigqueue *q)
1575 {
1576 unsigned long flags;
1577 spinlock_t *lock = &current->sighand->siglock;
1578
1579 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1580 /*
1581 * We must hold ->siglock while testing q->list
1582 * to serialize with collect_signal() or with
1583 * __exit_signal()->flush_sigqueue().
1584 */
1585 spin_lock_irqsave(lock, flags);
1586 q->flags &= ~SIGQUEUE_PREALLOC;
1587 /*
1588 * If it is queued it will be freed when dequeued,
1589 * like the "regular" sigqueue.
1590 */
1591 if (!list_empty(&q->list))
1592 q = NULL;
1593 spin_unlock_irqrestore(lock, flags);
1594
1595 if (q)
1596 __sigqueue_free(q);
1597 }
1598
1599 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1600 {
1601 int sig = q->info.si_signo;
1602 struct sigpending *pending;
1603 unsigned long flags;
1604 int ret, result;
1605
1606 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1607
1608 ret = -1;
1609 if (!likely(lock_task_sighand(t, &flags)))
1610 goto ret;
1611
1612 ret = 1; /* the signal is ignored */
1613 result = TRACE_SIGNAL_IGNORED;
1614 if (!prepare_signal(sig, t, false))
1615 goto out;
1616
1617 ret = 0;
1618 if (unlikely(!list_empty(&q->list))) {
1619 /*
1620 * If an SI_TIMER entry is already queued, just increment
1621 * the overrun count.
1622 */
1623 BUG_ON(q->info.si_code != SI_TIMER);
1624 q->info.si_overrun++;
1625 result = TRACE_SIGNAL_ALREADY_PENDING;
1626 goto out;
1627 }
1628 q->info.si_overrun = 0;
1629
1630 signalfd_notify(t, sig);
1631 pending = group ? &t->signal->shared_pending : &t->pending;
1632 list_add_tail(&q->list, &pending->list);
1633 sigaddset(&pending->signal, sig);
1634 complete_signal(sig, t, group);
1635 result = TRACE_SIGNAL_DELIVERED;
1636 out:
1637 trace_signal_generate(sig, &q->info, t, group, result);
1638 unlock_task_sighand(t, &flags);
1639 ret:
1640 return ret;
1641 }
1642
1643 /*
1644 * Let a parent know about the death of a child.
1645 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1646 *
1647 * Returns true if our parent ignored us and so we've switched to
1648 * self-reaping.
1649 */
1650 bool do_notify_parent(struct task_struct *tsk, int sig)
1651 {
1652 struct siginfo info;
1653 unsigned long flags;
1654 struct sighand_struct *psig;
1655 bool autoreap = false;
1656 u64 utime, stime;
1657
1658 BUG_ON(sig == -1);
1659
1660 /* do_notify_parent_cldstop should have been called instead. */
1661 BUG_ON(task_is_stopped_or_traced(tsk));
1662
1663 BUG_ON(!tsk->ptrace &&
1664 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1665
1666 if (sig != SIGCHLD) {
1667 /*
1668 * This is only possible if parent == real_parent.
1669 * Check if it has changed security domain.
1670 */
1671 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1672 sig = SIGCHLD;
1673 }
1674
1675 info.si_signo = sig;
1676 info.si_errno = 0;
1677 /*
1678 * We are under tasklist_lock here so our parent is tied to
1679 * us and cannot change.
1680 *
1681 * task_active_pid_ns will always return the same pid namespace
1682 * until a task passes through release_task.
1683 *
1684 * write_lock() currently calls preempt_disable() which is the
1685 * same as rcu_read_lock(), but according to Oleg, this is not
1686 * correct to rely on this
1687 */
1688 rcu_read_lock();
1689 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1690 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1691 task_uid(tsk));
1692 rcu_read_unlock();
1693
1694 task_cputime(tsk, &utime, &stime);
1695 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1696 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1697
1698 info.si_status = tsk->exit_code & 0x7f;
1699 if (tsk->exit_code & 0x80)
1700 info.si_code = CLD_DUMPED;
1701 else if (tsk->exit_code & 0x7f)
1702 info.si_code = CLD_KILLED;
1703 else {
1704 info.si_code = CLD_EXITED;
1705 info.si_status = tsk->exit_code >> 8;
1706 }
1707
1708 psig = tsk->parent->sighand;
1709 spin_lock_irqsave(&psig->siglock, flags);
1710 if (!tsk->ptrace && sig == SIGCHLD &&
1711 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1712 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1713 /*
1714 * We are exiting and our parent doesn't care. POSIX.1
1715 * defines special semantics for setting SIGCHLD to SIG_IGN
1716 * or setting the SA_NOCLDWAIT flag: we should be reaped
1717 * automatically and not left for our parent's wait4 call.
1718 * Rather than having the parent do it as a magic kind of
1719 * signal handler, we just set this to tell do_exit that we
1720 * can be cleaned up without becoming a zombie. Note that
1721 * we still call __wake_up_parent in this case, because a
1722 * blocked sys_wait4 might now return -ECHILD.
1723 *
1724 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1725 * is implementation-defined: we do (if you don't want
1726 * it, just use SIG_IGN instead).
1727 */
1728 autoreap = true;
1729 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1730 sig = 0;
1731 }
1732 if (valid_signal(sig) && sig)
1733 __group_send_sig_info(sig, &info, tsk->parent);
1734 __wake_up_parent(tsk, tsk->parent);
1735 spin_unlock_irqrestore(&psig->siglock, flags);
1736
1737 return autoreap;
1738 }
1739
1740 /**
1741 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1742 * @tsk: task reporting the state change
1743 * @for_ptracer: the notification is for ptracer
1744 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1745 *
1746 * Notify @tsk's parent that the stopped/continued state has changed. If
1747 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1748 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1749 *
1750 * CONTEXT:
1751 * Must be called with tasklist_lock at least read locked.
1752 */
1753 static void do_notify_parent_cldstop(struct task_struct *tsk,
1754 bool for_ptracer, int why)
1755 {
1756 struct siginfo info;
1757 unsigned long flags;
1758 struct task_struct *parent;
1759 struct sighand_struct *sighand;
1760 u64 utime, stime;
1761
1762 if (for_ptracer) {
1763 parent = tsk->parent;
1764 } else {
1765 tsk = tsk->group_leader;
1766 parent = tsk->real_parent;
1767 }
1768
1769 info.si_signo = SIGCHLD;
1770 info.si_errno = 0;
1771 /*
1772 * see comment in do_notify_parent() about the following 4 lines
1773 */
1774 rcu_read_lock();
1775 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1776 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1777 rcu_read_unlock();
1778
1779 task_cputime(tsk, &utime, &stime);
1780 info.si_utime = nsec_to_clock_t(utime);
1781 info.si_stime = nsec_to_clock_t(stime);
1782
1783 info.si_code = why;
1784 switch (why) {
1785 case CLD_CONTINUED:
1786 info.si_status = SIGCONT;
1787 break;
1788 case CLD_STOPPED:
1789 info.si_status = tsk->signal->group_exit_code & 0x7f;
1790 break;
1791 case CLD_TRAPPED:
1792 info.si_status = tsk->exit_code & 0x7f;
1793 break;
1794 default:
1795 BUG();
1796 }
1797
1798 sighand = parent->sighand;
1799 spin_lock_irqsave(&sighand->siglock, flags);
1800 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1801 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1802 __group_send_sig_info(SIGCHLD, &info, parent);
1803 /*
1804 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1805 */
1806 __wake_up_parent(tsk, parent);
1807 spin_unlock_irqrestore(&sighand->siglock, flags);
1808 }
1809
1810 static inline int may_ptrace_stop(void)
1811 {
1812 if (!likely(current->ptrace))
1813 return 0;
1814 /*
1815 * Are we in the middle of do_coredump?
1816 * If so and our tracer is also part of the coredump stopping
1817 * is a deadlock situation, and pointless because our tracer
1818 * is dead so don't allow us to stop.
1819 * If SIGKILL was already sent before the caller unlocked
1820 * ->siglock we must see ->core_state != NULL. Otherwise it
1821 * is safe to enter schedule().
1822 *
1823 * This is almost outdated, a task with the pending SIGKILL can't
1824 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1825 * after SIGKILL was already dequeued.
1826 */
1827 if (unlikely(current->mm->core_state) &&
1828 unlikely(current->mm == current->parent->mm))
1829 return 0;
1830
1831 return 1;
1832 }
1833
1834 /*
1835 * Return non-zero if there is a SIGKILL that should be waking us up.
1836 * Called with the siglock held.
1837 */
1838 static int sigkill_pending(struct task_struct *tsk)
1839 {
1840 return sigismember(&tsk->pending.signal, SIGKILL) ||
1841 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1842 }
1843
1844 /*
1845 * This must be called with current->sighand->siglock held.
1846 *
1847 * This should be the path for all ptrace stops.
1848 * We always set current->last_siginfo while stopped here.
1849 * That makes it a way to test a stopped process for
1850 * being ptrace-stopped vs being job-control-stopped.
1851 *
1852 * If we actually decide not to stop at all because the tracer
1853 * is gone, we keep current->exit_code unless clear_code.
1854 */
1855 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1856 __releases(&current->sighand->siglock)
1857 __acquires(&current->sighand->siglock)
1858 {
1859 bool gstop_done = false;
1860
1861 if (arch_ptrace_stop_needed(exit_code, info)) {
1862 /*
1863 * The arch code has something special to do before a
1864 * ptrace stop. This is allowed to block, e.g. for faults
1865 * on user stack pages. We can't keep the siglock while
1866 * calling arch_ptrace_stop, so we must release it now.
1867 * To preserve proper semantics, we must do this before
1868 * any signal bookkeeping like checking group_stop_count.
1869 * Meanwhile, a SIGKILL could come in before we retake the
1870 * siglock. That must prevent us from sleeping in TASK_TRACED.
1871 * So after regaining the lock, we must check for SIGKILL.
1872 */
1873 spin_unlock_irq(&current->sighand->siglock);
1874 arch_ptrace_stop(exit_code, info);
1875 spin_lock_irq(&current->sighand->siglock);
1876 if (sigkill_pending(current))
1877 return;
1878 }
1879
1880 set_special_state(TASK_TRACED);
1881
1882 /*
1883 * We're committing to trapping. TRACED should be visible before
1884 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1885 * Also, transition to TRACED and updates to ->jobctl should be
1886 * atomic with respect to siglock and should be done after the arch
1887 * hook as siglock is released and regrabbed across it.
1888 *
1889 * TRACER TRACEE
1890 *
1891 * ptrace_attach()
1892 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
1893 * do_wait()
1894 * set_current_state() smp_wmb();
1895 * ptrace_do_wait()
1896 * wait_task_stopped()
1897 * task_stopped_code()
1898 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
1899 */
1900 smp_wmb();
1901
1902 current->last_siginfo = info;
1903 current->exit_code = exit_code;
1904
1905 /*
1906 * If @why is CLD_STOPPED, we're trapping to participate in a group
1907 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1908 * across siglock relocks since INTERRUPT was scheduled, PENDING
1909 * could be clear now. We act as if SIGCONT is received after
1910 * TASK_TRACED is entered - ignore it.
1911 */
1912 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1913 gstop_done = task_participate_group_stop(current);
1914
1915 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1916 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1917 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1918 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1919
1920 /* entering a trap, clear TRAPPING */
1921 task_clear_jobctl_trapping(current);
1922
1923 spin_unlock_irq(&current->sighand->siglock);
1924 read_lock(&tasklist_lock);
1925 if (may_ptrace_stop()) {
1926 /*
1927 * Notify parents of the stop.
1928 *
1929 * While ptraced, there are two parents - the ptracer and
1930 * the real_parent of the group_leader. The ptracer should
1931 * know about every stop while the real parent is only
1932 * interested in the completion of group stop. The states
1933 * for the two don't interact with each other. Notify
1934 * separately unless they're gonna be duplicates.
1935 */
1936 do_notify_parent_cldstop(current, true, why);
1937 if (gstop_done && ptrace_reparented(current))
1938 do_notify_parent_cldstop(current, false, why);
1939
1940 /*
1941 * Don't want to allow preemption here, because
1942 * sys_ptrace() needs this task to be inactive.
1943 *
1944 * XXX: implement read_unlock_no_resched().
1945 */
1946 preempt_disable();
1947 read_unlock(&tasklist_lock);
1948 preempt_enable_no_resched();
1949 freezable_schedule();
1950 } else {
1951 /*
1952 * By the time we got the lock, our tracer went away.
1953 * Don't drop the lock yet, another tracer may come.
1954 *
1955 * If @gstop_done, the ptracer went away between group stop
1956 * completion and here. During detach, it would have set
1957 * JOBCTL_STOP_PENDING on us and we'll re-enter
1958 * TASK_STOPPED in do_signal_stop() on return, so notifying
1959 * the real parent of the group stop completion is enough.
1960 */
1961 if (gstop_done)
1962 do_notify_parent_cldstop(current, false, why);
1963
1964 /* tasklist protects us from ptrace_freeze_traced() */
1965 __set_current_state(TASK_RUNNING);
1966 if (clear_code)
1967 current->exit_code = 0;
1968 read_unlock(&tasklist_lock);
1969 }
1970
1971 /*
1972 * We are back. Now reacquire the siglock before touching
1973 * last_siginfo, so that we are sure to have synchronized with
1974 * any signal-sending on another CPU that wants to examine it.
1975 */
1976 spin_lock_irq(&current->sighand->siglock);
1977 current->last_siginfo = NULL;
1978
1979 /* LISTENING can be set only during STOP traps, clear it */
1980 current->jobctl &= ~JOBCTL_LISTENING;
1981
1982 /*
1983 * Queued signals ignored us while we were stopped for tracing.
1984 * So check for any that we should take before resuming user mode.
1985 * This sets TIF_SIGPENDING, but never clears it.
1986 */
1987 recalc_sigpending_tsk(current);
1988 }
1989
1990 static void ptrace_do_notify(int signr, int exit_code, int why)
1991 {
1992 siginfo_t info;
1993
1994 memset(&info, 0, sizeof info);
1995 info.si_signo = signr;
1996 info.si_code = exit_code;
1997 info.si_pid = task_pid_vnr(current);
1998 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1999
2000 /* Let the debugger run. */
2001 ptrace_stop(exit_code, why, 1, &info);
2002 }
2003
2004 void ptrace_notify(int exit_code)
2005 {
2006 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2007 if (unlikely(current->task_works))
2008 task_work_run();
2009
2010 spin_lock_irq(&current->sighand->siglock);
2011 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2012 spin_unlock_irq(&current->sighand->siglock);
2013 }
2014
2015 /**
2016 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2017 * @signr: signr causing group stop if initiating
2018 *
2019 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2020 * and participate in it. If already set, participate in the existing
2021 * group stop. If participated in a group stop (and thus slept), %true is
2022 * returned with siglock released.
2023 *
2024 * If ptraced, this function doesn't handle stop itself. Instead,
2025 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2026 * untouched. The caller must ensure that INTERRUPT trap handling takes
2027 * place afterwards.
2028 *
2029 * CONTEXT:
2030 * Must be called with @current->sighand->siglock held, which is released
2031 * on %true return.
2032 *
2033 * RETURNS:
2034 * %false if group stop is already cancelled or ptrace trap is scheduled.
2035 * %true if participated in group stop.
2036 */
2037 static bool do_signal_stop(int signr)
2038 __releases(&current->sighand->siglock)
2039 {
2040 struct signal_struct *sig = current->signal;
2041
2042 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2043 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2044 struct task_struct *t;
2045
2046 /* signr will be recorded in task->jobctl for retries */
2047 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2048
2049 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2050 unlikely(signal_group_exit(sig)))
2051 return false;
2052 /*
2053 * There is no group stop already in progress. We must
2054 * initiate one now.
2055 *
2056 * While ptraced, a task may be resumed while group stop is
2057 * still in effect and then receive a stop signal and
2058 * initiate another group stop. This deviates from the
2059 * usual behavior as two consecutive stop signals can't
2060 * cause two group stops when !ptraced. That is why we
2061 * also check !task_is_stopped(t) below.
2062 *
2063 * The condition can be distinguished by testing whether
2064 * SIGNAL_STOP_STOPPED is already set. Don't generate
2065 * group_exit_code in such case.
2066 *
2067 * This is not necessary for SIGNAL_STOP_CONTINUED because
2068 * an intervening stop signal is required to cause two
2069 * continued events regardless of ptrace.
2070 */
2071 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2072 sig->group_exit_code = signr;
2073
2074 sig->group_stop_count = 0;
2075
2076 if (task_set_jobctl_pending(current, signr | gstop))
2077 sig->group_stop_count++;
2078
2079 t = current;
2080 while_each_thread(current, t) {
2081 /*
2082 * Setting state to TASK_STOPPED for a group
2083 * stop is always done with the siglock held,
2084 * so this check has no races.
2085 */
2086 if (!task_is_stopped(t) &&
2087 task_set_jobctl_pending(t, signr | gstop)) {
2088 sig->group_stop_count++;
2089 if (likely(!(t->ptrace & PT_SEIZED)))
2090 signal_wake_up(t, 0);
2091 else
2092 ptrace_trap_notify(t);
2093 }
2094 }
2095 }
2096
2097 if (likely(!current->ptrace)) {
2098 int notify = 0;
2099
2100 /*
2101 * If there are no other threads in the group, or if there
2102 * is a group stop in progress and we are the last to stop,
2103 * report to the parent.
2104 */
2105 if (task_participate_group_stop(current))
2106 notify = CLD_STOPPED;
2107
2108 set_special_state(TASK_STOPPED);
2109 spin_unlock_irq(&current->sighand->siglock);
2110
2111 /*
2112 * Notify the parent of the group stop completion. Because
2113 * we're not holding either the siglock or tasklist_lock
2114	 * here, ptracer may attach in between; however, this is for
2115 * group stop and should always be delivered to the real
2116 * parent of the group leader. The new ptracer will get
2117 * its notification when this task transitions into
2118 * TASK_TRACED.
2119 */
2120 if (notify) {
2121 read_lock(&tasklist_lock);
2122 do_notify_parent_cldstop(current, false, notify);
2123 read_unlock(&tasklist_lock);
2124 }
2125
2126 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2127 freezable_schedule();
2128 return true;
2129 } else {
2130 /*
2131 * While ptraced, group stop is handled by STOP trap.
2132 * Schedule it and let the caller deal with it.
2133 */
2134 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2135 return false;
2136 }
2137 }
2138
2139 /**
2140 * do_jobctl_trap - take care of ptrace jobctl traps
2141 *
2142 * When PT_SEIZED, it's used for both group stop and explicit
2143 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2144 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2145 * the stop signal; otherwise, %SIGTRAP.
2146 *
2147 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2148 * number as exit_code and no siginfo.
2149 *
2150 * CONTEXT:
2151 * Must be called with @current->sighand->siglock held, which may be
2152 * released and re-acquired before returning with intervening sleep.
2153 */
2154 static void do_jobctl_trap(void)
2155 {
2156 struct signal_struct *signal = current->signal;
2157 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2158
2159 if (current->ptrace & PT_SEIZED) {
2160 if (!signal->group_stop_count &&
2161 !(signal->flags & SIGNAL_STOP_STOPPED))
2162 signr = SIGTRAP;
2163 WARN_ON_ONCE(!signr);
2164 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2165 CLD_STOPPED);
2166 } else {
2167 WARN_ON_ONCE(!signr);
2168 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2169 current->exit_code = 0;
2170 }
2171 }
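
/*
 * Userspace view of the trap above (a minimal sketch, not kernel code): a
 * tracer that attached with PTRACE_SEIZE and requests an INTERRUPT stop
 * observes PTRACE_EVENT_STOP in the waitpid() status, matching the
 * signr | (PTRACE_EVENT_STOP << 8) exit_code set via ptrace_do_notify().
 * The pid is assumed to be a live tracee and PTRACE_EVENT_STOP is assumed
 * to be provided by the installed ptrace headers.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void interrupt_and_report(pid_t pid)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_SEIZE, pid, 0, 0);
 *		ptrace(PTRACE_INTERRUPT, pid, 0, 0);
 *		waitpid(pid, &status, 0);
 *		if (WIFSTOPPED(status) &&
 *		    status >> 8 == (SIGTRAP | (PTRACE_EVENT_STOP << 8)))
 *			printf("tracee %d entered an INTERRUPT stop\n", (int)pid);
 *		ptrace(PTRACE_CONT, pid, 0, 0);
 *	}
 */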
2172
2173 static int ptrace_signal(int signr, siginfo_t *info)
2174 {
2175 /*
2176 * We do not check sig_kernel_stop(signr) but set this marker
2177 * unconditionally because we do not know whether debugger will
2178 * change signr. This flag has no meaning unless we are going
2179 * to stop after return from ptrace_stop(). In this case it will
2180 * be checked in do_signal_stop(), we should only stop if it was
2181 * not cleared by SIGCONT while we were sleeping. See also the
2182 * comment in dequeue_signal().
2183 */
2184 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2185 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2186
2187 /* We're back. Did the debugger cancel the sig? */
2188 signr = current->exit_code;
2189 if (signr == 0)
2190 return signr;
2191
2192 current->exit_code = 0;
2193
2194 /*
2195 * Update the siginfo structure if the signal has
2196 * changed. If the debugger wanted something
2197 * specific in the siginfo structure then it should
2198 * have updated *info via PTRACE_SETSIGINFO.
2199 */
2200 if (signr != info->si_signo) {
2201 info->si_signo = signr;
2202 info->si_errno = 0;
2203 info->si_code = SI_USER;
2204 rcu_read_lock();
2205 info->si_pid = task_pid_vnr(current->parent);
2206 info->si_uid = from_kuid_munged(current_user_ns(),
2207 task_uid(current->parent));
2208 rcu_read_unlock();
2209 }
2210
2211 /* If the (new) signal is now blocked, requeue it. */
2212 if (sigismember(&current->blocked, signr)) {
2213 specific_send_sig_info(signr, info, current);
2214 signr = 0;
2215 }
2216
2217 return signr;
2218 }
2219
2220 int get_signal(struct ksignal *ksig)
2221 {
2222 struct sighand_struct *sighand = current->sighand;
2223 struct signal_struct *signal = current->signal;
2224 int signr;
2225
2226 if (unlikely(current->task_works))
2227 task_work_run();
2228
2229 if (unlikely(uprobe_deny_signal()))
2230 return 0;
2231
2232 /*
2233 * Do this once, we can't return to user-mode if freezing() == T.
2234 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2235 * thus do not need another check after return.
2236 */
2237 try_to_freeze();
2238
2239 relock:
2240 spin_lock_irq(&sighand->siglock);
2241 /*
2242 * Every stopped thread goes here after wakeup. Check to see if
2243 * we should notify the parent, prepare_signal(SIGCONT) encodes
2244 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2245 */
2246 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2247 int why;
2248
2249 if (signal->flags & SIGNAL_CLD_CONTINUED)
2250 why = CLD_CONTINUED;
2251 else
2252 why = CLD_STOPPED;
2253
2254 signal->flags &= ~SIGNAL_CLD_MASK;
2255
2256 spin_unlock_irq(&sighand->siglock);
2257
2258 /*
2259 * Notify the parent that we're continuing. This event is
2260		 * always per-process and doesn't make a whole lot of sense
2261 * for ptracers, who shouldn't consume the state via
2262 * wait(2) either, but, for backward compatibility, notify
2263 * the ptracer of the group leader too unless it's gonna be
2264 * a duplicate.
2265 */
2266 read_lock(&tasklist_lock);
2267 do_notify_parent_cldstop(current, false, why);
2268
2269 if (ptrace_reparented(current->group_leader))
2270 do_notify_parent_cldstop(current->group_leader,
2271 true, why);
2272 read_unlock(&tasklist_lock);
2273
2274 goto relock;
2275 }
2276
2277 /* Has this task already been marked for death? */
2278 if (signal_group_exit(signal)) {
2279 ksig->info.si_signo = signr = SIGKILL;
2280 sigdelset(&current->pending.signal, SIGKILL);
2281 recalc_sigpending();
2282 goto fatal;
2283 }
2284
2285 for (;;) {
2286 struct k_sigaction *ka;
2287
2288 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2289 do_signal_stop(0))
2290 goto relock;
2291
2292 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2293 do_jobctl_trap();
2294 spin_unlock_irq(&sighand->siglock);
2295 goto relock;
2296 }
2297
2298 /*
2299 * Signals generated by the execution of an instruction
2300 * need to be delivered before any other pending signals
2301 * so that the instruction pointer in the signal stack
2302 * frame points to the faulting instruction.
2303 */
2304 signr = dequeue_synchronous_signal(&ksig->info);
2305 if (!signr)
2306 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2307
2308 if (!signr)
2309 break; /* will return 0 */
2310
2311 if (unlikely(current->ptrace) && signr != SIGKILL) {
2312 signr = ptrace_signal(signr, &ksig->info);
2313 if (!signr)
2314 continue;
2315 }
2316
2317 ka = &sighand->action[signr-1];
2318
2319 /* Trace actually delivered signals. */
2320 trace_signal_deliver(signr, &ksig->info, ka);
2321
2322 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2323 continue;
2324 if (ka->sa.sa_handler != SIG_DFL) {
2325 /* Run the handler. */
2326 ksig->ka = *ka;
2327
2328 if (ka->sa.sa_flags & SA_ONESHOT)
2329 ka->sa.sa_handler = SIG_DFL;
2330
2331 break; /* will return non-zero "signr" value */
2332 }
2333
2334 /*
2335 * Now we are doing the default action for this signal.
2336 */
2337 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2338 continue;
2339
2340 /*
2341 * Global init gets no signals it doesn't want.
2342		 * Container-init gets no signals it doesn't want from the same
2343 * container.
2344 *
2345 * Note that if global/container-init sees a sig_kernel_only()
2346 * signal here, the signal must have been generated internally
2347 * or must have come from an ancestor namespace. In either
2348 * case, the signal cannot be dropped.
2349 */
2350 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2351 !sig_kernel_only(signr))
2352 continue;
2353
2354 if (sig_kernel_stop(signr)) {
2355 /*
2356 * The default action is to stop all threads in
2357 * the thread group. The job control signals
2358 * do nothing in an orphaned pgrp, but SIGSTOP
2359 * always works. Note that siglock needs to be
2360 * dropped during the call to is_orphaned_pgrp()
2361 * because of lock ordering with tasklist_lock.
2362 * This allows an intervening SIGCONT to be posted.
2363 * We need to check for that and bail out if necessary.
2364 */
2365 if (signr != SIGSTOP) {
2366 spin_unlock_irq(&sighand->siglock);
2367
2368 /* signals can be posted during this window */
2369
2370 if (is_current_pgrp_orphaned())
2371 goto relock;
2372
2373 spin_lock_irq(&sighand->siglock);
2374 }
2375
2376 if (likely(do_signal_stop(ksig->info.si_signo))) {
2377 /* It released the siglock. */
2378 goto relock;
2379 }
2380
2381 /*
2382 * We didn't actually stop, due to a race
2383 * with SIGCONT or something like that.
2384 */
2385 continue;
2386 }
2387
2388 fatal:
2389 spin_unlock_irq(&sighand->siglock);
2390
2391 /*
2392 * Anything else is fatal, maybe with a core dump.
2393 */
2394 current->flags |= PF_SIGNALED;
2395
2396 if (sig_kernel_coredump(signr)) {
2397 if (print_fatal_signals)
2398 print_fatal_signal(ksig->info.si_signo);
2399 proc_coredump_connector(current);
2400 /*
2401 * If it was able to dump core, this kills all
2402 * other threads in the group and synchronizes with
2403 * their demise. If we lost the race with another
2404 * thread getting here, it set group_exit_code
2405 * first and our do_group_exit call below will use
2406 * that value and ignore the one we pass it.
2407 */
2408 do_coredump(&ksig->info);
2409 }
2410
2411 /*
2412 * Death signals, no core dump.
2413 */
2414 do_group_exit(ksig->info.si_signo);
2415 /* NOTREACHED */
2416 }
2417 spin_unlock_irq(&sighand->siglock);
2418
2419 ksig->sig = signr;
2420 return ksig->sig > 0;
2421 }
2422
2423 /**
2424 * signal_delivered - update state after a signal has been delivered
2425 * @ksig: kernel signal struct
2426 * @stepping: nonzero if debugger single-step or block-step in use
2427 *
2428 * This function should be called when a signal has successfully been
2429 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2430 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2431 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2432 */
2433 static void signal_delivered(struct ksignal *ksig, int stepping)
2434 {
2435 sigset_t blocked;
2436
2437 /* A signal was successfully delivered, and the
2438 saved sigmask was stored on the signal frame,
2439 and will be restored by sigreturn. So we can
2440 simply clear the restore sigmask flag. */
2441 clear_restore_sigmask();
2442
2443 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2444 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2445 sigaddset(&blocked, ksig->sig);
2446 set_current_blocked(&blocked);
2447 tracehook_signal_handler(stepping);
2448 }
2449
2450 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2451 {
2452 if (failed)
2453 force_sigsegv(ksig->sig, current);
2454 else
2455 signal_delivered(ksig, stepping);
2456 }
2457
2458 /*
2459 * It could be that complete_signal() picked us to notify about the
2460 * group-wide signal. Other threads should be notified now to take
2461 * the shared signals in @which since we will not.
2462 */
2463 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2464 {
2465 sigset_t retarget;
2466 struct task_struct *t;
2467
2468 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2469 if (sigisemptyset(&retarget))
2470 return;
2471
2472 t = tsk;
2473 while_each_thread(tsk, t) {
2474 if (t->flags & PF_EXITING)
2475 continue;
2476
2477 if (!has_pending_signals(&retarget, &t->blocked))
2478 continue;
2479 /* Remove the signals this thread can handle. */
2480 sigandsets(&retarget, &retarget, &t->blocked);
2481
2482 if (!signal_pending(t))
2483 signal_wake_up(t, 0);
2484
2485 if (sigisemptyset(&retarget))
2486 break;
2487 }
2488 }
2489
2490 void exit_signals(struct task_struct *tsk)
2491 {
2492 int group_stop = 0;
2493 sigset_t unblocked;
2494
2495 /*
2496 * @tsk is about to have PF_EXITING set - lock out users which
2497 * expect stable threadgroup.
2498 */
2499 cgroup_threadgroup_change_begin(tsk);
2500
2501 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2502 tsk->flags |= PF_EXITING;
2503 cgroup_threadgroup_change_end(tsk);
2504 return;
2505 }
2506
2507 spin_lock_irq(&tsk->sighand->siglock);
2508 /*
2509 * From now this task is not visible for group-wide signals,
2510 * see wants_signal(), do_signal_stop().
2511 */
2512 tsk->flags |= PF_EXITING;
2513
2514 cgroup_threadgroup_change_end(tsk);
2515
2516 if (!signal_pending(tsk))
2517 goto out;
2518
2519 unblocked = tsk->blocked;
2520 signotset(&unblocked);
2521 retarget_shared_pending(tsk, &unblocked);
2522
2523 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2524 task_participate_group_stop(tsk))
2525 group_stop = CLD_STOPPED;
2526 out:
2527 spin_unlock_irq(&tsk->sighand->siglock);
2528
2529 /*
2530 * If group stop has completed, deliver the notification. This
2531 * should always go to the real parent of the group leader.
2532 */
2533 if (unlikely(group_stop)) {
2534 read_lock(&tasklist_lock);
2535 do_notify_parent_cldstop(tsk, false, group_stop);
2536 read_unlock(&tasklist_lock);
2537 }
2538 }
2539
2540 EXPORT_SYMBOL(recalc_sigpending);
2541 EXPORT_SYMBOL_GPL(dequeue_signal);
2542 EXPORT_SYMBOL(flush_signals);
2543 EXPORT_SYMBOL(force_sig);
2544 EXPORT_SYMBOL(send_sig);
2545 EXPORT_SYMBOL(send_sig_info);
2546 EXPORT_SYMBOL(sigprocmask);
2547
2548 /*
2549 * System call entry points.
2550 */
2551
2552 /**
2553 * sys_restart_syscall - restart a system call
2554 */
2555 SYSCALL_DEFINE0(restart_syscall)
2556 {
2557 struct restart_block *restart = &current->restart_block;
2558 return restart->fn(restart);
2559 }
2560
2561 long do_no_restart_syscall(struct restart_block *param)
2562 {
2563 return -EINTR;
2564 }
2565
2566 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2567 {
2568 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2569 sigset_t newblocked;
2570 /* A set of now blocked but previously unblocked signals. */
2571 sigandnsets(&newblocked, newset, &current->blocked);
2572 retarget_shared_pending(tsk, &newblocked);
2573 }
2574 tsk->blocked = *newset;
2575 recalc_sigpending();
2576 }
2577
2578 /**
2579 * set_current_blocked - change current->blocked mask
2580 * @newset: new mask
2581 *
2582 * It is wrong to change ->blocked directly; this helper should be used
2583 * to ensure the process can't miss a shared signal we are going to block.
2584 */
2585 void set_current_blocked(sigset_t *newset)
2586 {
2587 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2588 __set_current_blocked(newset);
2589 }
2590
2591 void __set_current_blocked(const sigset_t *newset)
2592 {
2593 struct task_struct *tsk = current;
2594
2595 /*
2596 * In case the signal mask hasn't changed, there is nothing we need
2597	 * to do. The current->blocked shouldn't be modified by another task.
2598 */
2599 if (sigequalsets(&tsk->blocked, newset))
2600 return;
2601
2602 spin_lock_irq(&tsk->sighand->siglock);
2603 __set_task_blocked(tsk, newset);
2604 spin_unlock_irq(&tsk->sighand->siglock);
2605 }
2606
2607 /*
2608 * This is also useful for kernel threads that want to temporarily
2609 * (or permanently) block certain signals.
2610 *
2611 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2612 * interface happily blocks "unblockable" signals like SIGKILL
2613 * and friends.
2614 */
2615 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2616 {
2617 struct task_struct *tsk = current;
2618 sigset_t newset;
2619
2620 /* Lockless, only current can change ->blocked, never from irq */
2621 if (oldset)
2622 *oldset = tsk->blocked;
2623
2624 switch (how) {
2625 case SIG_BLOCK:
2626 sigorsets(&newset, &tsk->blocked, set);
2627 break;
2628 case SIG_UNBLOCK:
2629 sigandnsets(&newset, &tsk->blocked, set);
2630 break;
2631 case SIG_SETMASK:
2632 newset = *set;
2633 break;
2634 default:
2635 return -EINVAL;
2636 }
2637
2638 __set_current_blocked(&newset);
2639 return 0;
2640 }
2641
2642 /**
2643 * sys_rt_sigprocmask - change the list of currently blocked signals
2644 * @how: whether to add, remove, or set signals
2645 * @nset: signals to add, remove, or set (if non-null)
2646 * @oset: previous value of signal mask if non-null
2647 * @sigsetsize: size of sigset_t type
2648 */
2649 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2650 sigset_t __user *, oset, size_t, sigsetsize)
2651 {
2652 sigset_t old_set, new_set;
2653 int error;
2654
2655 /* XXX: Don't preclude handling different sized sigset_t's. */
2656 if (sigsetsize != sizeof(sigset_t))
2657 return -EINVAL;
2658
2659 old_set = current->blocked;
2660
2661 if (nset) {
2662 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2663 return -EFAULT;
2664 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2665
2666 error = sigprocmask(how, &new_set, NULL);
2667 if (error)
2668 return error;
2669 }
2670
2671 if (oset) {
2672 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2673 return -EFAULT;
2674 }
2675
2676 return 0;
2677 }
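
/*
 * Userspace sketch (not kernel code, assuming a glibc-style libc): the
 * sigprocmask() wrapper normally ends up in sys_rt_sigprocmask() above.
 * Blocking a signal around a critical region and restoring the previous
 * mask looks like this; the critical-section callback is an assumed
 * placeholder.
 *
 *	#include <signal.h>
 *
 *	void with_sigint_blocked(void (*critical)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		critical();
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */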
2678
2679 #ifdef CONFIG_COMPAT
2680 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2681 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2682 {
2683 #ifdef __BIG_ENDIAN
2684 sigset_t old_set = current->blocked;
2685
2686 /* XXX: Don't preclude handling different sized sigset_t's. */
2687 if (sigsetsize != sizeof(sigset_t))
2688 return -EINVAL;
2689
2690 if (nset) {
2691 compat_sigset_t new32;
2692 sigset_t new_set;
2693 int error;
2694 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2695 return -EFAULT;
2696
2697 sigset_from_compat(&new_set, &new32);
2698 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2699
2700 error = sigprocmask(how, &new_set, NULL);
2701 if (error)
2702 return error;
2703 }
2704 if (oset) {
2705 compat_sigset_t old32;
2706 sigset_to_compat(&old32, &old_set);
2707 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2708 return -EFAULT;
2709 }
2710 return 0;
2711 #else
2712 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2713 (sigset_t __user *)oset, sigsetsize);
2714 #endif
2715 }
2716 #endif
2717
2718 static int do_sigpending(void *set, unsigned long sigsetsize)
2719 {
2720 if (sigsetsize > sizeof(sigset_t))
2721 return -EINVAL;
2722
2723 spin_lock_irq(&current->sighand->siglock);
2724 sigorsets(set, &current->pending.signal,
2725 &current->signal->shared_pending.signal);
2726 spin_unlock_irq(&current->sighand->siglock);
2727
2728 /* Outside the lock because only this thread touches it. */
2729 sigandsets(set, &current->blocked, set);
2730 return 0;
2731 }
2732
2733 /**
2734 * sys_rt_sigpending - examine a pending signal that has been raised
2735 * while blocked
2736 * @uset: stores pending signals
2737 * @sigsetsize: size of sigset_t type or larger
2738 */
2739 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2740 {
2741 sigset_t set;
2742 int err = do_sigpending(&set, sigsetsize);
2743 if (!err && copy_to_user(uset, &set, sigsetsize))
2744 err = -EFAULT;
2745 return err;
2746 }
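
/*
 * Userspace sketch (not kernel code): sigpending() reports signals raised
 * while blocked, i.e. the set computed by do_sigpending() above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void report_pending_sigint(void)
 *	{
 *		sigset_t pending;
 *
 *		if (sigpending(&pending) == 0 &&
 *		    sigismember(&pending, SIGINT))
 *			printf("SIGINT was raised while blocked\n");
 *	}
 */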
2747
2748 #ifdef CONFIG_COMPAT
2749 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2750 compat_size_t, sigsetsize)
2751 {
2752 #ifdef __BIG_ENDIAN
2753 sigset_t set;
2754 int err = do_sigpending(&set, sigsetsize);
2755 if (!err) {
2756 compat_sigset_t set32;
2757 sigset_to_compat(&set32, &set);
2758 /* we can get here only if sigsetsize <= sizeof(set) */
2759 if (copy_to_user(uset, &set32, sigsetsize))
2760 err = -EFAULT;
2761 }
2762 return err;
2763 #else
2764 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2765 #endif
2766 }
2767 #endif
2768
2769 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
2770 {
2771 enum siginfo_layout layout = SIL_KILL;
2772 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2773 static const struct {
2774 unsigned char limit, layout;
2775 } filter[] = {
2776 [SIGILL] = { NSIGILL, SIL_FAULT },
2777 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2778 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2779 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2780 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2781 #if defined(SIGEMT) && defined(NSIGEMT)
2782 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2783 #endif
2784 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2785 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2786 #ifdef __ARCH_SIGSYS
2787 [SIGSYS] = { NSIGSYS, SIL_SYS },
2788 #endif
2789 };
2790 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
2791 layout = filter[sig].layout;
2792 else if (si_code <= NSIGPOLL)
2793 layout = SIL_POLL;
2794 } else {
2795 if (si_code == SI_TIMER)
2796 layout = SIL_TIMER;
2797 else if (si_code == SI_SIGIO)
2798 layout = SIL_POLL;
2799 else if (si_code < 0)
2800 layout = SIL_RT;
2801 /* Tests to support buggy kernel ABIs */
2802 #ifdef TRAP_FIXME
2803 if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
2804 layout = SIL_FAULT;
2805 #endif
2806 #ifdef FPE_FIXME
2807 if ((sig == SIGFPE) && (si_code == FPE_FIXME))
2808 layout = SIL_FAULT;
2809 #endif
2810 }
2811 return layout;
2812 }
2813
2814 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2815
2816 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2817 {
2818 int err;
2819
2820 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2821 return -EFAULT;
2822 if (from->si_code < 0)
2823 return __copy_to_user(to, from, sizeof(siginfo_t))
2824 ? -EFAULT : 0;
2825 /*
2826 * If you change siginfo_t structure, please be sure
2827 * this code is fixed accordingly.
2828 * Please remember to update the signalfd_copyinfo() function
2829 * inside fs/signalfd.c too, in case siginfo_t changes.
2830 * It should never copy any pad contained in the structure
2831 * to avoid security leaks, but must copy the generic
2832 * 3 ints plus the relevant union member.
2833 */
2834 err = __put_user(from->si_signo, &to->si_signo);
2835 err |= __put_user(from->si_errno, &to->si_errno);
2836 err |= __put_user(from->si_code, &to->si_code);
2837 switch (siginfo_layout(from->si_signo, from->si_code)) {
2838 case SIL_KILL:
2839 err |= __put_user(from->si_pid, &to->si_pid);
2840 err |= __put_user(from->si_uid, &to->si_uid);
2841 break;
2842 case SIL_TIMER:
2843 /* Unreached SI_TIMER is negative */
2844 break;
2845 case SIL_POLL:
2846 err |= __put_user(from->si_band, &to->si_band);
2847 err |= __put_user(from->si_fd, &to->si_fd);
2848 break;
2849 case SIL_FAULT:
2850 err |= __put_user(from->si_addr, &to->si_addr);
2851 #ifdef __ARCH_SI_TRAPNO
2852 err |= __put_user(from->si_trapno, &to->si_trapno);
2853 #endif
2854 #ifdef BUS_MCEERR_AO
2855 /*
2856 * Other callers might not initialize the si_lsb field,
2857 * so check explicitly for the right codes here.
2858 */
2859 if (from->si_signo == SIGBUS &&
2860 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2861 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2862 #endif
2863 #ifdef SEGV_BNDERR
2864 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2865 err |= __put_user(from->si_lower, &to->si_lower);
2866 err |= __put_user(from->si_upper, &to->si_upper);
2867 }
2868 #endif
2869 #ifdef SEGV_PKUERR
2870 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2871 err |= __put_user(from->si_pkey, &to->si_pkey);
2872 #endif
2873 break;
2874 case SIL_CHLD:
2875 err |= __put_user(from->si_pid, &to->si_pid);
2876 err |= __put_user(from->si_uid, &to->si_uid);
2877 err |= __put_user(from->si_status, &to->si_status);
2878 err |= __put_user(from->si_utime, &to->si_utime);
2879 err |= __put_user(from->si_stime, &to->si_stime);
2880 break;
2881 case SIL_RT:
2882 err |= __put_user(from->si_pid, &to->si_pid);
2883 err |= __put_user(from->si_uid, &to->si_uid);
2884 err |= __put_user(from->si_ptr, &to->si_ptr);
2885 break;
2886 #ifdef __ARCH_SIGSYS
2887 case SIL_SYS:
2888 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2889 err |= __put_user(from->si_syscall, &to->si_syscall);
2890 err |= __put_user(from->si_arch, &to->si_arch);
2891 break;
2892 #endif
2893 }
2894 return err;
2895 }
2896
2897 #endif
2898
2899 /**
2900 * do_sigtimedwait - wait for queued signals specified in @which
2901 * @which: queued signals to wait for
2902 * @info: if non-null, the signal's siginfo is returned here
2903 * @ts: upper bound on process time suspension
2904 */
2905 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2906 const struct timespec *ts)
2907 {
2908 ktime_t *to = NULL, timeout = KTIME_MAX;
2909 struct task_struct *tsk = current;
2910 sigset_t mask = *which;
2911 int sig, ret = 0;
2912
2913 if (ts) {
2914 if (!timespec_valid(ts))
2915 return -EINVAL;
2916 timeout = timespec_to_ktime(*ts);
2917 to = &timeout;
2918 }
2919
2920 /*
2921 * Invert the set of allowed signals to get those we want to block.
2922 */
2923 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2924 signotset(&mask);
2925
2926 spin_lock_irq(&tsk->sighand->siglock);
2927 sig = dequeue_signal(tsk, &mask, info);
2928 if (!sig && timeout) {
2929 /*
2930		 * None ready. Temporarily unblock the signals we're interested
2931		 * in while we sleep, so that we'll be awakened when they
2932		 * arrive. Unblocking is always fine, we can avoid
2933		 * set_current_blocked().
2934 */
2935 tsk->real_blocked = tsk->blocked;
2936 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2937 recalc_sigpending();
2938 spin_unlock_irq(&tsk->sighand->siglock);
2939
2940 __set_current_state(TASK_INTERRUPTIBLE);
2941 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2942 HRTIMER_MODE_REL);
2943 spin_lock_irq(&tsk->sighand->siglock);
2944 __set_task_blocked(tsk, &tsk->real_blocked);
2945 sigemptyset(&tsk->real_blocked);
2946 sig = dequeue_signal(tsk, &mask, info);
2947 }
2948 spin_unlock_irq(&tsk->sighand->siglock);
2949
2950 if (sig)
2951 return sig;
2952 return ret ? -EINTR : -EAGAIN;
2953 }
2954
2955 /**
2956 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2957 * in @uthese
2958 * @uthese: queued signals to wait for
2959 * @uinfo: if non-null, the signal's siginfo is returned here
2960 * @uts: upper bound on process time suspension
2961 * @sigsetsize: size of sigset_t type
2962 */
2963 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2964 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2965 size_t, sigsetsize)
2966 {
2967 sigset_t these;
2968 struct timespec ts;
2969 siginfo_t info;
2970 int ret;
2971
2972 /* XXX: Don't preclude handling different sized sigset_t's. */
2973 if (sigsetsize != sizeof(sigset_t))
2974 return -EINVAL;
2975
2976 if (copy_from_user(&these, uthese, sizeof(these)))
2977 return -EFAULT;
2978
2979 if (uts) {
2980 if (copy_from_user(&ts, uts, sizeof(ts)))
2981 return -EFAULT;
2982 }
2983
2984 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2985
2986 if (ret > 0 && uinfo) {
2987 if (copy_siginfo_to_user(uinfo, &info))
2988 ret = -EFAULT;
2989 }
2990
2991 return ret;
2992 }
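
/*
 * Userspace sketch (not kernel code): synchronous signal handling with
 * sigtimedwait(), which reaches do_sigtimedwait() above. The signal is
 * blocked first so it stays queued instead of being delivered to a
 * handler; the five-second timeout is an arbitrary example value.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int wait_for_sigusr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		if (sigtimedwait(&set, &info, &ts) < 0)
 *			return -1;
 *		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *		return 0;
 *	}
 */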
2993
2994 #ifdef CONFIG_COMPAT
2995 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
2996 struct compat_siginfo __user *, uinfo,
2997 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
2998 {
2999 compat_sigset_t s32;
3000 sigset_t s;
3001 struct timespec t;
3002 siginfo_t info;
3003 long ret;
3004
3005 if (sigsetsize != sizeof(sigset_t))
3006 return -EINVAL;
3007
3008 if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
3009 return -EFAULT;
3010 sigset_from_compat(&s, &s32);
3011
3012 if (uts) {
3013 if (compat_get_timespec(&t, uts))
3014 return -EFAULT;
3015 }
3016
3017 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3018
3019 if (ret > 0 && uinfo) {
3020 if (copy_siginfo_to_user32(uinfo, &info))
3021 ret = -EFAULT;
3022 }
3023
3024 return ret;
3025 }
3026 #endif
3027
3028 static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
3029 {
3030 info->si_signo = sig;
3031 info->si_errno = 0;
3032 info->si_code = SI_USER;
3033 info->si_pid = task_tgid_vnr(current);
3034 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3035 }
3036
3037 /**
3038 * sys_kill - send a signal to a process
3039 * @pid: the PID of the process
3040 * @sig: signal to be sent
3041 */
3042 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3043 {
3044 struct siginfo info;
3045
3046 prepare_kill_siginfo(sig, &info);
3047
3048 return kill_something_info(sig, &info, pid);
3049 }
3050
3051 #ifdef CONFIG_PROC_FS
3052 /*
3053 * Verify that the signaler and signalee either are in the same pid namespace
3054 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3055 * namespace.
3056 */
3057 static bool access_pidfd_pidns(struct pid *pid)
3058 {
3059 struct pid_namespace *active = task_active_pid_ns(current);
3060 struct pid_namespace *p = ns_of_pid(pid);
3061
3062 for (;;) {
3063 if (!p)
3064 return false;
3065 if (p == active)
3066 break;
3067 p = p->parent;
3068 }
3069
3070 return true;
3071 }
3072
3073 static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
3074 {
3075 #ifdef CONFIG_COMPAT
3076 /*
3077 * Avoid hooking up compat syscalls and instead handle necessary
3078 * conversions here. Note, this is a stop-gap measure and should not be
3079 * considered a generic solution.
3080 */
3081 if (in_compat_syscall())
3082 return copy_siginfo_from_user32(
3083 kinfo, (struct compat_siginfo __user *)info);
3084 #endif
3085 return copy_from_user(kinfo, info, sizeof(siginfo_t));
3086 }
3087
3088 /**
3089 * sys_pidfd_send_signal - send a signal to a process through a task file
3090 * descriptor
3091 * @pidfd: the file descriptor of the process
3092 * @sig: signal to be sent
3093 * @info: the signal info
3094 * @flags: future flags to be passed
3095 *
3096 * The syscall currently only signals via PIDTYPE_PID which covers
3097 * kill(<positive-pid>, <signal>). It does not signal threads or process
3098 * groups.
3099 * In order to extend the syscall to threads and process groups the @flags
3100 * argument should be used. In essence, the @flags argument will determine
3101 * what is signaled and not the file descriptor itself. In other words,
3102 * grouping is a property of the flags argument not a property of the file
3103 * descriptor.
3104 *
3105 * Return: 0 on success, negative errno on failure
3106 */
3107 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3108 siginfo_t __user *, info, unsigned int, flags)
3109 {
3110 int ret;
3111 struct fd f;
3112 struct pid *pid;
3113 siginfo_t kinfo;
3114
3115 /* Enforce flags be set to 0 until we add an extension. */
3116 if (flags)
3117 return -EINVAL;
3118
3119 f = fdget_raw(pidfd);
3120 if (!f.file)
3121 return -EBADF;
3122
3123 /* Is this a pidfd? */
3124 pid = tgid_pidfd_to_pid(f.file);
3125 if (IS_ERR(pid)) {
3126 ret = PTR_ERR(pid);
3127 goto err;
3128 }
3129
3130 ret = -EINVAL;
3131 if (!access_pidfd_pidns(pid))
3132 goto err;
3133
3134 if (info) {
3135 ret = copy_siginfo_from_user_any(&kinfo, info);
3136 if (unlikely(ret))
3137 goto err;
3138
3139 ret = -EINVAL;
3140 if (unlikely(sig != kinfo.si_signo))
3141 goto err;
3142
3143 if ((task_pid(current) != pid) &&
3144 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
3145 /* Only allow sending arbitrary signals to yourself. */
3146 ret = -EPERM;
3147 if (kinfo.si_code != SI_USER)
3148 goto err;
3149
3150 /* Turn this into a regular kill signal. */
3151 prepare_kill_siginfo(sig, &kinfo);
3152 }
3153 } else {
3154 prepare_kill_siginfo(sig, &kinfo);
3155 }
3156
3157 ret = kill_pid_info(sig, &kinfo, pid);
3158
3159 err:
3160 fdput(f);
3161 return ret;
3162 }
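
/*
 * Userspace sketch (not kernel code): with this backport a pidfd is a file
 * descriptor referring to /proc/<pid>, and the syscall is invoked via
 * syscall(2). The __NR_pidfd_send_signal number (424 on most architectures)
 * and the /proc path passed by the caller are assumptions here, e.g.
 * pidfd_kill("/proc/1234", SIGTERM).
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#ifndef __NR_pidfd_send_signal
 *	#define __NR_pidfd_send_signal 424
 *	#endif
 *
 *	int pidfd_kill(const char *procdir, int sig)
 *	{
 *		int pidfd, ret;
 *
 *		pidfd = open(procdir, O_DIRECTORY | O_CLOEXEC);
 *		if (pidfd < 0)
 *			return -1;
 *		ret = syscall(__NR_pidfd_send_signal, pidfd, sig, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */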
3163 #endif /* CONFIG_PROC_FS */
3164
3165 static int
3166 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3167 {
3168 struct task_struct *p;
3169 int error = -ESRCH;
3170
3171 rcu_read_lock();
3172 p = find_task_by_vpid(pid);
3173 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3174 error = check_kill_permission(sig, info, p);
3175 /*
3176 * The null signal is a permissions and process existence
3177 * probe. No signal is actually delivered.
3178 */
3179 if (!error && sig) {
3180 error = do_send_sig_info(sig, info, p, false);
3181 /*
3182 * If lock_task_sighand() failed we pretend the task
3183 * dies after receiving the signal. The window is tiny,
3184 * and the signal is private anyway.
3185 */
3186 if (unlikely(error == -ESRCH))
3187 error = 0;
3188 }
3189 }
3190 rcu_read_unlock();
3191
3192 return error;
3193 }
3194
3195 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3196 {
3197 struct siginfo info = {};
3198
3199 info.si_signo = sig;
3200 info.si_errno = 0;
3201 info.si_code = SI_TKILL;
3202 info.si_pid = task_tgid_vnr(current);
3203 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3204
3205 return do_send_specific(tgid, pid, sig, &info);
3206 }
3207
3208 /**
3209 * sys_tgkill - send signal to one specific thread
3210 * @tgid: the thread group ID of the thread
3211 * @pid: the PID of the thread
3212 * @sig: signal to be sent
3213 *
3214 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3215 * exists but no longer belongs to the target process. This
3216 * method solves the problem of threads exiting and PIDs getting reused.
3217 */
3218 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3219 {
3220 /* This is only valid for single tasks */
3221 if (pid <= 0 || tgid <= 0)
3222 return -EINVAL;
3223
3224 return do_tkill(tgid, pid, sig);
3225 }
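
/*
 * Userspace sketch (not kernel code): tgkill() directs a signal at one
 * specific thread. Older C libraries do not wrap it, so the raw syscall is
 * used; the tgid/tid values are assumed to come from getpid()/gettid() or
 * from /proc.
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int signal_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */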
3226
3227 /**
3228 * sys_tkill - send signal to one specific task
3229 * @pid: the PID of the task
3230 * @sig: signal to be sent
3231 *
3232 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3233 */
3234 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3235 {
3236 /* This is only valid for single tasks */
3237 if (pid <= 0)
3238 return -EINVAL;
3239
3240 return do_tkill(0, pid, sig);
3241 }
3242
3243 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3244 {
3245 /* Not even root can pretend to send signals from the kernel.
3246 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3247 */
3248 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3249 (task_pid_vnr(current) != pid))
3250 return -EPERM;
3251
3252 info->si_signo = sig;
3253
3254 /* POSIX.1b doesn't mention process groups. */
3255 return kill_proc_info(sig, info, pid);
3256 }
3257
3258 /**
3259 * sys_rt_sigqueueinfo - send signal information to a process
3260 * @pid: the PID of the process
3261 * @sig: signal to be sent
3262 * @uinfo: signal info to be sent
3263 */
3264 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3265 siginfo_t __user *, uinfo)
3266 {
3267 siginfo_t info;
3268 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3269 return -EFAULT;
3270 return do_rt_sigqueueinfo(pid, sig, &info);
3271 }
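
/*
 * Userspace sketch (not kernel code): the glibc sigqueue() wrapper builds a
 * siginfo with si_code = SI_QUEUE plus a caller-chosen value and ends up in
 * do_rt_sigqueueinfo() above; a receiver that installed its handler with
 * SA_SIGINFO sees the value in si_value.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int queue_value(pid_t pid, int token)
 *	{
 *		union sigval v = { .sival_int = token };
 *
 *		return sigqueue(pid, SIGRTMIN, v);
 *	}
 */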
3272
3273 #ifdef CONFIG_COMPAT
3274 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3275 compat_pid_t, pid,
3276 int, sig,
3277 struct compat_siginfo __user *, uinfo)
3278 {
3279 siginfo_t info = {};
3280 int ret = copy_siginfo_from_user32(&info, uinfo);
3281 if (unlikely(ret))
3282 return ret;
3283 return do_rt_sigqueueinfo(pid, sig, &info);
3284 }
3285 #endif
3286
3287 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3288 {
3289 /* This is only valid for single tasks */
3290 if (pid <= 0 || tgid <= 0)
3291 return -EINVAL;
3292
3293 /* Not even root can pretend to send signals from the kernel.
3294 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3295 */
3296 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3297 (task_pid_vnr(current) != pid))
3298 return -EPERM;
3299
3300 info->si_signo = sig;
3301
3302 return do_send_specific(tgid, pid, sig, info);
3303 }
3304
3305 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3306 siginfo_t __user *, uinfo)
3307 {
3308 siginfo_t info;
3309
3310 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3311 return -EFAULT;
3312
3313 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3314 }
3315
3316 #ifdef CONFIG_COMPAT
3317 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3318 compat_pid_t, tgid,
3319 compat_pid_t, pid,
3320 int, sig,
3321 struct compat_siginfo __user *, uinfo)
3322 {
3323 siginfo_t info = {};
3324
3325 if (copy_siginfo_from_user32(&info, uinfo))
3326 return -EFAULT;
3327 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3328 }
3329 #endif
3330
3331 /*
3332 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3333 */
3334 void kernel_sigaction(int sig, __sighandler_t action)
3335 {
3336 spin_lock_irq(&current->sighand->siglock);
3337 current->sighand->action[sig - 1].sa.sa_handler = action;
3338 if (action == SIG_IGN) {
3339 sigset_t mask;
3340
3341 sigemptyset(&mask);
3342 sigaddset(&mask, sig);
3343
3344 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3345 flush_sigqueue_mask(&mask, &current->pending);
3346 recalc_sigpending();
3347 }
3348 spin_unlock_irq(&current->sighand->siglock);
3349 }
3350 EXPORT_SYMBOL(kernel_sigaction);
3351
3352 void __weak sigaction_compat_abi(struct k_sigaction *act,
3353 struct k_sigaction *oact)
3354 {
3355 }
3356
3357 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3358 {
3359 struct task_struct *p = current, *t;
3360 struct k_sigaction *k;
3361 sigset_t mask;
3362
3363 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3364 return -EINVAL;
3365
3366 k = &p->sighand->action[sig-1];
3367
3368 spin_lock_irq(&p->sighand->siglock);
3369 if (oact)
3370 *oact = *k;
3371
3372 sigaction_compat_abi(act, oact);
3373
3374 if (act) {
3375 sigdelsetmask(&act->sa.sa_mask,
3376 sigmask(SIGKILL) | sigmask(SIGSTOP));
3377 *k = *act;
3378 /*
3379 * POSIX 3.3.1.3:
3380 * "Setting a signal action to SIG_IGN for a signal that is
3381 * pending shall cause the pending signal to be discarded,
3382 * whether or not it is blocked."
3383 *
3384 * "Setting a signal action to SIG_DFL for a signal that is
3385 * pending and whose default action is to ignore the signal
3386 * (for example, SIGCHLD), shall cause the pending signal to
3387 * be discarded, whether or not it is blocked"
3388 */
3389 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3390 sigemptyset(&mask);
3391 sigaddset(&mask, sig);
3392 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3393 for_each_thread(p, t)
3394 flush_sigqueue_mask(&mask, &t->pending);
3395 }
3396 }
3397
3398 spin_unlock_irq(&p->sighand->siglock);
3399 return 0;
3400 }
3401
3402 static int
3403 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3404 size_t min_ss_size)
3405 {
3406 struct task_struct *t = current;
3407
3408 if (oss) {
3409 memset(oss, 0, sizeof(stack_t));
3410 oss->ss_sp = (void __user *) t->sas_ss_sp;
3411 oss->ss_size = t->sas_ss_size;
3412 oss->ss_flags = sas_ss_flags(sp) |
3413 (current->sas_ss_flags & SS_FLAG_BITS);
3414 }
3415
3416 if (ss) {
3417 void __user *ss_sp = ss->ss_sp;
3418 size_t ss_size = ss->ss_size;
3419 unsigned ss_flags = ss->ss_flags;
3420 int ss_mode;
3421
3422 if (unlikely(on_sig_stack(sp)))
3423 return -EPERM;
3424
3425 ss_mode = ss_flags & ~SS_FLAG_BITS;
3426 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3427 ss_mode != 0))
3428 return -EINVAL;
3429
3430 if (ss_mode == SS_DISABLE) {
3431 ss_size = 0;
3432 ss_sp = NULL;
3433 } else {
3434 if (unlikely(ss_size < min_ss_size))
3435 return -ENOMEM;
3436 }
3437
3438 t->sas_ss_sp = (unsigned long) ss_sp;
3439 t->sas_ss_size = ss_size;
3440 t->sas_ss_flags = ss_flags;
3441 }
3442 return 0;
3443 }
3444
3445 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3446 {
3447 stack_t new, old;
3448 int err;
3449 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3450 return -EFAULT;
3451 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3452 current_user_stack_pointer(),
3453 MINSIGSTKSZ);
3454 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3455 err = -EFAULT;
3456 return err;
3457 }
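
/*
 * Userspace sketch (not kernel code): installing an alternate stack so a
 * SIGSEGV handler can still run when the normal stack is exhausted; the
 * handler body is intentionally empty here.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void on_segv(int sig) { (void)sig; }
 *
 *	int install_alt_stack(void)
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *			       .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *		struct sigaction sa = { .sa_handler = on_segv,
 *					.sa_flags = SA_ONSTACK };
 *
 *		if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
 *			return -1;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */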
3458
3459 int restore_altstack(const stack_t __user *uss)
3460 {
3461 stack_t new;
3462 if (copy_from_user(&new, uss, sizeof(stack_t)))
3463 return -EFAULT;
3464 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3465 MINSIGSTKSZ);
3466 /* squash all but EFAULT for now */
3467 return 0;
3468 }
3469
3470 int __save_altstack(stack_t __user *uss, unsigned long sp)
3471 {
3472 struct task_struct *t = current;
3473 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3474 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3475 __put_user(t->sas_ss_size, &uss->ss_size);
3476 if (err)
3477 return err;
3478 if (t->sas_ss_flags & SS_AUTODISARM)
3479 sas_ss_reset(t);
3480 return 0;
3481 }
3482
3483 #ifdef CONFIG_COMPAT
3484 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3485 const compat_stack_t __user *, uss_ptr,
3486 compat_stack_t __user *, uoss_ptr)
3487 {
3488 stack_t uss, uoss;
3489 int ret;
3490
3491 if (uss_ptr) {
3492 compat_stack_t uss32;
3493 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3494 return -EFAULT;
3495 uss.ss_sp = compat_ptr(uss32.ss_sp);
3496 uss.ss_flags = uss32.ss_flags;
3497 uss.ss_size = uss32.ss_size;
3498 }
3499 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3500 compat_user_stack_pointer(),
3501 COMPAT_MINSIGSTKSZ);
3502 if (ret >= 0 && uoss_ptr) {
3503 compat_stack_t old;
3504 memset(&old, 0, sizeof(old));
3505 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3506 old.ss_flags = uoss.ss_flags;
3507 old.ss_size = uoss.ss_size;
3508 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3509 ret = -EFAULT;
3510 }
3511 return ret;
3512 }
3513
3514 int compat_restore_altstack(const compat_stack_t __user *uss)
3515 {
3516 int err = compat_sys_sigaltstack(uss, NULL);
3517 /* squash all but -EFAULT for now */
3518 return err == -EFAULT ? err : 0;
3519 }
3520
3521 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3522 {
3523 int err;
3524 struct task_struct *t = current;
3525 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3526 &uss->ss_sp) |
3527 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3528 __put_user(t->sas_ss_size, &uss->ss_size);
3529 if (err)
3530 return err;
3531 if (t->sas_ss_flags & SS_AUTODISARM)
3532 sas_ss_reset(t);
3533 return 0;
3534 }
3535 #endif
3536
3537 #ifdef __ARCH_WANT_SYS_SIGPENDING
3538
3539 /**
3540 * sys_sigpending - examine pending signals
3541 * @set: where the mask of pending signals is returned
3542 */
3543 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3544 {
3545 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3546 }
3547
3548 #ifdef CONFIG_COMPAT
3549 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3550 {
3551 #ifdef __BIG_ENDIAN
3552 sigset_t set;
3553 int err = do_sigpending(&set, sizeof(set.sig[0]));
3554 if (!err)
3555 err = put_user(set.sig[0], set32);
3556 return err;
3557 #else
3558 return sys_rt_sigpending((sigset_t __user *)set32, sizeof(*set32));
3559 #endif
3560 }
3561 #endif
3562
3563 #endif
3564
3565 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3566 /**
3567 * sys_sigprocmask - examine and change blocked signals
3568 * @how: whether to add, remove, or set signals
3569 * @nset: signals to add or remove (if non-null)
3570 * @oset: previous value of signal mask if non-null
3571 *
3572 * Some platforms have their own version with special arguments;
3573 * others support only sys_rt_sigprocmask.
3574 */
3575
3576 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3577 old_sigset_t __user *, oset)
3578 {
3579 old_sigset_t old_set, new_set;
3580 sigset_t new_blocked;
3581
3582 old_set = current->blocked.sig[0];
3583
3584 if (nset) {
3585 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3586 return -EFAULT;
3587
3588 new_blocked = current->blocked;
3589
3590 switch (how) {
3591 case SIG_BLOCK:
3592 sigaddsetmask(&new_blocked, new_set);
3593 break;
3594 case SIG_UNBLOCK:
3595 sigdelsetmask(&new_blocked, new_set);
3596 break;
3597 case SIG_SETMASK:
3598 new_blocked.sig[0] = new_set;
3599 break;
3600 default:
3601 return -EINVAL;
3602 }
3603
3604 set_current_blocked(&new_blocked);
3605 }
3606
3607 if (oset) {
3608 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3609 return -EFAULT;
3610 }
3611
3612 return 0;
3613 }
3614 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3615
3616 #ifndef CONFIG_ODD_RT_SIGACTION
3617 /**
3618 * sys_rt_sigaction - alter an action taken by a process
3619 * @sig: signal whose action is to be changed
3620 * @act: new sigaction
3621 * @oact: used to save the previous sigaction
3622 * @sigsetsize: size of sigset_t type
3623 */
3624 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3625 const struct sigaction __user *, act,
3626 struct sigaction __user *, oact,
3627 size_t, sigsetsize)
3628 {
3629 struct k_sigaction new_sa, old_sa;
3630 int ret = -EINVAL;
3631
3632 /* XXX: Don't preclude handling different sized sigset_t's. */
3633 if (sigsetsize != sizeof(sigset_t))
3634 goto out;
3635
3636 if (act) {
3637 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3638 return -EFAULT;
3639 }
3640
3641 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3642
3643 if (!ret && oact) {
3644 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3645 return -EFAULT;
3646 }
3647 out:
3648 return ret;
3649 }
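
/*
 * Userspace sketch (not kernel code): the libc sigaction() wrapper reaches
 * do_sigaction() above. With SA_SIGINFO the handler receives the siginfo
 * filled in by copy_siginfo_to_user(), e.g. the sender's pid for a
 * user-generated signal. printf() is used only for illustration; it is not
 * async-signal-safe.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void on_usr1(int sig, siginfo_t *info, void *ctx)
 *	{
 *		(void)sig; (void)ctx;
 *		printf("SIGUSR1 from pid %d\n", (int)info->si_pid);
 *	}
 *
 *	int install_usr1(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = on_usr1,
 *					.sa_flags = SA_SIGINFO };
 *
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */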
3650 #ifdef CONFIG_COMPAT
3651 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3652 const struct compat_sigaction __user *, act,
3653 struct compat_sigaction __user *, oact,
3654 compat_size_t, sigsetsize)
3655 {
3656 struct k_sigaction new_ka, old_ka;
3657 compat_sigset_t mask;
3658 #ifdef __ARCH_HAS_SA_RESTORER
3659 compat_uptr_t restorer;
3660 #endif
3661 int ret;
3662
3663 /* XXX: Don't preclude handling different sized sigset_t's. */
3664 if (sigsetsize != sizeof(compat_sigset_t))
3665 return -EINVAL;
3666
3667 if (act) {
3668 compat_uptr_t handler;
3669 ret = get_user(handler, &act->sa_handler);
3670 new_ka.sa.sa_handler = compat_ptr(handler);
3671 #ifdef __ARCH_HAS_SA_RESTORER
3672 ret |= get_user(restorer, &act->sa_restorer);
3673 new_ka.sa.sa_restorer = compat_ptr(restorer);
3674 #endif
3675 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3676 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3677 if (ret)
3678 return -EFAULT;
3679 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3680 }
3681
3682 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3683 if (!ret && oact) {
3684 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3685 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3686 &oact->sa_handler);
3687 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3688 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3689 #ifdef __ARCH_HAS_SA_RESTORER
3690 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3691 &oact->sa_restorer);
3692 #endif
3693 }
3694 return ret;
3695 }
3696 #endif
3697 #endif /* !CONFIG_ODD_RT_SIGACTION */
3698
3699 #ifdef CONFIG_OLD_SIGACTION
3700 SYSCALL_DEFINE3(sigaction, int, sig,
3701 const struct old_sigaction __user *, act,
3702 struct old_sigaction __user *, oact)
3703 {
3704 struct k_sigaction new_ka, old_ka;
3705 int ret;
3706
3707 if (act) {
3708 old_sigset_t mask;
3709 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3710 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3711 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3712 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3713 __get_user(mask, &act->sa_mask))
3714 return -EFAULT;
3715 #ifdef __ARCH_HAS_KA_RESTORER
3716 new_ka.ka_restorer = NULL;
3717 #endif
3718 siginitset(&new_ka.sa.sa_mask, mask);
3719 }
3720
3721 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3722
3723 if (!ret && oact) {
3724 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3725 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3726 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3727 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3728 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3729 return -EFAULT;
3730 }
3731
3732 return ret;
3733 }
3734 #endif
3735 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3736 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3737 const struct compat_old_sigaction __user *, act,
3738 struct compat_old_sigaction __user *, oact)
3739 {
3740 struct k_sigaction new_ka, old_ka;
3741 int ret;
3742 compat_old_sigset_t mask;
3743 compat_uptr_t handler, restorer;
3744
3745 if (act) {
3746 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3747 __get_user(handler, &act->sa_handler) ||
3748 __get_user(restorer, &act->sa_restorer) ||
3749 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3750 __get_user(mask, &act->sa_mask))
3751 return -EFAULT;
3752
3753 #ifdef __ARCH_HAS_KA_RESTORER
3754 new_ka.ka_restorer = NULL;
3755 #endif
3756 new_ka.sa.sa_handler = compat_ptr(handler);
3757 new_ka.sa.sa_restorer = compat_ptr(restorer);
3758 siginitset(&new_ka.sa.sa_mask, mask);
3759 }
3760
3761 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3762
3763 if (!ret && oact) {
3764 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3765 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3766 &oact->sa_handler) ||
3767 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3768 &oact->sa_restorer) ||
3769 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3770 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3771 return -EFAULT;
3772 }
3773 return ret;
3774 }
3775 #endif
3776
3777 #ifdef CONFIG_SGETMASK_SYSCALL
3778
3779 /*
3780 * For backwards compatibility. Functionality superseded by sigprocmask.
3781 */
3782 SYSCALL_DEFINE0(sgetmask)
3783 {
3784 /* SMP safe */
3785 return current->blocked.sig[0];
3786 }
3787
3788 SYSCALL_DEFINE1(ssetmask, int, newmask)
3789 {
3790 int old = current->blocked.sig[0];
3791 sigset_t newset;
3792
3793 siginitset(&newset, newmask);
3794 set_current_blocked(&newset);
3795
3796 return old;
3797 }
3798 #endif /* CONFIG_SGETMASK_SYSCALL */
3799
3800 #ifdef __ARCH_WANT_SYS_SIGNAL
3801 /*
3802 * For backwards compatibility. Functionality superseded by sigaction.
3803 */
3804 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3805 {
3806 struct k_sigaction new_sa, old_sa;
3807 int ret;
3808
3809 new_sa.sa.sa_handler = handler;
3810 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3811 sigemptyset(&new_sa.sa.sa_mask);
3812
3813 ret = do_sigaction(sig, &new_sa, &old_sa);
3814
3815 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3816 }
3817 #endif /* __ARCH_WANT_SYS_SIGNAL */
3818
3819 #ifdef __ARCH_WANT_SYS_PAUSE
3820
3821 SYSCALL_DEFINE0(pause)
3822 {
3823 while (!signal_pending(current)) {
3824 __set_current_state(TASK_INTERRUPTIBLE);
3825 schedule();
3826 }
3827 return -ERESTARTNOHAND;
3828 }
3829
3830 #endif
3831
3832 static int sigsuspend(sigset_t *set)
3833 {
3834 current->saved_sigmask = current->blocked;
3835 set_current_blocked(set);
3836
3837 while (!signal_pending(current)) {
3838 __set_current_state(TASK_INTERRUPTIBLE);
3839 schedule();
3840 }
3841 set_restore_sigmask();
3842 return -ERESTARTNOHAND;
3843 }
3844
3845 /**
3846 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
3847 * until a signal is received
3848 * @unewset: new signal mask value
3849 * @sigsetsize: size of sigset_t type
3850 */
3851 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3852 {
3853 sigset_t newset;
3854
3855 /* XXX: Don't preclude handling different sized sigset_t's. */
3856 if (sigsetsize != sizeof(sigset_t))
3857 return -EINVAL;
3858
3859 if (copy_from_user(&newset, unewset, sizeof(newset)))
3860 return -EFAULT;
3861 return sigsuspend(&newset);
3862 }
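
/*
 * Userspace sketch (not kernel code): the classic race-free wait. The
 * signal is blocked, a handler-set flag is checked, and sigsuspend()
 * atomically installs the old mask and sleeps, mirroring the
 * saved_sigmask handling in sigsuspend() above. The got_signal flag is
 * assumed to be set elsewhere by the signal handler.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_signal;
 *
 *	void wait_until_signalled(int sig)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, sig);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_signal)
 *			sigsuspend(&old);
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */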
3863
3864 #ifdef CONFIG_COMPAT
3865 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3866 {
3867 #ifdef __BIG_ENDIAN
3868 sigset_t newset;
3869 compat_sigset_t newset32;
3870
3871 /* XXX: Don't preclude handling different sized sigset_t's. */
3872 if (sigsetsize != sizeof(sigset_t))
3873 return -EINVAL;
3874
3875 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3876 return -EFAULT;
3877 sigset_from_compat(&newset, &newset32);
3878 return sigsuspend(&newset);
3879 #else
3880 /* on little-endian bitmaps don't care about granularity */
3881 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3882 #endif
3883 }
3884 #endif
3885
3886 #ifdef CONFIG_OLD_SIGSUSPEND
3887 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3888 {
3889 sigset_t blocked;
3890 siginitset(&blocked, mask);
3891 return sigsuspend(&blocked);
3892 }
3893 #endif
3894 #ifdef CONFIG_OLD_SIGSUSPEND3
3895 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3896 {
3897 sigset_t blocked;
3898 siginitset(&blocked, mask);
3899 return sigsuspend(&blocked);
3900 }
3901 #endif
3902
3903 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3904 {
3905 return NULL;
3906 }
3907
3908 void __init signals_init(void)
3909 {
3910 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3911 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3912 != offsetof(struct siginfo, _sifields._pad));
3913
3914 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3915 }
3916
3917 #ifdef CONFIG_KGDB_KDB
3918 #include <linux/kdb.h>
3919 /*
3920 * kdb_send_sig_info - Allows kdb to send signals without exposing
3921 * signal internals. This function checks if the required locks are
3922 * available before calling the main signal code, to avoid kdb
3923 * deadlocks.
3924 */
3925 void
3926 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3927 {
3928 static struct task_struct *kdb_prev_t;
3929 int sig, new_t;
3930 if (!spin_trylock(&t->sighand->siglock)) {
3931 kdb_printf("Can't do kill command now.\n"
3932 "The sigmask lock is held somewhere else in "
3933 "kernel, try again later\n");
3934 return;
3935 }
3936 spin_unlock(&t->sighand->siglock);
3937 new_t = kdb_prev_t != t;
3938 kdb_prev_t = t;
3939 if (t->state != TASK_RUNNING && new_t) {
3940 kdb_printf("Process is not RUNNING, sending a signal from "
3941 "kdb risks deadlock\n"
3942 "on the run queue locks. "
3943 "The signal has _not_ been sent.\n"
3944 "Reissue the kill command if you want to risk "
3945 "the deadlock.\n");
3946 return;
3947 }
3948 sig = info->si_signo;
3949 if (send_sig_info(sig, info, t))
3950 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3951 sig, t->pid);
3952 else
3953 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3954 }
3955 #endif /* CONFIG_KGDB_KDB */