/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
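/*
 * Illustrative userspace usage, not part of this file. A minimal sketch of
 * entering mode 1, assuming only the prctl(2) interface and the constants
 * exported through <linux/seccomp.h>:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/seccomp.h>
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *	// From here on, only read/write/_exit/sigreturn are permitted;
 *	// any other system call terminates the task.
 */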
#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/* #define SECCOMP_DEBUG	1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @len: the number of instructions in the program
 * @insns: the BPF program instructions to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
        unsigned short len;  /* Instruction count */
        struct sock_filter insns[];
};
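/*
 * Illustrative layout, not taken from this file: if task A attaches filter
 * F1 and then forks B and C, each of which attaches its own filter, memory
 * holds a tree rooted at F1 (arrows are @prev pointers):
 *
 *	B->seccomp.filter --> F2 --> F1 <-- F3 <-- C->seccomp.filter
 *
 * B only ever walks F2->F1 and C only F3->F1, while F1's @usage count keeps
 * the shared ancestor alive until both children have dropped it.
 */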
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
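/*
 * Worked out (illustrative): struct sock_filter is 8 bytes on every
 * architecture, so (1 << 18) bytes / 8 bytes per instruction allows at
 * most 32768 BPF instructions along any @prev chain.
 */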
/**
 * get_u32 - returns a u32 offset into data
 * @data: an unsigned 64 bit value
 * @index: 0 or 1 to return the first or second 32-bits
 *
 * This inline exists to hide the length of unsigned long. If a 32-bit
 * unsigned long is passed in, it will be extended and the top 32-bits will be
 * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
 * properly returned.
 *
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static inline u32 get_u32(u64 data, int index)
{
        return ((u32 *)&data)[index];
}
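/*
 * Example (illustrative, assumes a little-endian machine): with
 * data == 0x0000000100000002ULL, get_u32(data, 0) returns 0x00000002 and
 * get_u32(data, 1) returns 0x00000001. On big-endian the halves swap,
 * which is why endianness is left to the filter author.
 */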
/* Helper for bpf_load below. */
#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
/**
 * bpf_load: checks and returns a pointer to the requested offset
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32-bits of data.
 * seccomp_check_filter() should assure that @off is 32-bit aligned
 * and not out of bounds. Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
        struct pt_regs *regs = task_pt_regs(current);
        if (off == BPF_DATA(nr))
                return syscall_get_nr(current, regs);
        if (off == BPF_DATA(arch))
                return syscall_get_arch();
        if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
                unsigned long value;
                int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
                int index = !!(off % sizeof(u64));
                syscall_get_arguments(current, regs, arg, 1, &value);
                return get_u32(value, index);
        }
        if (off == BPF_DATA(instruction_pointer))
                return get_u32(KSTK_EIP(current), 0);
        if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
                return get_u32(KSTK_EIP(current), 1);
        /* seccomp_check_filter should make this impossible. */
        BUG();
}
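/*
 * Illustrative userspace filter fragment, not part of this file (a sketch
 * assuming the classic BPF macros from <linux/filter.h>, the constants from
 * <linux/seccomp.h>, and AUDIT_ARCH_X86_64 from <linux/audit.h>). Absolute
 * word loads like these are what seccomp_check_filter() redirects through
 * seccomp_bpf_load():
 *
 *	struct sock_filter insns[] = {
 *		// load seccomp_data.arch; kill unless it matches
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, arch)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
 *		// load seccomp_data.nr, then allow everything
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 */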
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
        int pc;
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;
                u32 k = ftest->k;

                switch (code) {
                case BPF_S_LD_W_ABS:
                        ftest->code = BPF_S_ANC_SECCOMP_LD_W;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_S_LD_W_LEN:
                        ftest->code = BPF_S_LD_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_S_LDX_W_LEN:
                        ftest->code = BPF_S_LDX_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
                case BPF_S_RET_K:
                case BPF_S_RET_A:
                case BPF_S_ALU_ADD_K:
                case BPF_S_ALU_ADD_X:
                case BPF_S_ALU_SUB_K:
                case BPF_S_ALU_SUB_X:
                case BPF_S_ALU_MUL_K:
                case BPF_S_ALU_MUL_X:
                case BPF_S_ALU_DIV_X:
                case BPF_S_ALU_AND_K:
                case BPF_S_ALU_AND_X:
                case BPF_S_ALU_OR_K:
                case BPF_S_ALU_OR_X:
                case BPF_S_ALU_XOR_K:
                case BPF_S_ALU_XOR_X:
                case BPF_S_ALU_LSH_K:
                case BPF_S_ALU_LSH_X:
                case BPF_S_ALU_RSH_K:
                case BPF_S_ALU_RSH_X:
                case BPF_S_ALU_NEG:
                case BPF_S_LD_IMM:
                case BPF_S_LDX_IMM:
                case BPF_S_MISC_TAX:
                case BPF_S_MISC_TXA:
                case BPF_S_ALU_DIV_K:
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
                case BPF_S_STX:
                case BPF_S_JMP_JA:
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_K:
                case BPF_S_JMP_JSET_X:
                        continue;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
        struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
        u32 ret = SECCOMP_RET_ALLOW;

        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
                return SECCOMP_RET_KILL;

        /* Make sure cross-thread synced filter points somewhere sane. */
        smp_read_barrier_depends();
        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
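        /*
         * Example (action values from <linux/seccomp.h>): SECCOMP_RET_KILL
         * is 0x00000000U and SECCOMP_RET_ALLOW is 0x7fff0000U, so a single
         * filter returning KILL overrides any number of filters returning
         * ALLOW.
         */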
        for (; f; f = f->prev) {
                u32 cur_ret = sk_run_filter(NULL, f->insns);

                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
        return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
        assert_spin_locked(&current->sighand->siglock);

        if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
                return false;

        return true;
}
static inline void seccomp_assign_mode(struct task_struct *task,
                                       unsigned long seccomp_mode)
{
        assert_spin_locked(&task->sighand->siglock);

        task->seccomp.mode = seccomp_mode;
        /*
         * Make sure TIF_SECCOMP cannot be set before the mode (and
         * filter) are protected by the siglock.
         */
        smp_mb__before_atomic();
        set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
                       struct seccomp_filter *child)
{
        /* NULL is the root ancestor. */
        if (parent == NULL)
                return 1;
        for (; child; child = child->prev)
                if (child == parent)
                        return 1;
        return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or it did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH */
                if (unlikely(WARN_ON(failed == 0)))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}
/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference. (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
                        /*
                         * Don't let an unprivileged task work around
                         * the no_new_privs restriction by creating
                         * a thread that sets it up, enters seccomp,
                         * then dies.
                         */
                        if (task_no_new_privs(caller))
                                task_set_no_new_privs(thread);

                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
                }
        }
}
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *filter;
        unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
        unsigned long total_insns = fprog->len;
        long ret;

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);
        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        for (filter = current->seccomp.filter; filter; filter = filter->prev)
                total_insns += filter->len + 4;  /* include a 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return ERR_PTR(-ENOMEM);

        /*
         * Installing a seccomp filter requires that the task have
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable_noaudit(current_cred(), current_user_ns(),
                                     CAP_SYS_ADMIN) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter */
        filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
                         GFP_KERNEL|__GFP_NOWARN);
        if (!filter)
                return ERR_PTR(-ENOMEM);
        atomic_set(&filter->usage, 1);
        filter->len = fprog->len;

        /* Copy the instructions from fprog. */
        ret = -EFAULT;
        if (copy_from_user(filter->insns, fprog->filter, fp_size))
                goto fail;

        /* Check and rewrite the fprog via the skb checker */
        ret = sk_chk_filter(filter->insns, filter->len);
        if (ret)
                goto fail;

        /* Check and rewrite the fprog for seccomp use */
        ret = seccomp_check_filter(filter->insns, filter->len);
        if (ret)
                goto fail;

        return filter;

fail:
        kfree(filter);
        return ERR_PTR(ret);
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                struct compat_sock_fprog fprog32;
                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->len + 4;  /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret)
                        return ret;
        }

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads();

        return 0;
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
        /* Reference count is bounded by the number of total processes. */
        atomic_inc(&orig->usage);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter) {
                kfree(filter);
        }
}
/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        /* Clean up single-reference branches iteratively. */
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
        struct siginfo info;
        memset(&info, 0, sizeof(info));
        info.si_signo = SIGSYS;
        info.si_code = SYS_SECCOMP;
        info.si_call_addr = (void __user *)KSTK_EIP(current);
        info.si_errno = reason;
        info.si_arch = syscall_get_arch();
        info.si_syscall = syscall;
        force_sig_info(SIGSYS, &info, current);
}
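/*
 * Illustrative userspace receiver, not part of this file: a process whose
 * filter returns SECCOMP_RET_TRAP can catch the resulting SIGSYS and read
 * back the fields populated above (macro names as exposed by recent glibc):
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		// info->si_errno carries the filter's 16-bit reason,
 *		// info->si_syscall the number of the trapped system call.
 *	}
 *
 *	struct sigaction act = { .sa_sigaction = sigsys_handler,
 *				 .sa_flags = SA_SIGINFO };
 *	sigaction(SIGSYS, &act, NULL);
 */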
#endif /* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
        __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
        0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
        __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
        0, /* null terminated */
};
#endif
int __secure_computing(int this_syscall)
{
        int exit_sig = 0;
        int *syscall;
        u32 ret;

        /*
         * Make sure that any changes to mode from another thread have
         * been seen after TIF_SECCOMP was seen.
         */
        rmb();

        switch (current->seccomp.mode) {
        case SECCOMP_MODE_STRICT:
                syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
                if (is_compat_task())
                        syscall = mode1_syscalls_32;
#endif
                do {
                        if (*syscall == this_syscall)
                                return 0;
                } while (*++syscall);
                exit_sig = SIGKILL;
                ret = SECCOMP_RET_KILL;
                break;
#ifdef CONFIG_SECCOMP_FILTER
        case SECCOMP_MODE_FILTER: {
                int data;
                struct pt_regs *regs = task_pt_regs(current);
                ret = seccomp_run_filters(this_syscall);
                data = ret & SECCOMP_RET_DATA;
                ret &= SECCOMP_RET_ACTION;
                switch (ret) {
                case SECCOMP_RET_ERRNO:
                        /* Set the low-order 16-bits as an errno. */
                        syscall_set_return_value(current, regs,
                                                 -data, 0);
                        goto skip;
                case SECCOMP_RET_TRAP:
                        /* Show the handler the original registers. */
                        syscall_rollback(current, regs);
                        /* Let the filter pass back 16 bits of data. */
                        seccomp_send_sigsys(this_syscall, data);
                        goto skip;
                case SECCOMP_RET_TRACE:
                        /* Skip these calls if there is no tracer. */
                        if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
                                syscall_set_return_value(current, regs,
                                                         -ENOSYS, 0);
                                goto skip;
                        }
                        /* Allow the BPF to provide the event message */
                        ptrace_event(PTRACE_EVENT_SECCOMP, data);
                        /*
                         * The delivery of a fatal signal during event
                         * notification may silently skip tracer notification.
                         * Terminating the task now avoids executing a system
                         * call that may not be intended.
                         */
                        if (fatal_signal_pending(current))
                                break;
                        if (syscall_get_nr(current, regs) < 0)
                                goto skip;  /* Explicit request to skip. */

                        return 0;
                case SECCOMP_RET_ALLOW:
                        return 0;
                case SECCOMP_RET_KILL:
                default:
                        break;
                }
                exit_sig = SIGSYS;
                break;
        }
#endif
        default:
                BUG();
        }

        audit_seccomp(this_syscall, exit_sig, ret);
        do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
        audit_seccomp(this_syscall, exit_sig, ret);
#endif
        return -1;
}
long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        seccomp_assign_mode(current, seccomp_mode);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_free;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       const char __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        default:
                return -EINVAL;
        }
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                const char __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}
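/*
 * Illustrative userspace call, not part of this file (a sketch assuming a
 * libc without a seccomp() wrapper, so the raw syscall number is used, and
 * reusing the insns[] array sketched earlier):
 *
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * With SECCOMP_FILTER_FLAG_TSYNC the filter is applied to every thread in
 * the caller's thread group, provided seccomp_can_sync_threads() above
 * reports that all of them are eligible.
 */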
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
        unsigned int op;
        char __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl has always ignored the
                 * filter, so make sure it is always NULL here to pass the
                 * internal check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}