/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
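/*
 * Illustrative userspace sketch (an assumption for documentation, not part
 * of this file): both modes are reached through prctl(2), and mode 2 also
 * through seccomp(2):
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);		// mode 1
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &fprog);	// mode 2
 *
 * where fprog is a hypothetical struct sock_fprog holding a classic BPF
 * program (see the sketch near seccomp_prepare_user_filter() below).
 */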
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *	   get/put helpers should be used when accessing an instance
 *	   outside of a lifetime-guarded section. In general, this
 *	   is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited
 * filter. However, multiple filters may share a @prev node, by way of
 * fork(), which results in a unidirectional tree existing in memory.
 * This is similar to how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
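/*
 * Arithmetic note: with sizeof(struct sock_filter) == 8 bytes, the limit
 * above works out to (1 << 18) / 8 = 32768 classic BPF instructions along
 * any single path from a leaf filter back to the root.
 */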
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
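/*
 * Worked example of the rewrite above (a sketch; the offset depends only
 * on the UAPI struct seccomp_data): the classic BPF instruction
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr))
 *
 * passes the bounds and 4-byte-alignment checks and has its opcode
 * rewritten in place to BPF_LDX | BPF_W | BPF_ABS, so at runtime the load
 * is serviced from the seccomp_data buffer rather than an skb.
 */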
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
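/*
 * Precedence sketch (values assumed from uapi/linux/seccomp.h): with two
 * stacked filters returning SECCOMP_RET_ALLOW (0x7fff0000) and
 * SECCOMP_RET_ERRNO | EPERM (0x00050001), the loop above keeps the lower
 * action, so the syscall fails with EPERM regardless of filter order.
 */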
#endif /* CONFIG_SECCOMP_FILTER */
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}
/*
 * If a given speculation mitigation is opt-in (prctl()-controlled),
 * select it, by disabling speculation (enabling mitigation).
 */
static inline void spec_mitigate(struct task_struct *task,
				 unsigned long which)
{
	int state = arch_prctl_spec_ctrl_get(task, which);

	if (state > 0 && (state & PR_SPEC_PRCTL))
		arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
}
static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) are protected by the siglock.
	 */
	smp_mb__before_atomic();
	/* Assume seccomp processes want speculation flaw mitigation. */
	spec_mitigate(task, PR_SPEC_STORE_BYPASS);
	set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or it did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}
/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference. (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;

		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}
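/*
 * A minimal filter that the helpers above would accept (an illustrative
 * userspace sketch; __NR_getpid is an assumed target syscall):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog fprog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 * This fails getpid() with EPERM and allows every other syscall.
 */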
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}
void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&filter->usage);
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	if (!orig)
		return;
	__get_seccomp_filter(orig);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}
static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;

		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}
/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;

	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
#endif	/* CONFIG_SECCOMP_FILTER */
/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL		(1 << 0)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)
static u32 seccomp_actions_logged = SECCOMP_LOG_KILL | SECCOMP_LOG_TRAP |
				    SECCOMP_LOG_ERRNO | SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;
static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif

	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL, true);
	do_exit(SIGKILL);
}
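/*
 * Mode 1 usage sketch (illustrative, from userspace): after
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * any syscall other than read(2), write(2), exit(2) and sigreturn(2) misses
 * the whitelist walk above and the task is killed with SIGKILL.
 */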
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else
#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return 0;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif
static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
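/*
 * Illustrative seccomp(2) invocation (a userspace sketch; fprog as in the
 * example near seccomp_prepare_user_filter() above):
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &fprog);
 *
 * With TSYNC the call returns 0 on success or the thread ID of the first
 * thread that could not be synchronized (see seccomp_can_sync_threads()).
 */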
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	__get_seccomp_filter(filter);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif
#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_NAME		"kill"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] = SECCOMP_RET_KILL_NAME	" "
					    SECCOMP_RET_TRAP_NAME	" "
					    SECCOMP_RET_ERRNO_NAME	" "
					    SECCOMP_RET_TRACE_NAME	" "
					    SECCOMP_RET_LOG_NAME	" "
					    SECCOMP_RET_ALLOW_NAME;
struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL, SECCOMP_RET_KILL_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};
static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}
static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}
static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}
static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}
static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};
static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};
static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */
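/*
 * Administration sketch (illustrative shell commands, not kernel code):
 * the sysctl table above surfaces two files, so e.g.
 *
 *	cat /proc/sys/kernel/seccomp/actions_avail
 *	echo "kill trap errno" > /proc/sys/kernel/seccomp/actions_logged
 *
 * lists the supported actions and restricts audit logging to the named
 * subset; writing "allow" is rejected by the handler above.
 */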