/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
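/*
 * Illustrative userspace sketch (not part of this file; assumes the
 * UAPI headers <linux/seccomp.h>, <linux/filter.h>, <linux/audit.h>
 * and <sys/prctl.h>): entering mode 2 by installing a minimal classic
 * BPF filter that kills the task on an unexpected architecture and
 * allows everything else.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, arch)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */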
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
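/*
 * Illustrative shape of the tree described above: a task attaches
 * filter A and forks; each resulting task then attaches its own
 * filter.  Both lists end at the shared node A, which is only freed
 * once both branches have dropped their references:
 *
 *	task1->seccomp.filter -> B -> A
 *	task2->seccomp.filter -> C -> A
 */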
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
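/*
 * With the 8-byte struct sock_filter, this limit works out to
 * (1 << 18) / 8 = 32768 classic BPF instructions along any one path.
 */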
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
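/*
 * Illustrative example of the rewrite above (not part of the original
 * file): the userspace instruction
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *		 offsetof(struct seccomp_data, nr))
 *
 * passes the bounds/alignment check (offset 0, 32-bit aligned) and is
 * rewritten to BPF_LDX | BPF_W | BPF_ABS, which the converted program
 * services from the struct seccomp_data argument instead of a socket
 * buffer.  An unaligned or out-of-range offset rejects the whole
 * filter with -EINVAL.
 */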
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
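/*
 * Example of the precedence rule above: if one attached filter returns
 * SECCOMP_RET_TRACE and another returns SECCOMP_RET_ERRNO for the same
 * syscall, the ERRNO action wins because its action value is
 * numerically lower; SECCOMP_RET_KILL (value 0) beats everything.
 */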
#endif /* CONFIG_SECCOMP_FILTER */
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}
static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) are protected by the sighand lock.
	 */
	smp_mb__before_atomic();
	set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}
/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}
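/*
 * Illustrative userspace counterpart to the synchronization above:
 * attaching with the TSYNC flag applies the new filter to every
 * thread, or reports the first thread that cannot be switched.
 *
 *	ret = seccomp(SECCOMP_SET_MODE_FILTER,
 *		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *	// ret == 0 on success, or the TID of a thread that could not
 *	// be synchronized
 */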
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}
void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&filter->usage);
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}
static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}
/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;

	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
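/*
 * Illustrative userspace handler for the signal sent above (assumes a
 * libc exposing the SIGSYS siginfo fields), installed with SA_SIGINFO:
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		// info->si_syscall: number of the trapped syscall
 *		// info->si_errno:   the 16 bits of SECCOMP_RET_DATA
 *		// info->si_arch:    AUDIT_ARCH_* of the call site
 *	}
 */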
#endif /* CONFIG_SECCOMP_FILTER */
/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL	(1 << 0)
#define SECCOMP_LOG_TRAP	(1 << 2)
#define SECCOMP_LOG_ERRNO	(1 << 3)
#define SECCOMP_LOG_TRACE	(1 << 4)
#define SECCOMP_LOG_LOG		(1 << 5)
#define SECCOMP_LOG_ALLOW	(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL | SECCOMP_LOG_TRAP |
				    SECCOMP_LOG_ERRNO | SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;
static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
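/*
 * Illustrative use of mode 1 (not part of this file): after
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * only the four syscalls listed above remain usable; any other syscall
 * results in SIGKILL.
 */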
static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL, true);
	do_exit(SIGKILL);
}
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else
#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
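/*
 * Example of the SECCOMP_RET_ERRNO path above: a filter returning
 * (SECCOMP_RET_ERRNO | EPERM) causes the trapped syscall to be skipped
 * and to return -EPERM to userspace, the errno taken from the low 16
 * SECCOMP_RET_DATA bits and capped at MAX_ERRNO.
 */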
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif
static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
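/*
 * Illustrative userspace probe using this operation:
 *
 *	u32 action = SECCOMP_RET_LOG;
 *	ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &action);
 *	// ret == 0 if this kernel knows the action,
 *	// otherwise the call fails with EOPNOTSUPP
 */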
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;
		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	__get_seccomp_filter(filter);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif
#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_NAME		"kill"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_NAME	" "
				SECCOMP_RET_TRAP_NAME	" "
				SECCOMP_RET_ERRNO_NAME	" "
				SECCOMP_RET_TRACE_NAME	" "
				SECCOMP_RET_LOG_NAME	" "
				SECCOMP_RET_ALLOW_NAME;
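/*
 * Example sysctl interaction (illustrative):
 *
 *	# cat /proc/sys/kernel/seccomp/actions_avail
 *	kill trap errno trace log allow
 *	# echo "kill trap errno" > /proc/sys/kernel/seccomp/actions_logged
 *
 * "allow" is accepted in actions_avail but rejected for actions_logged
 * (see seccomp_actions_logged_handler below).
 */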
struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL, SECCOMP_RET_KILL_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};
static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}
static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}
static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}
static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}
static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};
static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};
static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */