1 /*
2 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by the GPLv2 license.
4 *
5 * Test code for seccomp bpf.
6 */
7
8 #define _GNU_SOURCE
9 #include <sys/types.h>
10
11 /*
12 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
13 * we need to use the kernel's siginfo.h file and trick glibc
14 * into accepting it.
15 */
16 #if !__GLIBC_PREREQ(2, 26)
17 # include <asm/siginfo.h>
18 # define __have_siginfo_t 1
19 # define __have_sigval_t 1
20 # define __have_sigevent_t 1
21 #endif
22
23 #include <errno.h>
24 #include <linux/filter.h>
25 #include <sys/prctl.h>
26 #include <sys/ptrace.h>
27 #include <sys/user.h>
28 #include <linux/prctl.h>
29 #include <linux/ptrace.h>
30 #include <linux/seccomp.h>
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <stddef.h>
35 #include <stdbool.h>
36 #include <string.h>
37 #include <time.h>
38 #include <linux/elf.h>
39 #include <sys/uio.h>
40 #include <sys/utsname.h>
41 #include <sys/fcntl.h>
42 #include <sys/mman.h>
43 #include <sys/times.h>
44 #include <sys/socket.h>
45 #include <sys/ioctl.h>
46
47 #include <unistd.h>
48 #include <sys/syscall.h>
49 #include <poll.h>
50
51 #include "../kselftest_harness.h"
52
53 #ifndef PR_SET_PTRACER
54 # define PR_SET_PTRACER 0x59616d61
55 #endif
56
57 #ifndef PR_SET_NO_NEW_PRIVS
58 #define PR_SET_NO_NEW_PRIVS 38
59 #define PR_GET_NO_NEW_PRIVS 39
60 #endif
61
62 #ifndef PR_SECCOMP_EXT
63 #define PR_SECCOMP_EXT 43
64 #endif
65
66 #ifndef SECCOMP_EXT_ACT
67 #define SECCOMP_EXT_ACT 1
68 #endif
69
70 #ifndef SECCOMP_EXT_ACT_TSYNC
71 #define SECCOMP_EXT_ACT_TSYNC 1
72 #endif
73
74 #ifndef SECCOMP_MODE_STRICT
75 #define SECCOMP_MODE_STRICT 1
76 #endif
77
78 #ifndef SECCOMP_MODE_FILTER
79 #define SECCOMP_MODE_FILTER 2
80 #endif
81
82 #ifndef SECCOMP_RET_ALLOW
83 struct seccomp_data {
84 int nr;
85 __u32 arch;
86 __u64 instruction_pointer;
87 __u64 args[6];
88 };
89 #endif
90
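/*
 * Fallback SECCOMP_RET_* definitions for older kernel headers. A filter's
 * return value packs an action into the high bits and 16 bits of data
 * (SECCOMP_RET_DATA) into the low bits, e.g. SECCOMP_RET_ERRNO | EPERM or
 * SECCOMP_RET_TRACE | 0x1002 as used by the tests below.
 */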
91 #ifndef SECCOMP_RET_KILL_PROCESS
92 #define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
93 #define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
94 #endif
95 #ifndef SECCOMP_RET_KILL
96 #define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
97 #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
98 #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
99 #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
100 #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
101 #endif
102 #ifndef SECCOMP_RET_LOG
103 #define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
104 #endif
105
106 #ifndef __NR_seccomp
107 # if defined(__i386__)
108 # define __NR_seccomp 354
109 # elif defined(__x86_64__)
110 # define __NR_seccomp 317
111 # elif defined(__arm__)
112 # define __NR_seccomp 383
113 # elif defined(__aarch64__)
114 # define __NR_seccomp 277
115 # elif defined(__hppa__)
116 # define __NR_seccomp 338
117 # elif defined(__powerpc__)
118 # define __NR_seccomp 358
119 # elif defined(__s390__)
120 # define __NR_seccomp 348
121 # else
122 # warning "seccomp syscall number unknown for this architecture"
123 # define __NR_seccomp 0xffff
124 # endif
125 #endif
126
127 #ifndef SECCOMP_SET_MODE_STRICT
128 #define SECCOMP_SET_MODE_STRICT 0
129 #endif
130
131 #ifndef SECCOMP_SET_MODE_FILTER
132 #define SECCOMP_SET_MODE_FILTER 1
133 #endif
134
135 #ifndef SECCOMP_GET_ACTION_AVAIL
136 #define SECCOMP_GET_ACTION_AVAIL 2
137 #endif
138
139 #ifndef SECCOMP_GET_NOTIF_SIZES
140 #define SECCOMP_GET_NOTIF_SIZES 3
141 #endif
142
143 #ifndef SECCOMP_FILTER_FLAG_TSYNC
144 #define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
145 #endif
146
147 #ifndef SECCOMP_FILTER_FLAG_LOG
148 #define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
149 #endif
150
151 #ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
152 #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
153 #endif
154
155 #ifndef PTRACE_SECCOMP_GET_METADATA
156 #define PTRACE_SECCOMP_GET_METADATA 0x420d
157
158 struct seccomp_metadata {
159 __u64 filter_off; /* Input: which filter */
160 __u64 flags; /* Output: filter's flags */
161 };
162 #endif
163
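/*
 * Fallback definitions for the user-notification ABI (flag, action, ioctls
 * and structs), mirroring the UAPI headers for toolchains that predate
 * SECCOMP_FILTER_FLAG_NEW_LISTENER. A seccomp_notif carries a unique id,
 * the triggering pid and the syscall's seccomp_data; the listener replies
 * with a seccomp_notif_resp naming the same id plus the return value or
 * error to deliver.
 */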
164 #ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
165 #define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
166
167 #define SECCOMP_RET_USER_NOTIF 0x7fc00000U
168
169 #define SECCOMP_IOC_MAGIC '!'
170 #define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
171 #define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
172 #define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
173 #define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
174
175 /* Flags for seccomp notification fd ioctl. */
176 #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
177 #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
178 struct seccomp_notif_resp)
179 #define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
180
181 struct seccomp_notif {
182 __u64 id;
183 __u32 pid;
184 __u32 flags;
185 struct seccomp_data data;
186 };
187
188 struct seccomp_notif_resp {
189 __u64 id;
190 __s64 val;
191 __s32 error;
192 __u32 flags;
193 };
194
195 struct seccomp_notif_sizes {
196 __u16 seccomp_notif;
197 __u16 seccomp_notif_resp;
198 __u16 seccomp_data;
199 };
200 #endif
201
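/*
 * Minimal raw-syscall wrapper for environments where libc does not provide
 * a seccomp(2) wrapper. Used throughout as, e.g.:
 *
 *	seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
 */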
202 #ifndef seccomp
203 int seccomp(unsigned int op, unsigned int flags, void *args)
204 {
205 errno = 0;
206 return syscall(__NR_seccomp, op, flags, args);
207 }
208 #endif
209
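/*
 * seccomp_data.args[] entries are 64-bit, but classic BPF only loads 32-bit
 * words (BPF_W). syscall_arg(n) therefore points at the low 32 bits of
 * argument n: on big-endian targets those live in the second word, hence
 * the extra sizeof(__u32) offset.
 */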
210 #if __BYTE_ORDER == __LITTLE_ENDIAN
211 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
212 #elif __BYTE_ORDER == __BIG_ENDIAN
213 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
214 #else
215 #error "wut? Unknown __BYTE_ORDER?!"
216 #endif
217
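/* Distinctive exit values so a parent can tell how a sibling thread fared. */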
218 #define SIBLING_EXIT_UNKILLED 0xbadbeef
219 #define SIBLING_EXIT_FAILURE 0xbadface
220 #define SIBLING_EXIT_NEWPRIVS 0xbadfeed
221
222 TEST(mode_strict_support)
223 {
224 long ret;
225
226 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
227 ASSERT_EQ(0, ret) {
228 TH_LOG("Kernel does not support CONFIG_SECCOMP");
229 }
230 syscall(__NR_exit, 0);
231 }
232
233 TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
234 {
235 long ret;
236
237 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
238 ASSERT_EQ(0, ret) {
239 TH_LOG("Kernel does not support CONFIG_SECCOMP");
240 }
241 syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
242 NULL, NULL, NULL);
243 EXPECT_FALSE(true) {
244 TH_LOG("Unreachable!");
245 }
246 }
247
248 /* Note! This doesn't test no_new_privs behavior. */
249 TEST(no_new_privs_support)
250 {
251 long ret;
252
253 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
254 EXPECT_EQ(0, ret) {
255 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
256 }
257 }
258
259 /* Tests kernel support by checking for a copy_from_user() fault on NULL. */
260 TEST(mode_filter_support)
261 {
262 long ret;
263
264 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
265 ASSERT_EQ(0, ret) {
266 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
267 }
268 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
269 EXPECT_EQ(-1, ret);
270 EXPECT_EQ(EFAULT, errno) {
271 TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
272 }
273 }
274
275 TEST(mode_filter_without_nnp)
276 {
277 struct sock_filter filter[] = {
278 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
279 };
280 struct sock_fprog prog = {
281 .len = (unsigned short)ARRAY_SIZE(filter),
282 .filter = filter,
283 };
284 long ret;
285
286 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
287 ASSERT_LE(0, ret) {
288 TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
289 }
290 errno = 0;
291 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
292 /* Succeeds with CAP_SYS_ADMIN, fails without */
293 /* TODO(wad) check caps not euid */
294 if (geteuid()) {
295 EXPECT_EQ(-1, ret);
296 EXPECT_EQ(EACCES, errno);
297 } else {
298 EXPECT_EQ(0, ret);
299 }
300 }
301
302 #define MAX_INSNS_PER_PATH 32768
303
304 TEST(filter_size_limits)
305 {
306 int i;
307 int count = BPF_MAXINSNS + 1;
308 struct sock_filter allow[] = {
309 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
310 };
311 struct sock_filter *filter;
312 struct sock_fprog prog = { };
313 long ret;
314
315 filter = calloc(count, sizeof(*filter));
316 ASSERT_NE(NULL, filter);
317
318 for (i = 0; i < count; i++)
319 filter[i] = allow[0];
320
321 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
322 ASSERT_EQ(0, ret);
323
324 prog.filter = filter;
325 prog.len = count;
326
327 /* Too many filter instructions in a single filter. */
328 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
329 ASSERT_NE(0, ret) {
330 TH_LOG("Installing %d insn filter was allowed", prog.len);
331 }
332
333 /* One less is okay, though. */
334 prog.len -= 1;
335 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
336 ASSERT_EQ(0, ret) {
337 TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
338 }
339 }
340
341 TEST(filter_chain_limits)
342 {
343 int i;
344 int count = BPF_MAXINSNS;
345 struct sock_filter allow[] = {
346 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
347 };
348 struct sock_filter *filter;
349 struct sock_fprog prog = { };
350 long ret;
351
352 filter = calloc(count, sizeof(*filter));
353 ASSERT_NE(NULL, filter);
354
355 for (i = 0; i < count; i++)
356 filter[i] = allow[0];
357
358 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
359 ASSERT_EQ(0, ret);
360
361 prog.filter = filter;
362 prog.len = 1;
363
364 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
365 ASSERT_EQ(0, ret);
366
367 prog.len = count;
368
369 /* Too many total filter instructions. */
370 for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
371 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
372 if (ret != 0)
373 break;
374 }
375 ASSERT_NE(0, ret) {
376 TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
377 i, count, i * (count + 4));
378 }
379 }
380
381 TEST(mode_filter_cannot_move_to_strict)
382 {
383 struct sock_filter filter[] = {
384 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
385 };
386 struct sock_fprog prog = {
387 .len = (unsigned short)ARRAY_SIZE(filter),
388 .filter = filter,
389 };
390 long ret;
391
392 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
393 ASSERT_EQ(0, ret);
394
395 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
396 ASSERT_EQ(0, ret);
397
398 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
399 EXPECT_EQ(-1, ret);
400 EXPECT_EQ(EINVAL, errno);
401 }
402
403
404 TEST(mode_filter_get_seccomp)
405 {
406 struct sock_filter filter[] = {
407 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
408 };
409 struct sock_fprog prog = {
410 .len = (unsigned short)ARRAY_SIZE(filter),
411 .filter = filter,
412 };
413 long ret;
414
415 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
416 ASSERT_EQ(0, ret);
417
418 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
419 EXPECT_EQ(0, ret);
420
421 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
422 ASSERT_EQ(0, ret);
423
424 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
425 EXPECT_EQ(2, ret);
426 }
427
428
429 TEST(ALLOW_all)
430 {
431 struct sock_filter filter[] = {
432 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
433 };
434 struct sock_fprog prog = {
435 .len = (unsigned short)ARRAY_SIZE(filter),
436 .filter = filter,
437 };
438 long ret;
439
440 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
441 ASSERT_EQ(0, ret);
442
443 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
444 ASSERT_EQ(0, ret);
445 }
446
447 TEST(empty_prog)
448 {
449 struct sock_filter filter[] = {
450 };
451 struct sock_fprog prog = {
452 .len = (unsigned short)ARRAY_SIZE(filter),
453 .filter = filter,
454 };
455 long ret;
456
457 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
458 ASSERT_EQ(0, ret);
459
460 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
461 EXPECT_EQ(-1, ret);
462 EXPECT_EQ(EINVAL, errno);
463 }
464
465 TEST(log_all)
466 {
467 struct sock_filter filter[] = {
468 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
469 };
470 struct sock_fprog prog = {
471 .len = (unsigned short)ARRAY_SIZE(filter),
472 .filter = filter,
473 };
474 long ret;
475 pid_t parent = getppid();
476
477 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
478 ASSERT_EQ(0, ret);
479
480 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
481 ASSERT_EQ(0, ret);
482
483 /* getppid() should succeed and be logged (no check for logging) */
484 EXPECT_EQ(parent, syscall(__NR_getppid));
485 }
486
487 TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
488 {
489 struct sock_filter filter[] = {
490 BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
491 };
492 struct sock_fprog prog = {
493 .len = (unsigned short)ARRAY_SIZE(filter),
494 .filter = filter,
495 };
496 long ret;
497
498 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
499 ASSERT_EQ(0, ret);
500
501 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
502 ASSERT_EQ(0, ret);
503 EXPECT_EQ(0, syscall(__NR_getpid)) {
504 TH_LOG("getpid() shouldn't ever return");
505 }
506 }
507
508 /* Return codes >= 0x80000000 are unused. */
509 TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
510 {
511 struct sock_filter filter[] = {
512 BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
513 };
514 struct sock_fprog prog = {
515 .len = (unsigned short)ARRAY_SIZE(filter),
516 .filter = filter,
517 };
518 long ret;
519
520 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
521 ASSERT_EQ(0, ret);
522
523 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
524 ASSERT_EQ(0, ret);
525 EXPECT_EQ(0, syscall(__NR_getpid)) {
526 TH_LOG("getpid() shouldn't ever return");
527 }
528 }
529
530 TEST_SIGNAL(KILL_all, SIGSYS)
531 {
532 struct sock_filter filter[] = {
533 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
534 };
535 struct sock_fprog prog = {
536 .len = (unsigned short)ARRAY_SIZE(filter),
537 .filter = filter,
538 };
539 long ret;
540
541 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
542 ASSERT_EQ(0, ret);
543
544 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
545 ASSERT_EQ(0, ret);
546 }
547
548 TEST_SIGNAL(KILL_one, SIGSYS)
549 {
550 struct sock_filter filter[] = {
551 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
552 offsetof(struct seccomp_data, nr)),
553 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
554 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
555 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
556 };
557 struct sock_fprog prog = {
558 .len = (unsigned short)ARRAY_SIZE(filter),
559 .filter = filter,
560 };
561 long ret;
562 pid_t parent = getppid();
563
564 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
565 ASSERT_EQ(0, ret);
566
567 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
568 ASSERT_EQ(0, ret);
569
570 EXPECT_EQ(parent, syscall(__NR_getppid));
571 /* getpid() should never return. */
572 EXPECT_EQ(0, syscall(__NR_getpid));
573 }
574
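/*
 * Filter pattern used by the arg-inspection tests below: match the syscall
 * number, then load the low 32 bits of an argument with syscall_arg() and
 * kill only when it equals a chosen sentinel value.
 */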
575 TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
576 {
577 void *fatal_address;
578 struct sock_filter filter[] = {
579 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
580 offsetof(struct seccomp_data, nr)),
581 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
582 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
583 /* Only bother with the lower 32 bits for now. */
584 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
585 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
586 (unsigned long)&fatal_address, 0, 1),
587 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
588 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
589 };
590 struct sock_fprog prog = {
591 .len = (unsigned short)ARRAY_SIZE(filter),
592 .filter = filter,
593 };
594 long ret;
595 pid_t parent = getppid();
596 struct tms timebuf;
597 clock_t clock = times(&timebuf);
598
599 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
600 ASSERT_EQ(0, ret);
601
602 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
603 ASSERT_EQ(0, ret);
604
605 EXPECT_EQ(parent, syscall(__NR_getppid));
606 EXPECT_LE(clock, syscall(__NR_times, &timebuf));
607 /* times() should never return. */
608 EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
609 }
610
611 TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
612 {
613 #ifndef __NR_mmap2
614 int sysno = __NR_mmap;
615 #else
616 int sysno = __NR_mmap2;
617 #endif
618 struct sock_filter filter[] = {
619 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
620 offsetof(struct seccomp_data, nr)),
621 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
622 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
623 /* Only bother with the lower 32 bits for now. */
624 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
625 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
626 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
627 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
628 };
629 struct sock_fprog prog = {
630 .len = (unsigned short)ARRAY_SIZE(filter),
631 .filter = filter,
632 };
633 long ret;
634 pid_t parent = getppid();
635 int fd;
636 void *map1, *map2;
637 int page_size = sysconf(_SC_PAGESIZE);
638
639 ASSERT_LT(0, page_size);
640
641 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
642 ASSERT_EQ(0, ret);
643
644 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
645 ASSERT_EQ(0, ret);
646
647 fd = open("/dev/zero", O_RDONLY);
648 ASSERT_NE(-1, fd);
649
650 EXPECT_EQ(parent, syscall(__NR_getppid));
651 map1 = (void *)syscall(sysno,
652 NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
653 EXPECT_NE(MAP_FAILED, map1);
654 /* mmap2() should never return. */
655 map2 = (void *)syscall(sysno,
656 NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
657 EXPECT_EQ(MAP_FAILED, map2);
658
659 /* If we got here the test failed, so clean up the resources. */
660 munmap(map1, page_size);
661 munmap(map2, page_size);
662 close(fd);
663 }
664
665 /* Thread function that can die via a seccomp filter violation. */
666 void *kill_thread(void *data)
667 {
668 bool die = (bool)data;
669
670 if (die) {
671 prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
672 return (void *)SIBLING_EXIT_FAILURE;
673 }
674
675 return (void *)SIBLING_EXIT_UNKILLED;
676 }
677
678 /* Prepare a thread that will kill itself or both of us. */
679 void kill_thread_or_group(struct __test_metadata *_metadata, bool kill_process)
680 {
681 pthread_t thread;
682 void *status;
683 /* Kill only when calling __NR_prctl. */
684 struct sock_filter filter_thread[] = {
685 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
686 offsetof(struct seccomp_data, nr)),
687 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
688 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
689 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
690 };
691 struct sock_fprog prog_thread = {
692 .len = (unsigned short)ARRAY_SIZE(filter_thread),
693 .filter = filter_thread,
694 };
695 struct sock_filter filter_process[] = {
696 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
697 offsetof(struct seccomp_data, nr)),
698 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
699 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_PROCESS),
700 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
701 };
702 struct sock_fprog prog_process = {
703 .len = (unsigned short)ARRAY_SIZE(filter_process),
704 .filter = filter_process,
705 };
706
707 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
708 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
709 }
710
711 ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
712 kill_process ? &prog_process : &prog_thread));
713
714 /*
715 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
716 * flag cannot be downgraded by a new filter.
717 */
718 ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));
719
720 /* Start a thread that will exit immediately. */
721 ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
722 ASSERT_EQ(0, pthread_join(thread, &status));
723 ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);
724
725 /* Start a thread that will die immediately. */
726 ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
727 ASSERT_EQ(0, pthread_join(thread, &status));
728 ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);
729
730 /*
731 * If we get here, only the spawned thread died. Let the parent know
732 * the whole process didn't die (i.e. this thread, the spawner,
733 * stayed running).
734 */
735 exit(42);
736 }
737
738 TEST(KILL_thread)
739 {
740 int status;
741 pid_t child_pid;
742
743 child_pid = fork();
744 ASSERT_LE(0, child_pid);
745 if (child_pid == 0) {
746 kill_thread_or_group(_metadata, false);
747 _exit(38);
748 }
749
750 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
751
752 /* If only the thread was killed, we'll see exit 42. */
753 ASSERT_TRUE(WIFEXITED(status));
754 ASSERT_EQ(42, WEXITSTATUS(status));
755 }
756
757 TEST(KILL_process)
758 {
759 int status;
760 pid_t child_pid;
761
762 child_pid = fork();
763 ASSERT_LE(0, child_pid);
764 if (child_pid == 0) {
765 kill_thread_or_group(_metadata, true);
766 _exit(38);
767 }
768
769 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
770
771 /* If the entire process was killed, we'll see SIGSYS. */
772 ASSERT_TRUE(WIFSIGNALED(status));
773 ASSERT_EQ(SIGSYS, WTERMSIG(status));
774 }
775
776 /* TODO(wad) add 64-bit versus 32-bit arg tests. */
777 TEST(arg_out_of_range)
778 {
779 struct sock_filter filter[] = {
780 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
781 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
782 };
783 struct sock_fprog prog = {
784 .len = (unsigned short)ARRAY_SIZE(filter),
785 .filter = filter,
786 };
787 long ret;
788
789 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
790 ASSERT_EQ(0, ret);
791
792 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
793 EXPECT_EQ(-1, ret);
794 EXPECT_EQ(EINVAL, errno);
795 }
796
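/*
 * Helper that declares a filter returning SECCOMP_RET_ERRNO | errno for
 * read(2) and SECCOMP_RET_ALLOW for everything else. For example,
 * ERRNO_FILTER(valid, E2BIG) defines prog_valid, installed below with
 * PR_SET_SECCOMP.
 */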
797 #define ERRNO_FILTER(name, errno) \
798 struct sock_filter _read_filter_##name[] = { \
799 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \
800 offsetof(struct seccomp_data, nr)), \
801 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \
802 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \
803 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \
804 }; \
805 struct sock_fprog prog_##name = { \
806 .len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \
807 .filter = _read_filter_##name, \
808 }
809
810 /* Make sure basic errno values are correctly passed through a filter. */
811 TEST(ERRNO_valid)
812 {
813 ERRNO_FILTER(valid, E2BIG);
814 long ret;
815 pid_t parent = getppid();
816
817 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
818 ASSERT_EQ(0, ret);
819
820 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
821 ASSERT_EQ(0, ret);
822
823 EXPECT_EQ(parent, syscall(__NR_getppid));
824 EXPECT_EQ(-1, read(0, NULL, 0));
825 EXPECT_EQ(E2BIG, errno);
826 }
827
828 /* Make sure an errno of zero is correctly handled by the arch code. */
829 TEST(ERRNO_zero)
830 {
831 ERRNO_FILTER(zero, 0);
832 long ret;
833 pid_t parent = getppid();
834
835 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
836 ASSERT_EQ(0, ret);
837
838 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
839 ASSERT_EQ(0, ret);
840
841 EXPECT_EQ(parent, syscall(__NR_getppid));
842 /* "errno" of 0 is ok. */
843 EXPECT_EQ(0, read(0, NULL, 0));
844 }
845
846 /*
847 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
848 * This tests that the errno value gets capped correctly, fixed by
849 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
850 */
851 TEST(ERRNO_capped)
852 {
853 ERRNO_FILTER(capped, 4096);
854 long ret;
855 pid_t parent = getppid();
856
857 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
858 ASSERT_EQ(0, ret);
859
860 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
861 ASSERT_EQ(0, ret);
862
863 EXPECT_EQ(parent, syscall(__NR_getppid));
864 EXPECT_EQ(-1, read(0, NULL, 0));
865 EXPECT_EQ(4095, errno);
866 }
867
868 /*
869 * Filters are processed in reverse order: last applied is executed first.
870 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
871 * SECCOMP_RET_DATA mask results will follow the most recently applied
872 * matching filter return (and not the lowest or highest value).
873 */
874 TEST(ERRNO_order)
875 {
876 ERRNO_FILTER(first, 11);
877 ERRNO_FILTER(second, 13);
878 ERRNO_FILTER(third, 12);
879 long ret;
880 pid_t parent = getppid();
881
882 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
883 ASSERT_EQ(0, ret);
884
885 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
886 ASSERT_EQ(0, ret);
887
888 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
889 ASSERT_EQ(0, ret);
890
891 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
892 ASSERT_EQ(0, ret);
893
894 EXPECT_EQ(parent, syscall(__NR_getppid));
895 EXPECT_EQ(-1, read(0, NULL, 0));
896 EXPECT_EQ(12, errno);
897 }
898
899 FIXTURE_DATA(TRAP) {
900 struct sock_fprog prog;
901 };
902
903 FIXTURE_SETUP(TRAP)
904 {
905 struct sock_filter filter[] = {
906 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
907 offsetof(struct seccomp_data, nr)),
908 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
909 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
910 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
911 };
912
913 memset(&self->prog, 0, sizeof(self->prog));
914 self->prog.filter = malloc(sizeof(filter));
915 ASSERT_NE(NULL, self->prog.filter);
916 memcpy(self->prog.filter, filter, sizeof(filter));
917 self->prog.len = (unsigned short)ARRAY_SIZE(filter);
918 }
919
920 FIXTURE_TEARDOWN(TRAP)
921 {
922 if (self->prog.filter)
923 free(self->prog.filter);
924 }
925
926 TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
927 {
928 long ret;
929
930 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
931 ASSERT_EQ(0, ret);
932
933 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
934 ASSERT_EQ(0, ret);
935 syscall(__NR_getpid);
936 }
937
938 /* Ensure that SIGSYS overrides SIG_IGN */
939 TEST_F_SIGNAL(TRAP, ign, SIGSYS)
940 {
941 long ret;
942
943 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
944 ASSERT_EQ(0, ret);
945
946 signal(SIGSYS, SIG_IGN);
947
948 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
949 ASSERT_EQ(0, ret);
950 syscall(__NR_getpid);
951 }
952
953 static siginfo_t TRAP_info;
954 static volatile int TRAP_nr;
955 static void TRAP_action(int nr, siginfo_t *info, void *void_context)
956 {
957 memcpy(&TRAP_info, info, sizeof(TRAP_info));
958 TRAP_nr = nr;
959 }
960
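/*
 * Install a SIGSYS handler and verify the siginfo delivered for a trapped
 * getpid(): the syscall number, AUDIT_ARCH_* value and address of the
 * calling instruction should all be filled in.
 */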
961 TEST_F(TRAP, handler)
962 {
963 int ret, test;
964 struct sigaction act;
965 sigset_t mask;
966
967 memset(&act, 0, sizeof(act));
968 sigemptyset(&mask);
969 sigaddset(&mask, SIGSYS);
970
971 act.sa_sigaction = &TRAP_action;
972 act.sa_flags = SA_SIGINFO;
973 ret = sigaction(SIGSYS, &act, NULL);
974 ASSERT_EQ(0, ret) {
975 TH_LOG("sigaction failed");
976 }
977 ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
978 ASSERT_EQ(0, ret) {
979 TH_LOG("sigprocmask failed");
980 }
981
982 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
983 ASSERT_EQ(0, ret);
984 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
985 ASSERT_EQ(0, ret);
986 TRAP_nr = 0;
987 memset(&TRAP_info, 0, sizeof(TRAP_info));
988 /* Expect the registers to be rolled back. (nr = error) may vary
989 * based on arch. */
990 ret = syscall(__NR_getpid);
991 /* Silence gcc warning about volatile. */
992 test = TRAP_nr;
993 EXPECT_EQ(SIGSYS, test);
994 struct local_sigsys {
995 void *_call_addr; /* calling user insn */
996 int _syscall; /* triggering system call number */
997 unsigned int _arch; /* AUDIT_ARCH_* of syscall */
998 } *sigsys = (struct local_sigsys *)
999 #ifdef si_syscall
1000 &(TRAP_info.si_call_addr);
1001 #else
1002 &TRAP_info.si_pid;
1003 #endif
1004 EXPECT_EQ(__NR_getpid, sigsys->_syscall);
1005 /* Make sure arch is non-zero. */
1006 EXPECT_NE(0, sigsys->_arch);
1007 EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
1008 }
1009
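/*
 * Each non-allow filter below returns its named action for getpid() and
 * SECCOMP_RET_ALLOW for everything else, so stacking them in different
 * orders exercises the documented return-value precedence.
 */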
1010 FIXTURE_DATA(precedence) {
1011 struct sock_fprog allow;
1012 struct sock_fprog log;
1013 struct sock_fprog trace;
1014 struct sock_fprog error;
1015 struct sock_fprog trap;
1016 struct sock_fprog kill;
1017 };
1018
1019 FIXTURE_SETUP(precedence)
1020 {
1021 struct sock_filter allow_insns[] = {
1022 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1023 };
1024 struct sock_filter log_insns[] = {
1025 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1026 offsetof(struct seccomp_data, nr)),
1027 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1028 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1029 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
1030 };
1031 struct sock_filter trace_insns[] = {
1032 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1033 offsetof(struct seccomp_data, nr)),
1034 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1035 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1036 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
1037 };
1038 struct sock_filter error_insns[] = {
1039 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1040 offsetof(struct seccomp_data, nr)),
1041 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1042 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1043 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
1044 };
1045 struct sock_filter trap_insns[] = {
1046 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1047 offsetof(struct seccomp_data, nr)),
1048 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1049 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1050 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
1051 };
1052 struct sock_filter kill_insns[] = {
1053 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1054 offsetof(struct seccomp_data, nr)),
1055 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1056 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1057 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
1058 };
1059
1060 memset(self, 0, sizeof(*self));
1061 #define FILTER_ALLOC(_x) \
1062 self->_x.filter = malloc(sizeof(_x##_insns)); \
1063 ASSERT_NE(NULL, self->_x.filter); \
1064 memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
1065 self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
1066 FILTER_ALLOC(allow);
1067 FILTER_ALLOC(log);
1068 FILTER_ALLOC(trace);
1069 FILTER_ALLOC(error);
1070 FILTER_ALLOC(trap);
1071 FILTER_ALLOC(kill);
1072 }
1073
1074 FIXTURE_TEARDOWN(precedence)
1075 {
1076 #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
1077 FILTER_FREE(allow);
1078 FILTER_FREE(log);
1079 FILTER_FREE(trace);
1080 FILTER_FREE(error);
1081 FILTER_FREE(trap);
1082 FILTER_FREE(kill);
1083 }
1084
1085 TEST_F(precedence, allow_ok)
1086 {
1087 pid_t parent, res = 0;
1088 long ret;
1089
1090 parent = getppid();
1091 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1092 ASSERT_EQ(0, ret);
1093
1094 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1095 ASSERT_EQ(0, ret);
1096 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1097 ASSERT_EQ(0, ret);
1098 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1099 ASSERT_EQ(0, ret);
1100 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1101 ASSERT_EQ(0, ret);
1102 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1103 ASSERT_EQ(0, ret);
1104 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1105 ASSERT_EQ(0, ret);
1106 /* Should work just fine. */
1107 res = syscall(__NR_getppid);
1108 EXPECT_EQ(parent, res);
1109 }
1110
1111 TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
1112 {
1113 pid_t parent, res = 0;
1114 long ret;
1115
1116 parent = getppid();
1117 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1118 ASSERT_EQ(0, ret);
1119
1120 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1121 ASSERT_EQ(0, ret);
1122 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1123 ASSERT_EQ(0, ret);
1124 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1125 ASSERT_EQ(0, ret);
1126 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1127 ASSERT_EQ(0, ret);
1128 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1129 ASSERT_EQ(0, ret);
1130 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1131 ASSERT_EQ(0, ret);
1132 /* Should work just fine. */
1133 res = syscall(__NR_getppid);
1134 EXPECT_EQ(parent, res);
1135 /* getpid() should never return. */
1136 res = syscall(__NR_getpid);
1137 EXPECT_EQ(0, res);
1138 }
1139
1140 TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
1141 {
1142 pid_t parent;
1143 long ret;
1144
1145 parent = getppid();
1146 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1147 ASSERT_EQ(0, ret);
1148
1149 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1150 ASSERT_EQ(0, ret);
1151 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1152 ASSERT_EQ(0, ret);
1153 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1154 ASSERT_EQ(0, ret);
1155 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1156 ASSERT_EQ(0, ret);
1157 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1158 ASSERT_EQ(0, ret);
1159 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1160 ASSERT_EQ(0, ret);
1161 /* Should work just fine. */
1162 EXPECT_EQ(parent, syscall(__NR_getppid));
1163 /* getpid() should never return. */
1164 EXPECT_EQ(0, syscall(__NR_getpid));
1165 }
1166
1167 TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
1168 {
1169 pid_t parent;
1170 long ret;
1171
1172 parent = getppid();
1173 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1174 ASSERT_EQ(0, ret);
1175
1176 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1177 ASSERT_EQ(0, ret);
1178 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1179 ASSERT_EQ(0, ret);
1180 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1181 ASSERT_EQ(0, ret);
1182 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1183 ASSERT_EQ(0, ret);
1184 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1185 ASSERT_EQ(0, ret);
1186 /* Should work just fine. */
1187 EXPECT_EQ(parent, syscall(__NR_getppid));
1188 /* getpid() should never return. */
1189 EXPECT_EQ(0, syscall(__NR_getpid));
1190 }
1191
1192 TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
1193 {
1194 pid_t parent;
1195 long ret;
1196
1197 parent = getppid();
1198 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1199 ASSERT_EQ(0, ret);
1200
1201 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1202 ASSERT_EQ(0, ret);
1203 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1204 ASSERT_EQ(0, ret);
1205 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1206 ASSERT_EQ(0, ret);
1207 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1208 ASSERT_EQ(0, ret);
1209 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1210 ASSERT_EQ(0, ret);
1211 /* Should work just fine. */
1212 EXPECT_EQ(parent, syscall(__NR_getppid));
1213 /* getpid() should never return. */
1214 EXPECT_EQ(0, syscall(__NR_getpid));
1215 }
1216
1217 TEST_F(precedence, errno_is_third)
1218 {
1219 pid_t parent;
1220 long ret;
1221
1222 parent = getppid();
1223 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1224 ASSERT_EQ(0, ret);
1225
1226 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1227 ASSERT_EQ(0, ret);
1228 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1229 ASSERT_EQ(0, ret);
1230 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1231 ASSERT_EQ(0, ret);
1232 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1233 ASSERT_EQ(0, ret);
1234 /* Should work just fine. */
1235 EXPECT_EQ(parent, syscall(__NR_getppid));
1236 EXPECT_EQ(0, syscall(__NR_getpid));
1237 }
1238
1239 TEST_F(precedence, errno_is_third_in_any_order)
1240 {
1241 pid_t parent;
1242 long ret;
1243
1244 parent = getppid();
1245 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1246 ASSERT_EQ(0, ret);
1247
1248 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1249 ASSERT_EQ(0, ret);
1250 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1251 ASSERT_EQ(0, ret);
1252 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1253 ASSERT_EQ(0, ret);
1254 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1255 ASSERT_EQ(0, ret);
1256 /* Should work just fine. */
1257 EXPECT_EQ(parent, syscall(__NR_getppid));
1258 EXPECT_EQ(0, syscall(__NR_getpid));
1259 }
1260
1261 TEST_F(precedence, trace_is_fourth)
1262 {
1263 pid_t parent;
1264 long ret;
1265
1266 parent = getppid();
1267 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1268 ASSERT_EQ(0, ret);
1269
1270 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1271 ASSERT_EQ(0, ret);
1272 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1273 ASSERT_EQ(0, ret);
1274 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1275 ASSERT_EQ(0, ret);
1276 /* Should work just fine. */
1277 EXPECT_EQ(parent, syscall(__NR_getppid));
1278 /* No ptracer */
1279 EXPECT_EQ(-1, syscall(__NR_getpid));
1280 }
1281
1282 TEST_F(precedence, trace_is_fourth_in_any_order)
1283 {
1284 pid_t parent;
1285 long ret;
1286
1287 parent = getppid();
1288 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1289 ASSERT_EQ(0, ret);
1290
1291 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1292 ASSERT_EQ(0, ret);
1293 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1294 ASSERT_EQ(0, ret);
1295 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1296 ASSERT_EQ(0, ret);
1297 /* Should work just fine. */
1298 EXPECT_EQ(parent, syscall(__NR_getppid));
1299 /* No ptracer */
1300 EXPECT_EQ(-1, syscall(__NR_getpid));
1301 }
1302
1303 TEST_F(precedence, log_is_fifth)
1304 {
1305 pid_t mypid, parent;
1306 long ret;
1307
1308 mypid = getpid();
1309 parent = getppid();
1310 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1311 ASSERT_EQ(0, ret);
1312
1313 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1314 ASSERT_EQ(0, ret);
1315 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1316 ASSERT_EQ(0, ret);
1317 /* Should work just fine. */
1318 EXPECT_EQ(parent, syscall(__NR_getppid));
1319 /* Should also work just fine */
1320 EXPECT_EQ(mypid, syscall(__NR_getpid));
1321 }
1322
1323 TEST_F(precedence, log_is_fifth_in_any_order)
1324 {
1325 pid_t mypid, parent;
1326 long ret;
1327
1328 mypid = getpid();
1329 parent = getppid();
1330 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1331 ASSERT_EQ(0, ret);
1332
1333 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1334 ASSERT_EQ(0, ret);
1335 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1336 ASSERT_EQ(0, ret);
1337 /* Should work just fine. */
1338 EXPECT_EQ(parent, syscall(__NR_getppid));
1339 /* Should also work just fine */
1340 EXPECT_EQ(mypid, syscall(__NR_getpid));
1341 }
1342
1343 #ifndef PTRACE_O_TRACESECCOMP
1344 #define PTRACE_O_TRACESECCOMP 0x00000080
1345 #endif
1346
1347 /* Catch the Ubuntu 12.04 value error. */
1348 #if PTRACE_EVENT_SECCOMP != 7
1349 #undef PTRACE_EVENT_SECCOMP
1350 #endif
1351
1352 #ifndef PTRACE_EVENT_SECCOMP
1353 #define PTRACE_EVENT_SECCOMP 7
1354 #endif
1355
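/*
 * For a PTRACE_EVENT_SECCOMP stop, waitpid() reports the event number in
 * bits 16 and up of the status word.
 */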
1356 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
1357 bool tracer_running;
1358 void tracer_stop(int sig)
1359 {
1360 tracer_running = false;
1361 }
1362
1363 typedef void tracer_func_t(struct __test_metadata *_metadata,
1364 pid_t tracee, int status, void *args);
1365
1366 void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
1367 tracer_func_t tracer_func, void *args, bool ptrace_syscall)
1368 {
1369 int ret = -1;
1370 struct sigaction action = {
1371 .sa_handler = tracer_stop,
1372 };
1373
1374 /* Allow external shutdown. */
1375 tracer_running = true;
1376 ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
1377
1378 errno = 0;
1379 while (ret == -1 && errno != EINVAL)
1380 ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
1381 ASSERT_EQ(0, ret) {
1382 kill(tracee, SIGKILL);
1383 }
1384 /* Wait for attach stop */
1385 wait(NULL);
1386
1387 ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
1388 PTRACE_O_TRACESYSGOOD :
1389 PTRACE_O_TRACESECCOMP);
1390 ASSERT_EQ(0, ret) {
1391 TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
1392 kill(tracee, SIGKILL);
1393 }
1394 ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
1395 tracee, NULL, 0);
1396 ASSERT_EQ(0, ret);
1397
1398 /* Unblock the tracee */
1399 ASSERT_EQ(1, write(fd, "A", 1));
1400 ASSERT_EQ(0, close(fd));
1401
1402 /* Run until we're shut down. Must assert to stop execution. */
1403 while (tracer_running) {
1404 int status;
1405
1406 if (wait(&status) != tracee)
1407 continue;
1408 if (WIFSIGNALED(status) || WIFEXITED(status))
1409 /* Child is dead. Time to go. */
1410 return;
1411
1412 /* Check if this is a seccomp event. */
1413 ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status));
1414
1415 tracer_func(_metadata, tracee, status, args);
1416
1417 ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
1418 tracee, NULL, 0);
1419 ASSERT_EQ(0, ret);
1420 }
1421 /* Directly report the status of our test harness results. */
1422 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
1423 }
1424
1425 /* Common tracer setup/teardown functions. */
1426 void cont_handler(int num)
1427 { }
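/*
 * Fork a tracer for the current process and synchronize over a pipe so the
 * tracee does not proceed until PTRACE_ATTACH has completed. Fixtures pair
 * this with teardown_trace_fixture(), e.g.:
 *
 *	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
 *					   &self->tracer_args, false);
 *	...
 *	teardown_trace_fixture(_metadata, self->tracer);
 */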
1428 pid_t setup_trace_fixture(struct __test_metadata *_metadata,
1429 tracer_func_t func, void *args, bool ptrace_syscall)
1430 {
1431 char sync;
1432 int pipefd[2];
1433 pid_t tracer_pid;
1434 pid_t tracee = getpid();
1435
1436 /* Setup a pipe for clean synchronization. */
1437 ASSERT_EQ(0, pipe(pipefd));
1438
1439 /* Fork a child which we'll promote to tracer */
1440 tracer_pid = fork();
1441 ASSERT_LE(0, tracer_pid);
1442 signal(SIGALRM, cont_handler);
1443 if (tracer_pid == 0) {
1444 close(pipefd[0]);
1445 start_tracer(_metadata, pipefd[1], tracee, func, args,
1446 ptrace_syscall);
1447 syscall(__NR_exit, 0);
1448 }
1449 close(pipefd[1]);
1450 prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
1451 read(pipefd[0], &sync, 1);
1452 close(pipefd[0]);
1453
1454 return tracer_pid;
1455 }
1456 void teardown_trace_fixture(struct __test_metadata *_metadata,
1457 pid_t tracer)
1458 {
1459 if (tracer) {
1460 int status;
1461 /*
1462 * Extract the exit code from the other process and
1463 * adopt it for ourselves in case its asserts failed.
1464 */
1465 ASSERT_EQ(0, kill(tracer, SIGUSR1));
1466 ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
1467 if (WEXITSTATUS(status))
1468 _metadata->passed = 0;
1469 }
1470 }
1471
1472 /* "poke" tracer arguments and function. */
1473 struct tracer_args_poke_t {
1474 unsigned long poke_addr;
1475 };
1476
1477 void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
1478 void *args)
1479 {
1480 int ret;
1481 unsigned long msg;
1482 struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
1483
1484 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1485 EXPECT_EQ(0, ret);
1486 /* If this fails, don't try to recover. */
1487 ASSERT_EQ(0x1001, msg) {
1488 kill(tracee, SIGKILL);
1489 }
1490 /*
1491 * Poke in the message.
1492 * Registers are not touched to try to keep this relatively arch
1493 * agnostic.
1494 */
1495 ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
1496 EXPECT_EQ(0, ret);
1497 }
1498
1499 FIXTURE_DATA(TRACE_poke) {
1500 struct sock_fprog prog;
1501 pid_t tracer;
1502 long poked;
1503 struct tracer_args_poke_t tracer_args;
1504 };
1505
1506 FIXTURE_SETUP(TRACE_poke)
1507 {
1508 struct sock_filter filter[] = {
1509 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1510 offsetof(struct seccomp_data, nr)),
1511 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
1512 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
1513 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1514 };
1515
1516 self->poked = 0;
1517 memset(&self->prog, 0, sizeof(self->prog));
1518 self->prog.filter = malloc(sizeof(filter));
1519 ASSERT_NE(NULL, self->prog.filter);
1520 memcpy(self->prog.filter, filter, sizeof(filter));
1521 self->prog.len = (unsigned short)ARRAY_SIZE(filter);
1522
1523 /* Set up tracer args. */
1524 self->tracer_args.poke_addr = (unsigned long)&self->poked;
1525
1526 /* Launch tracer. */
1527 self->tracer = setup_trace_fixture(_metadata, tracer_poke,
1528 &self->tracer_args, false);
1529 }
1530
1531 FIXTURE_TEARDOWN(TRACE_poke)
1532 {
1533 teardown_trace_fixture(_metadata, self->tracer);
1534 if (self->prog.filter)
1535 free(self->prog.filter);
1536 }
1537
1538 TEST_F(TRACE_poke, read_has_side_effects)
1539 {
1540 ssize_t ret;
1541
1542 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1543 ASSERT_EQ(0, ret);
1544
1545 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1546 ASSERT_EQ(0, ret);
1547
1548 EXPECT_EQ(0, self->poked);
1549 ret = read(-1, NULL, 0);
1550 EXPECT_EQ(-1, ret);
1551 EXPECT_EQ(0x1001, self->poked);
1552 }
1553
1554 TEST_F(TRACE_poke, getpid_runs_normally)
1555 {
1556 long ret;
1557
1558 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1559 ASSERT_EQ(0, ret);
1560
1561 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1562 ASSERT_EQ(0, ret);
1563
1564 EXPECT_EQ(0, self->poked);
1565 EXPECT_NE(0, syscall(__NR_getpid));
1566 EXPECT_EQ(0, self->poked);
1567 }
1568
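/*
 * Per-architecture register plumbing: ARCH_REGS is the ptrace register set,
 * SYSCALL_NUM the register holding the syscall number and SYSCALL_RET the
 * register receiving its return value. Architectures where the two share a
 * register also define SYSCALL_NUM_RET_SHARE_REG.
 */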
1569 #if defined(__x86_64__)
1570 # define ARCH_REGS struct user_regs_struct
1571 # define SYSCALL_NUM orig_rax
1572 # define SYSCALL_RET rax
1573 #elif defined(__i386__)
1574 # define ARCH_REGS struct user_regs_struct
1575 # define SYSCALL_NUM orig_eax
1576 # define SYSCALL_RET eax
1577 #elif defined(__arm__)
1578 # define ARCH_REGS struct pt_regs
1579 # define SYSCALL_NUM ARM_r7
1580 # define SYSCALL_RET ARM_r0
1581 #elif defined(__aarch64__)
1582 # define ARCH_REGS struct user_pt_regs
1583 # define SYSCALL_NUM regs[8]
1584 # define SYSCALL_RET regs[0]
1585 #elif defined(__hppa__)
1586 # define ARCH_REGS struct user_regs_struct
1587 # define SYSCALL_NUM gr[20]
1588 # define SYSCALL_RET gr[28]
1589 #elif defined(__powerpc__)
1590 # define ARCH_REGS struct pt_regs
1591 # define SYSCALL_NUM gpr[0]
1592 # define SYSCALL_RET gpr[3]
1593 #elif defined(__s390__)
1594 # define ARCH_REGS s390_regs
1595 # define SYSCALL_NUM gprs[2]
1596 # define SYSCALL_RET gprs[2]
1597 #elif defined(__mips__)
1598 # define ARCH_REGS struct pt_regs
1599 # define SYSCALL_NUM regs[2]
1600 # define SYSCALL_SYSCALL_NUM regs[4]
1601 # define SYSCALL_RET regs[2]
1602 # define SYSCALL_NUM_RET_SHARE_REG
1603 #else
1604 # error "Do not know how to find your architecture's registers and syscalls"
1605 #endif
1606
1607 /* When the syscall return can't be changed, stub out the tests for it. */
1608 #ifdef SYSCALL_NUM_RET_SHARE_REG
1609 # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
1610 #else
1611 # define EXPECT_SYSCALL_RETURN(val, action) \
1612 do { \
1613 errno = 0; \
1614 if (val < 0) { \
1615 EXPECT_EQ(-1, action); \
1616 EXPECT_EQ(-(val), errno); \
1617 } else { \
1618 EXPECT_EQ(val, action); \
1619 } \
1620 } while (0)
1621 #endif
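/*
 * So EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)) expects -1 with
 * errno == ESRCH, while EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid))
 * expects the faked value directly (the check degrades to just "-1" when
 * the return register cannot be modified).
 */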
1622
1623 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
1624 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
1625 */
1626 #if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
1627 #define HAVE_GETREGS
1628 #endif
1629
1630 /* Architecture-specific syscall fetching routine. */
1631 int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
1632 {
1633 ARCH_REGS regs;
1634 #ifdef HAVE_GETREGS
1635 EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) {
1636 TH_LOG("PTRACE_GETREGS failed");
1637 return -1;
1638 }
1639 #else
1640 struct iovec iov;
1641
1642 iov.iov_base = &regs;
1643 iov.iov_len = sizeof(regs);
1644 EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
1645 TH_LOG("PTRACE_GETREGSET failed");
1646 return -1;
1647 }
1648 #endif
1649
1650 #if defined(__mips__)
1651 if (regs.SYSCALL_NUM == __NR_O32_Linux)
1652 return regs.SYSCALL_SYSCALL_NUM;
1653 #endif
1654 return regs.SYSCALL_NUM;
1655 }
1656
1657 /* Architecture-specific syscall changing routine. */
1658 void change_syscall(struct __test_metadata *_metadata,
1659 pid_t tracee, int syscall, int result)
1660 {
1661 int ret;
1662 ARCH_REGS regs;
1663 #ifdef HAVE_GETREGS
1664 ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs);
1665 #else
1666 struct iovec iov;
1667 iov.iov_base = &regs;
1668 iov.iov_len = sizeof(regs);
1669 ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
1670 #endif
1671 EXPECT_EQ(0, ret) {}
1672
1673 #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
1674 defined(__s390__) || defined(__hppa__)
1675 {
1676 regs.SYSCALL_NUM = syscall;
1677 }
1678 #elif defined(__mips__)
1679 {
1680 if (regs.SYSCALL_NUM == __NR_O32_Linux)
1681 regs.SYSCALL_SYSCALL_NUM = syscall;
1682 else
1683 regs.SYSCALL_NUM = syscall;
1684 }
1685
1686 #elif defined(__arm__)
1687 # ifndef PTRACE_SET_SYSCALL
1688 # define PTRACE_SET_SYSCALL 23
1689 # endif
1690 {
1691 ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
1692 EXPECT_EQ(0, ret);
1693 }
1694
1695 #elif defined(__aarch64__)
1696 # ifndef NT_ARM_SYSTEM_CALL
1697 # define NT_ARM_SYSTEM_CALL 0x404
1698 # endif
1699 {
1700 iov.iov_base = &syscall;
1701 iov.iov_len = sizeof(syscall);
1702 ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL,
1703 &iov);
1704 EXPECT_EQ(0, ret);
1705 }
1706
1707 #else
1708 ASSERT_EQ(1, 0) {
1709 TH_LOG("How is the syscall changed on this architecture?");
1710 }
1711 #endif
1712
1713 /* If syscall is skipped, change return value. */
1714 if (syscall == -1)
1715 #ifdef SYSCALL_NUM_RET_SHARE_REG
1716 TH_LOG("Can't modify syscall return on this architecture");
1717 #else
1718 regs.SYSCALL_RET = result;
1719 #endif
1720
1721 #ifdef HAVE_GETREGS
1722 ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
1723 #else
1724 iov.iov_base = &regs;
1725 iov.iov_len = sizeof(regs);
1726 ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
1727 #endif
1728 EXPECT_EQ(0, ret);
1729 }
1730
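/*
 * SECCOMP_RET_TRACE handler: the low 16 bits of the filter's return value
 * arrive via PTRACE_GETEVENTMSG and pick the action (redirect getpid to
 * getppid, skip gettid and fake a return of 45000, or skip openat with
 * -ESRCH). change_syscall() with a syscall of -1 skips the call and stores
 * the given result where the architecture allows it.
 */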
1731 void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1732 int status, void *args)
1733 {
1734 int ret;
1735 unsigned long msg;
1736
1737 /* Make sure we got the right message. */
1738 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1739 EXPECT_EQ(0, ret);
1740
1741 /* Validate and take action on expected syscalls. */
1742 switch (msg) {
1743 case 0x1002:
1744 /* change getpid to getppid. */
1745 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1746 change_syscall(_metadata, tracee, __NR_getppid, 0);
1747 break;
1748 case 0x1003:
1749 /* skip gettid with valid return code. */
1750 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1751 change_syscall(_metadata, tracee, -1, 45000);
1752 break;
1753 case 0x1004:
1754 /* skip openat with error. */
1755 EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
1756 change_syscall(_metadata, tracee, -1, -ESRCH);
1757 break;
1758 case 0x1005:
1759 /* do nothing (allow getppid) */
1760 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1761 break;
1762 default:
1763 EXPECT_EQ(0, msg) {
1764 TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
1765 kill(tracee, SIGKILL);
1766 }
1767 }
1768
1769 }
1770
1771 void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
1772 int status, void *args)
1773 {
1774 int ret, nr;
1775 unsigned long msg;
1776 static bool entry;
1777
1778 /* Make sure we got an empty message. */
1779 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1780 EXPECT_EQ(0, ret);
1781 EXPECT_EQ(0, msg);
1782
1783 /* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */
1784 entry = !entry;
1785 if (!entry)
1786 return;
1787
1788 nr = get_syscall(_metadata, tracee);
1789
1790 if (nr == __NR_getpid)
1791 change_syscall(_metadata, tracee, __NR_getppid, 0);
1792 if (nr == __NR_gettid)
1793 change_syscall(_metadata, tracee, -1, 45000);
1794 if (nr == __NR_openat)
1795 change_syscall(_metadata, tracee, -1, -ESRCH);
1796 }
1797
1798 FIXTURE_DATA(TRACE_syscall) {
1799 struct sock_fprog prog;
1800 pid_t tracer, mytid, mypid, parent;
1801 };
1802
1803 FIXTURE_SETUP(TRACE_syscall)
1804 {
1805 struct sock_filter filter[] = {
1806 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1807 offsetof(struct seccomp_data, nr)),
1808 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
1809 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
1810 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
1811 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
1812 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
1813 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
1814 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1815 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
1816 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1817 };
1818
1819 memset(&self->prog, 0, sizeof(self->prog));
1820 self->prog.filter = malloc(sizeof(filter));
1821 ASSERT_NE(NULL, self->prog.filter);
1822 memcpy(self->prog.filter, filter, sizeof(filter));
1823 self->prog.len = (unsigned short)ARRAY_SIZE(filter);
1824
1825 /* Prepare some testable syscall results. */
1826 self->mytid = syscall(__NR_gettid);
1827 ASSERT_GT(self->mytid, 0);
1828 ASSERT_NE(self->mytid, 1) {
1829 TH_LOG("Running this test as init is not supported. :)");
1830 }
1831
1832 self->mypid = getpid();
1833 ASSERT_GT(self->mypid, 0);
1834 ASSERT_EQ(self->mytid, self->mypid);
1835
1836 self->parent = getppid();
1837 ASSERT_GT(self->parent, 0);
1838 ASSERT_NE(self->parent, self->mypid);
1839
1840 /* Launch tracer. */
1841 self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL,
1842 false);
1843 }
1844
1845 FIXTURE_TEARDOWN(TRACE_syscall)
1846 {
1847 teardown_trace_fixture(_metadata, self->tracer);
1848 if (self->prog.filter)
1849 free(self->prog.filter);
1850 }
1851
1852 TEST_F(TRACE_syscall, ptrace_syscall_redirected)
1853 {
1854 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1855 teardown_trace_fixture(_metadata, self->tracer);
1856 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1857 true);
1858
1859 /* Tracer will redirect getpid to getppid. */
1860 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1861 }
1862
1863 TEST_F(TRACE_syscall, ptrace_syscall_errno)
1864 {
1865 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1866 teardown_trace_fixture(_metadata, self->tracer);
1867 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1868 true);
1869
1870 /* Tracer should skip the openat syscall, resulting in ESRCH. */
1871 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1872 }
1873
1874 TEST_F(TRACE_syscall, ptrace_syscall_faked)
1875 {
1876 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1877 teardown_trace_fixture(_metadata, self->tracer);
1878 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1879 true);
1880
1881 /* Tracer should skip the gettid syscall, resulting in a fake pid. */
1882 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1883 }
1884
1885 TEST_F(TRACE_syscall, syscall_allowed)
1886 {
1887 long ret;
1888
1889 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1890 ASSERT_EQ(0, ret);
1891
1892 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1893 ASSERT_EQ(0, ret);
1894
1895 /* getppid works as expected (no changes). */
1896 EXPECT_EQ(self->parent, syscall(__NR_getppid));
1897 EXPECT_NE(self->mypid, syscall(__NR_getppid));
1898 }
1899
1900 TEST_F(TRACE_syscall, syscall_redirected)
1901 {
1902 long ret;
1903
1904 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1905 ASSERT_EQ(0, ret);
1906
1907 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1908 ASSERT_EQ(0, ret);
1909
1910 /* getpid has been redirected to getppid as expected. */
1911 EXPECT_EQ(self->parent, syscall(__NR_getpid));
1912 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1913 }
1914
1915 TEST_F(TRACE_syscall, syscall_errno)
1916 {
1917 long ret;
1918
1919 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1920 ASSERT_EQ(0, ret);
1921
1922 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1923 ASSERT_EQ(0, ret);
1924
1925 /* openat has been skipped and an errno returned. */
1926 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1927 }
1928
1929 TEST_F(TRACE_syscall, syscall_faked)
1930 {
1931 long ret;
1932
1933 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1934 ASSERT_EQ(0, ret);
1935
1936 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1937 ASSERT_EQ(0, ret);
1938
1939 /* gettid has been skipped and an altered return value stored. */
1940 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1941 }
1942
1943 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
1944 {
1945 struct sock_filter filter[] = {
1946 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1947 offsetof(struct seccomp_data, nr)),
1948 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1949 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
1950 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1951 };
1952 struct sock_fprog prog = {
1953 .len = (unsigned short)ARRAY_SIZE(filter),
1954 .filter = filter,
1955 };
1956 long ret;
1957
1958 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1959 ASSERT_EQ(0, ret);
1960
1961 /* Install fixture filter. */
1962 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1963 ASSERT_EQ(0, ret);
1964
1965 /* Install "errno on getppid" filter. */
1966 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
1967 ASSERT_EQ(0, ret);
1968
1969 /* Tracer will redirect getpid to getppid, and we should see EPERM. */
1970 errno = 0;
1971 EXPECT_EQ(-1, syscall(__NR_getpid));
1972 EXPECT_EQ(EPERM, errno);
1973 }
1974
1975 TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS)
1976 {
1977 struct sock_filter filter[] = {
1978 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1979 offsetof(struct seccomp_data, nr)),
1980 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1981 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
1982 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1983 };
1984 struct sock_fprog prog = {
1985 .len = (unsigned short)ARRAY_SIZE(filter),
1986 .filter = filter,
1987 };
1988 long ret;
1989
1990 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1991 ASSERT_EQ(0, ret);
1992
1993 /* Install fixture filter. */
1994 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1995 ASSERT_EQ(0, ret);
1996
1997 /* Install "death on getppid" filter. */
1998 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
1999 ASSERT_EQ(0, ret);
2000
2001 /* Tracer will redirect getpid to getppid, and we should die. */
2002 EXPECT_NE(self->mypid, syscall(__NR_getpid));
2003 }
2004
2005 TEST_F(TRACE_syscall, skip_after_ptrace)
2006 {
2007 struct sock_filter filter[] = {
2008 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2009 offsetof(struct seccomp_data, nr)),
2010 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
2011 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
2012 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2013 };
2014 struct sock_fprog prog = {
2015 .len = (unsigned short)ARRAY_SIZE(filter),
2016 .filter = filter,
2017 };
2018 long ret;
2019
2020 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
2021 teardown_trace_fixture(_metadata, self->tracer);
2022 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
2023 true);
2024
2025 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2026 ASSERT_EQ(0, ret);
2027
2028 /* Install "errno on getppid" filter. */
2029 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2030 ASSERT_EQ(0, ret);
2031
2032 /* Tracer will redirect getpid to getppid, and we should see EPERM. */
2033 EXPECT_EQ(-1, syscall(__NR_getpid));
2034 EXPECT_EQ(EPERM, errno);
2035 }
2036
2037 TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS)
2038 {
2039 struct sock_filter filter[] = {
2040 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2041 offsetof(struct seccomp_data, nr)),
2042 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
2043 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2044 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2045 };
2046 struct sock_fprog prog = {
2047 .len = (unsigned short)ARRAY_SIZE(filter),
2048 .filter = filter,
2049 };
2050 long ret;
2051
2052 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
2053 teardown_trace_fixture(_metadata, self->tracer);
2054 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
2055 true);
2056
2057 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2058 ASSERT_EQ(0, ret);
2059
2060 /* Install "death on getppid" filter. */
2061 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2062 ASSERT_EQ(0, ret);
2063
2064 /* Tracer will redirect getpid to getppid, and we should die. */
2065 EXPECT_NE(self->mypid, syscall(__NR_getpid));
2066 }
2067
2068 TEST(seccomp_syscall)
2069 {
2070 struct sock_filter filter[] = {
2071 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2072 };
2073 struct sock_fprog prog = {
2074 .len = (unsigned short)ARRAY_SIZE(filter),
2075 .filter = filter,
2076 };
2077 long ret;
2078
2079 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2080 ASSERT_EQ(0, ret) {
2081 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2082 }
2083
2084 /* Reject insane operation. */
2085 ret = seccomp(-1, 0, &prog);
2086 ASSERT_NE(ENOSYS, errno) {
2087 TH_LOG("Kernel does not support seccomp syscall!");
2088 }
2089 EXPECT_EQ(EINVAL, errno) {
2090 TH_LOG("Did not reject crazy op value!");
2091 }
2092
2093 /* Reject strict with flags or pointer. */
2094 ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
2095 EXPECT_EQ(EINVAL, errno) {
2096 TH_LOG("Did not reject mode strict with flags!");
2097 }
2098 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
2099 EXPECT_EQ(EINVAL, errno) {
2100 TH_LOG("Did not reject mode strict with uargs!");
2101 }
2102
2103 /* Reject insane args for filter. */
2104 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
2105 EXPECT_EQ(EINVAL, errno) {
2106 TH_LOG("Did not reject crazy filter flags!");
2107 }
2108 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
2109 EXPECT_EQ(EFAULT, errno) {
2110 TH_LOG("Did not reject NULL filter!");
2111 }
2112
2113 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
2114 EXPECT_EQ(0, errno) {
2115 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
2116 strerror(errno));
2117 }
2118 }
2119
2120 TEST(seccomp_syscall_mode_lock)
2121 {
2122 struct sock_filter filter[] = {
2123 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2124 };
2125 struct sock_fprog prog = {
2126 .len = (unsigned short)ARRAY_SIZE(filter),
2127 .filter = filter,
2128 };
2129 long ret;
2130
2131 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
2132 ASSERT_EQ(0, ret) {
2133 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2134 }
2135
2136 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
2137 ASSERT_NE(ENOSYS, errno) {
2138 TH_LOG("Kernel does not support seccomp syscall!");
2139 }
2140 EXPECT_EQ(0, ret) {
2141 TH_LOG("Could not install filter!");
2142 }
2143
2144 /* Make sure neither entry point will switch to strict. */
2145 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
2146 EXPECT_EQ(EINVAL, errno) {
2147 TH_LOG("Switched to mode strict!");
2148 }
2149
2150 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
2151 EXPECT_EQ(EINVAL, errno) {
2152 TH_LOG("Switched to mode strict!");
2153 }
2154 }
2155
2156 /*
2157 * Test detection of known and unknown filter flags. Userspace needs to be able
2158 * to check if a filter flag is supported by the current kernel and a good way
2159 * of doing that is by attempting to enter filter mode, with the flag bit in
2160 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
2161 * that the flag is valid and EINVAL indicates that the flag is invalid.
2162 */
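/*
 * A minimal probe, as a sketch of the loop below (flag_supported is a
 * hypothetical local used only for illustration):
 *
 *	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
 *	flag_supported = (ret == -1 && errno == EFAULT);
 */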
2163 TEST(detect_seccomp_filter_flags)
2164 {
2165 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
2166 SECCOMP_FILTER_FLAG_LOG,
2167 SECCOMP_FILTER_FLAG_SPEC_ALLOW,
2168 SECCOMP_FILTER_FLAG_NEW_LISTENER };
2169 unsigned int flag, all_flags;
2170 int i;
2171 long ret;
2172
2173 /* Test detection of known-good filter flags */
2174 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
2175 int bits = 0;
2176
2177 flag = flags[i];
2178 /* Make sure the flag is a single bit! */
2179 while (flag) {
2180 if (flag & 0x1)
2181 bits++;
2182 flag >>= 1;
2183 }
2184 ASSERT_EQ(1, bits);
2185 flag = flags[i];
2186
2187 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2188 ASSERT_NE(ENOSYS, errno) {
2189 TH_LOG("Kernel does not support seccomp syscall!");
2190 }
2191 EXPECT_EQ(-1, ret);
2192 EXPECT_EQ(EFAULT, errno) {
2193 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
2194 flag);
2195 }
2196
2197 all_flags |= flag;
2198 }
2199
2200 /* Test detection of all known-good filter flags */
2201 ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
2202 EXPECT_EQ(-1, ret);
2203 EXPECT_EQ(EFAULT, errno) {
2204 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
2205 all_flags);
2206 }
2207
2208 /* Test detection of an unknown filter flag */
2209 flag = -1;
2210 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2211 EXPECT_EQ(-1, ret);
2212 EXPECT_EQ(EINVAL, errno) {
2213 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
2214 flag);
2215 }
2216
2217 /*
2218 * Test detection of an unknown filter flag that may simply need to be
2219 * added to this test
2220 */
2221 flag = flags[ARRAY_SIZE(flags) - 1] << 1;
2222 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2223 EXPECT_EQ(-1, ret);
2224 EXPECT_EQ(EINVAL, errno) {
2225 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
2226 flag);
2227 }
2228 }
2229
2230 TEST(TSYNC_first)
2231 {
2232 struct sock_filter filter[] = {
2233 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2234 };
2235 struct sock_fprog prog = {
2236 .len = (unsigned short)ARRAY_SIZE(filter),
2237 .filter = filter,
2238 };
2239 long ret;
2240
2241 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
2242 ASSERT_EQ(0, ret) {
2243 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2244 }
2245
2246 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2247 &prog);
2248 ASSERT_NE(ENOSYS, errno) {
2249 TH_LOG("Kernel does not support seccomp syscall!");
2250 }
2251 EXPECT_EQ(0, ret) {
2252 TH_LOG("Could not install initial filter with TSYNC!");
2253 }
2254 }
2255
2256 #define TSYNC_SIBLINGS 2
2257 struct tsync_sibling {
2258 pthread_t tid;
2259 pid_t system_tid;
2260 sem_t *started;
2261 pthread_cond_t *cond;
2262 pthread_mutex_t *mutex;
2263 int diverge;
2264 int num_waits;
2265 struct sock_fprog *prog;
2266 struct __test_metadata *metadata;
2267 };
2268
2269 /*
2270 * To avoid joining joined threads (which is not allowed by Bionic),
2271 * make sure we both successfully join and clear the tid to skip a
2272 * later join attempt during fixture teardown. Any remaining threads
2273 * will be directly killed during teardown.
2274 */
2275 #define PTHREAD_JOIN(tid, status) \
2276 do { \
2277 int _rc = pthread_join(tid, status); \
2278 if (_rc) { \
2279 TH_LOG("pthread_join of tid %u failed: %d", \
2280 (unsigned int)tid, _rc); \
2281 } else { \
2282 tid = 0; \
2283 } \
2284 } while (0)
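/*
 * Typical use in the TSYNC tests below (sketch):
 *
 *	void *status;
 *
 *	PTHREAD_JOIN(self->sibling[0].tid, &status);
 *	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 */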
2285
2286 FIXTURE_DATA(TSYNC) {
2287 struct sock_fprog root_prog, apply_prog;
2288 struct tsync_sibling sibling[TSYNC_SIBLINGS];
2289 sem_t started;
2290 pthread_cond_t cond;
2291 pthread_mutex_t mutex;
2292 int sibling_count;
2293 };
2294
2295 FIXTURE_SETUP(TSYNC)
2296 {
2297 struct sock_filter root_filter[] = {
2298 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2299 };
2300 struct sock_filter apply_filter[] = {
2301 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2302 offsetof(struct seccomp_data, nr)),
2303 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
2304 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2305 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2306 };
2307
2308 memset(&self->root_prog, 0, sizeof(self->root_prog));
2309 memset(&self->apply_prog, 0, sizeof(self->apply_prog));
2310 memset(&self->sibling, 0, sizeof(self->sibling));
2311 self->root_prog.filter = malloc(sizeof(root_filter));
2312 ASSERT_NE(NULL, self->root_prog.filter);
2313 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
2314 self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
2315
2316 self->apply_prog.filter = malloc(sizeof(apply_filter));
2317 ASSERT_NE(NULL, self->apply_prog.filter);
2318 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
2319 self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);
2320
2321 self->sibling_count = 0;
2322 pthread_mutex_init(&self->mutex, NULL);
2323 pthread_cond_init(&self->cond, NULL);
2324 sem_init(&self->started, 0, 0);
2325 self->sibling[0].tid = 0;
2326 self->sibling[0].cond = &self->cond;
2327 self->sibling[0].started = &self->started;
2328 self->sibling[0].mutex = &self->mutex;
2329 self->sibling[0].diverge = 0;
2330 self->sibling[0].num_waits = 1;
2331 self->sibling[0].prog = &self->root_prog;
2332 self->sibling[0].metadata = _metadata;
2333 self->sibling[1].tid = 0;
2334 self->sibling[1].cond = &self->cond;
2335 self->sibling[1].started = &self->started;
2336 self->sibling[1].mutex = &self->mutex;
2337 self->sibling[1].diverge = 0;
2338 self->sibling[1].prog = &self->root_prog;
2339 self->sibling[1].num_waits = 1;
2340 self->sibling[1].metadata = _metadata;
2341 }
2342
2343 FIXTURE_TEARDOWN(TSYNC)
2344 {
2345 int sib = 0;
2346
2347 if (self->root_prog.filter)
2348 free(self->root_prog.filter);
2349 if (self->apply_prog.filter)
2350 free(self->apply_prog.filter);
2351
2352 for ( ; sib < self->sibling_count; ++sib) {
2353 struct tsync_sibling *s = &self->sibling[sib];
2354
2355 if (!s->tid)
2356 continue;
2357 /*
2358 * If a thread is still running, it may be stuck, so hit
2359 * it over the head really hard.
2360 */
2361 pthread_kill(s->tid, SIGKILL);
2362 }
2363 pthread_mutex_destroy(&self->mutex);
2364 pthread_cond_destroy(&self->cond);
2365 sem_destroy(&self->started);
2366 }
2367
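/*
 * Sibling thread body, in outline: record the kernel TID, optionally
 * diverge by installing the root filter on just this thread, signal the
 * parent via the started semaphore, then wait on the condvar until
 * num_waits wakeups have been consumed. The sibling then verifies
 * NO_NEW_PRIVS is set (returning SIBLING_EXIT_NEWPRIVS if not) and
 * issues read(0, NULL, 0), which the "kill on read" apply filter can
 * turn into a kill; a sibling that survives returns
 * SIBLING_EXIT_UNKILLED.
 */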
2368 void *tsync_sibling(void *data)
2369 {
2370 long ret = 0;
2371 struct tsync_sibling *me = data;
2372
2373 me->system_tid = syscall(__NR_gettid);
2374
2375 pthread_mutex_lock(me->mutex);
2376 if (me->diverge) {
2377 /* Just re-apply the root prog to fork the tree */
2378 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2379 me->prog, 0, 0);
2380 }
2381 sem_post(me->started);
2382 /* Return outside of started so parent notices failures. */
2383 if (ret) {
2384 pthread_mutex_unlock(me->mutex);
2385 return (void *)SIBLING_EXIT_FAILURE;
2386 }
2387 do {
2388 pthread_cond_wait(me->cond, me->mutex);
2389 me->num_waits = me->num_waits - 1;
2390 } while (me->num_waits);
2391 pthread_mutex_unlock(me->mutex);
2392
2393 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
2394 if (!ret)
2395 return (void *)SIBLING_EXIT_NEWPRIVS;
2396 read(0, NULL, 0);
2397 return (void *)SIBLING_EXIT_UNKILLED;
2398 }
2399
2400 void tsync_start_sibling(struct tsync_sibling *sibling)
2401 {
2402 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
2403 }
2404
2405 TEST_F(TSYNC, siblings_fail_prctl)
2406 {
2407 long ret;
2408 void *status;
2409 struct sock_filter filter[] = {
2410 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2411 offsetof(struct seccomp_data, nr)),
2412 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
2413 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
2414 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2415 };
2416 struct sock_fprog prog = {
2417 .len = (unsigned short)ARRAY_SIZE(filter),
2418 .filter = filter,
2419 };
2420
2421 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2422 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2423 }
2424
2425 /* Check prctl failure detection by requesting sib 0 diverge. */
2426 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
2427 ASSERT_NE(ENOSYS, errno) {
2428 TH_LOG("Kernel does not support seccomp syscall!");
2429 }
2430 ASSERT_EQ(0, ret) {
2431 TH_LOG("setting filter failed");
2432 }
2433
2434 self->sibling[0].diverge = 1;
2435 tsync_start_sibling(&self->sibling[0]);
2436 tsync_start_sibling(&self->sibling[1]);
2437
2438 while (self->sibling_count < TSYNC_SIBLINGS) {
2439 sem_wait(&self->started);
2440 self->sibling_count++;
2441 }
2442
2443 /* Signal the threads to clean up. */
2444 pthread_mutex_lock(&self->mutex);
2445 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2446 TH_LOG("cond broadcast non-zero");
2447 }
2448 pthread_mutex_unlock(&self->mutex);
2449
2450 /* Ensure diverging sibling failed to call prctl. */
2451 PTHREAD_JOIN(self->sibling[0].tid, &status);
2452 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
2453 PTHREAD_JOIN(self->sibling[1].tid, &status);
2454 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2455 }
2456
2457 TEST_F(TSYNC, two_siblings_with_ancestor)
2458 {
2459 long ret;
2460 void *status;
2461
2462 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2463 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2464 }
2465
2466 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2467 ASSERT_NE(ENOSYS, errno) {
2468 TH_LOG("Kernel does not support seccomp syscall!");
2469 }
2470 ASSERT_EQ(0, ret) {
2471 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2472 }
2473 tsync_start_sibling(&self->sibling[0]);
2474 tsync_start_sibling(&self->sibling[1]);
2475
2476 while (self->sibling_count < TSYNC_SIBLINGS) {
2477 sem_wait(&self->started);
2478 self->sibling_count++;
2479 }
2480
2481 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2482 &self->apply_prog);
2483 ASSERT_EQ(0, ret) {
2484 TH_LOG("Could install filter on all threads!");
2485 }
2486 /* Tell the siblings to test the policy */
2487 pthread_mutex_lock(&self->mutex);
2488 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2489 TH_LOG("cond broadcast non-zero");
2490 }
2491 pthread_mutex_unlock(&self->mutex);
2492 /* Ensure they are both killed and don't exit cleanly. */
2493 PTHREAD_JOIN(self->sibling[0].tid, &status);
2494 EXPECT_EQ(0x0, (long)status);
2495 PTHREAD_JOIN(self->sibling[1].tid, &status);
2496 EXPECT_EQ(0x0, (long)status);
2497 }
2498
2499 TEST_F(TSYNC, two_sibling_want_nnp)
2500 {
2501 void *status;
2502
2503 /* start siblings before any prctl() operations */
2504 tsync_start_sibling(&self->sibling[0]);
2505 tsync_start_sibling(&self->sibling[1]);
2506 while (self->sibling_count < TSYNC_SIBLINGS) {
2507 sem_wait(&self->started);
2508 self->sibling_count++;
2509 }
2510
2511 /* Tell the siblings to test no policy */
2512 pthread_mutex_lock(&self->mutex);
2513 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2514 TH_LOG("cond broadcast non-zero");
2515 }
2516 pthread_mutex_unlock(&self->mutex);
2517
2518 /* Ensure they are both upset about lacking nnp. */
2519 PTHREAD_JOIN(self->sibling[0].tid, &status);
2520 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
2521 PTHREAD_JOIN(self->sibling[1].tid, &status);
2522 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
2523 }
2524
2525 TEST_F(TSYNC, two_siblings_with_no_filter)
2526 {
2527 long ret;
2528 void *status;
2529
2530 /* start siblings before any prctl() operations */
2531 tsync_start_sibling(&self->sibling[0]);
2532 tsync_start_sibling(&self->sibling[1]);
2533 while (self->sibling_count < TSYNC_SIBLINGS) {
2534 sem_wait(&self->started);
2535 self->sibling_count++;
2536 }
2537
2538 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2539 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2540 }
2541
2542 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2543 &self->apply_prog);
2544 ASSERT_NE(ENOSYS, errno) {
2545 TH_LOG("Kernel does not support seccomp syscall!");
2546 }
2547 ASSERT_EQ(0, ret) {
2548 TH_LOG("Could install filter on all threads!");
2549 }
2550
2551 /* Tell the siblings to test the policy */
2552 pthread_mutex_lock(&self->mutex);
2553 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2554 TH_LOG("cond broadcast non-zero");
2555 }
2556 pthread_mutex_unlock(&self->mutex);
2557
2558 /* Ensure they are both killed and don't exit cleanly. */
2559 PTHREAD_JOIN(self->sibling[0].tid, &status);
2560 EXPECT_EQ(0x0, (long)status);
2561 PTHREAD_JOIN(self->sibling[1].tid, &status);
2562 EXPECT_EQ(0x0, (long)status);
2563 }
2564
2565 TEST_F(TSYNC, two_siblings_with_one_divergence)
2566 {
2567 long ret;
2568 void *status;
2569
2570 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2571 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2572 }
2573
2574 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2575 ASSERT_NE(ENOSYS, errno) {
2576 TH_LOG("Kernel does not support seccomp syscall!");
2577 }
2578 ASSERT_EQ(0, ret) {
2579 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2580 }
2581 self->sibling[0].diverge = 1;
2582 tsync_start_sibling(&self->sibling[0]);
2583 tsync_start_sibling(&self->sibling[1]);
2584
2585 while (self->sibling_count < TSYNC_SIBLINGS) {
2586 sem_wait(&self->started);
2587 self->sibling_count++;
2588 }
2589
2590 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2591 &self->apply_prog);
2592 ASSERT_EQ(self->sibling[0].system_tid, ret) {
2593 TH_LOG("Did not fail on diverged sibling.");
2594 }
2595
2596 /* Wake the threads */
2597 pthread_mutex_lock(&self->mutex);
2598 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2599 TH_LOG("cond broadcast non-zero");
2600 }
2601 pthread_mutex_unlock(&self->mutex);
2602
2603 /* Ensure they are both unkilled. */
2604 PTHREAD_JOIN(self->sibling[0].tid, &status);
2605 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2606 PTHREAD_JOIN(self->sibling[1].tid, &status);
2607 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2608 }
2609
2610 TEST_F(TSYNC, two_siblings_not_under_filter)
2611 {
2612 long ret, sib;
2613 void *status;
2614 struct timespec delay = { .tv_nsec = 100000000 };
2615
2616 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2617 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2618 }
2619
2620 /*
2621 * Sibling 0 will have its own seccomp policy
2622 * and sibling 1 will not be under seccomp at
2623 * all. The TSYNC attempt can bring sibling 1 under
2624 * seccomp, but sibling 0's divergence will cause it to fail.
2625 */
2626 self->sibling[0].diverge = 1;
2627 tsync_start_sibling(&self->sibling[0]);
2628 tsync_start_sibling(&self->sibling[1]);
2629
2630 while (self->sibling_count < TSYNC_SIBLINGS) {
2631 sem_wait(&self->started);
2632 self->sibling_count++;
2633 }
2634
2635 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2636 ASSERT_NE(ENOSYS, errno) {
2637 TH_LOG("Kernel does not support seccomp syscall!");
2638 }
2639 ASSERT_EQ(0, ret) {
2640 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2641 }
2642
2643 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2644 &self->apply_prog);
2645 ASSERT_EQ(ret, self->sibling[0].system_tid) {
2646 TH_LOG("Did not fail on diverged sibling.");
2647 }
2648 sib = 1;
2649 if (ret == self->sibling[0].system_tid)
2650 sib = 0;
2651
2652 pthread_mutex_lock(&self->mutex);
2653
2654 /* Increment the other sibling's num_waits so we can clean up
2655 * the one we just saw.
2656 */
2657 self->sibling[!sib].num_waits += 1;
2658
2659 /* Signal the thread to clean up. */
2660 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2661 TH_LOG("cond broadcast non-zero");
2662 }
2663 pthread_mutex_unlock(&self->mutex);
2664 PTHREAD_JOIN(self->sibling[sib].tid, &status);
2665 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2666 /* Poll for actual task death. pthread_join doesn't guarantee it. */
2667 while (!kill(self->sibling[sib].system_tid, 0))
2668 nanosleep(&delay, NULL);
2669 /* Switch to the remaining sibling */
2670 sib = !sib;
2671
2672 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2673 &self->apply_prog);
2674 ASSERT_EQ(0, ret) {
2675 TH_LOG("Expected the remaining sibling to sync");
2676 }
2677
2678 pthread_mutex_lock(&self->mutex);
2679
2680 /* If the remaining sibling didn't have a chance to wake up during
2681 * the first broadcast, manually reduce its num_waits now.
2682 */
2683 if (self->sibling[sib].num_waits > 1)
2684 self->sibling[sib].num_waits = 1;
2685 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2686 TH_LOG("cond broadcast non-zero");
2687 }
2688 pthread_mutex_unlock(&self->mutex);
2689 PTHREAD_JOIN(self->sibling[sib].tid, &status);
2690 EXPECT_EQ(0, (long)status);
2691 /* Poll for actual task death. pthread_join doesn't guarantee it. */
2692 while (!kill(self->sibling[sib].system_tid, 0))
2693 nanosleep(&delay, NULL);
2694
2695 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2696 &self->apply_prog);
2697 ASSERT_EQ(0, ret); /* just us chickens */
2698 }
2699
2700 /* Make sure restarted syscalls are seen directly as "restart_syscall". */
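/*
 * In outline: the child's filter tags nanosleep with
 * SECCOMP_RET_TRACE|0x100 and restart_syscall with
 * SECCOMP_RET_TRACE|0x200. The parent interrupts the sleep with
 * SIGSTOP/SIGCONT and then checks, via PTRACE_GETEVENTMSG and the
 * reported syscall number, that the restarted syscall shows up as
 * restart_syscall (0x200) rather than as nanosleep again.
 */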
2701 TEST(syscall_restart)
2702 {
2703 long ret;
2704 unsigned long msg;
2705 pid_t child_pid;
2706 int pipefd[2];
2707 int status;
2708 siginfo_t info = { };
2709 struct sock_filter filter[] = {
2710 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2711 offsetof(struct seccomp_data, nr)),
2712
2713 #ifdef __NR_sigreturn
2714 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0),
2715 #endif
2716 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0),
2717 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0),
2718 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0),
2719 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0),
2720 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
2721
2722 /* Allow __NR_write for easy logging. */
2723 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
2724 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2725 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2726 /* The nanosleep jump target. */
2727 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
2728 /* The restart_syscall jump target. */
2729 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
2730 };
2731 struct sock_fprog prog = {
2732 .len = (unsigned short)ARRAY_SIZE(filter),
2733 .filter = filter,
2734 };
2735 #if defined(__arm__)
2736 struct utsname utsbuf;
2737 #endif
2738
2739 ASSERT_EQ(0, pipe(pipefd));
2740
2741 child_pid = fork();
2742 ASSERT_LE(0, child_pid);
2743 if (child_pid == 0) {
2744 /* Child uses EXPECT not ASSERT to deliver status correctly. */
2745 char buf = ' ';
2746 struct timespec timeout = { };
2747
2748 /* Attach parent as tracer and stop. */
2749 EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
2750 EXPECT_EQ(0, raise(SIGSTOP));
2751
2752 EXPECT_EQ(0, close(pipefd[1]));
2753
2754 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2755 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2756 }
2757
2758 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2759 EXPECT_EQ(0, ret) {
2760 TH_LOG("Failed to install filter!");
2761 }
2762
2763 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
2764 TH_LOG("Failed to read() sync from parent");
2765 }
2766 EXPECT_EQ('.', buf) {
2767 TH_LOG("Failed to get sync data from read()");
2768 }
2769
2770 /* Start nanosleep to be interrupted. */
2771 timeout.tv_sec = 1;
2772 errno = 0;
2773 EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
2774 TH_LOG("Call to nanosleep() failed (errno %d)", errno);
2775 }
2776
2777 /* Read final sync from parent. */
2778 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
2779 TH_LOG("Failed final read() from parent");
2780 }
2781 EXPECT_EQ('!', buf) {
2782 TH_LOG("Failed to get final data from read()");
2783 }
2784
2785 /* Directly report the status of our test harness results. */
2786 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
2787 : EXIT_FAILURE);
2788 }
2789 EXPECT_EQ(0, close(pipefd[0]));
2790
2791 /* Attach to child, setup options, and release. */
2792 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2793 ASSERT_EQ(true, WIFSTOPPED(status));
2794 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
2795 PTRACE_O_TRACESECCOMP));
2796 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
2797 ASSERT_EQ(1, write(pipefd[1], ".", 1));
2798
2799 /* Wait for nanosleep() to start. */
2800 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2801 ASSERT_EQ(true, WIFSTOPPED(status));
2802 ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
2803 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
2804 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
2805 ASSERT_EQ(0x100, msg);
2806 EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid));
2807
2808 /* Might as well check siginfo for sanity while we're here. */
2809 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
2810 ASSERT_EQ(SIGTRAP, info.si_signo);
2811 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
2812 EXPECT_EQ(0, info.si_errno);
2813 EXPECT_EQ(getuid(), info.si_uid);
2814 /* Verify signal delivery came from child (seccomp-triggered). */
2815 EXPECT_EQ(child_pid, info.si_pid);
2816
2817 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
2818 ASSERT_EQ(0, kill(child_pid, SIGSTOP));
2819 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
2820 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2821 ASSERT_EQ(true, WIFSTOPPED(status));
2822 ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
2823 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
2824 /*
2825 * There is no siginfo on SIGSTOP any more, so we can't verify
2826 * signal delivery came from parent now (getpid() == info.si_pid).
2827 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
2828 * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
2829 */
2830 EXPECT_EQ(SIGSTOP, info.si_signo);
2831
2832 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
2833 ASSERT_EQ(0, kill(child_pid, SIGCONT));
2834 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
2835 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2836 ASSERT_EQ(true, WIFSTOPPED(status));
2837 ASSERT_EQ(SIGCONT, WSTOPSIG(status));
2838 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
2839
2840 /* Wait for restart_syscall() to start. */
2841 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2842 ASSERT_EQ(true, WIFSTOPPED(status));
2843 ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
2844 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
2845 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
2846
2847 ASSERT_EQ(0x200, msg);
2848 ret = get_syscall(_metadata, child_pid);
2849 #if defined(__arm__)
2850 /*
2851 * FIXME:
2852 * - native ARM registers do NOT expose true syscall.
2853 * - compat ARM registers on ARM64 DO expose true syscall.
2854 */
2855 ASSERT_EQ(0, uname(&utsbuf));
2856 if (strncmp(utsbuf.machine, "arm", 3) == 0) {
2857 EXPECT_EQ(__NR_nanosleep, ret);
2858 } else
2859 #endif
2860 {
2861 EXPECT_EQ(__NR_restart_syscall, ret);
2862 }
2863
2864 /* Write again to end test. */
2865 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
2866 ASSERT_EQ(1, write(pipefd[1], "!", 1));
2867 EXPECT_EQ(0, close(pipefd[1]));
2868
2869 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2870 if (WIFSIGNALED(status) || WEXITSTATUS(status))
2871 _metadata->passed = 0;
2872 }
2873
2874 TEST_SIGNAL(filter_flag_log, SIGSYS)
2875 {
2876 struct sock_filter allow_filter[] = {
2877 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2878 };
2879 struct sock_filter kill_filter[] = {
2880 BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2881 offsetof(struct seccomp_data, nr)),
2882 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
2883 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2884 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2885 };
2886 struct sock_fprog allow_prog = {
2887 .len = (unsigned short)ARRAY_SIZE(allow_filter),
2888 .filter = allow_filter,
2889 };
2890 struct sock_fprog kill_prog = {
2891 .len = (unsigned short)ARRAY_SIZE(kill_filter),
2892 .filter = kill_filter,
2893 };
2894 long ret;
2895 pid_t parent = getppid();
2896
2897 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2898 ASSERT_EQ(0, ret);
2899
2900 /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */
2901 ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG,
2902 &allow_prog);
2903 ASSERT_NE(ENOSYS, errno) {
2904 TH_LOG("Kernel does not support seccomp syscall!");
2905 }
2906 EXPECT_NE(0, ret) {
2907 TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!");
2908 }
2909 EXPECT_EQ(EINVAL, errno) {
2910 TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!");
2911 }
2912
2913 /* Verify that a simple, permissive filter can be added with no flags */
2914 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
2915 EXPECT_EQ(0, ret);
2916
2917 /* See if the same filter can be added with the FILTER_FLAG_LOG flag */
2918 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
2919 &allow_prog);
2920 ASSERT_NE(EINVAL, errno) {
2921 TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!");
2922 }
2923 EXPECT_EQ(0, ret);
2924
2925 /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */
2926 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
2927 &kill_prog);
2928 EXPECT_EQ(0, ret);
2929
2930 EXPECT_EQ(parent, syscall(__NR_getppid));
2931 /* getpid() should never return. */
2932 EXPECT_EQ(0, syscall(__NR_getpid));
2933 }
2934
2935 TEST(get_action_avail)
2936 {
2937 __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP,
2938 SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE,
2939 SECCOMP_RET_LOG, SECCOMP_RET_ALLOW };
2940 __u32 unknown_action = 0x10000000U;
2941 int i;
2942 long ret;
2943
2944 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]);
2945 ASSERT_NE(ENOSYS, errno) {
2946 TH_LOG("Kernel does not support seccomp syscall!");
2947 }
2948 ASSERT_NE(EINVAL, errno) {
2949 TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!");
2950 }
2951 EXPECT_EQ(ret, 0);
2952
2953 for (i = 0; i < ARRAY_SIZE(actions); i++) {
2954 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]);
2955 EXPECT_EQ(ret, 0) {
2956 TH_LOG("Expected action (0x%X) not available!",
2957 actions[i]);
2958 }
2959 }
2960
2961 /* Check that an unknown action is handled properly (EOPNOTSUPP) */
2962 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action);
2963 EXPECT_EQ(ret, -1);
2964 EXPECT_EQ(errno, EOPNOTSUPP);
2965 }
2966
2967 TEST(get_metadata)
2968 {
2969 pid_t pid;
2970 int pipefd[2];
2971 char buf;
2972 struct seccomp_metadata md;
2973 long ret;
2974
2975 /* Only real root can get metadata. */
2976 if (geteuid()) {
2977 XFAIL(return, "get_metadata requires real root");
2978 return;
2979 }
2980
2981 ASSERT_EQ(0, pipe(pipefd));
2982
2983 pid = fork();
2984 ASSERT_GE(pid, 0);
2985 if (pid == 0) {
2986 struct sock_filter filter[] = {
2987 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2988 };
2989 struct sock_fprog prog = {
2990 .len = (unsigned short)ARRAY_SIZE(filter),
2991 .filter = filter,
2992 };
2993
2994 /* one with log, one without */
2995 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
2996 SECCOMP_FILTER_FLAG_LOG, &prog));
2997 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
2998
2999 EXPECT_EQ(0, close(pipefd[0]));
3000 ASSERT_EQ(1, write(pipefd[1], "1", 1));
3001 ASSERT_EQ(0, close(pipefd[1]));
3002
3003 while (1)
3004 sleep(100);
3005 }
3006
3007 ASSERT_EQ(0, close(pipefd[1]));
3008 ASSERT_EQ(1, read(pipefd[0], &buf, 1));
3009
3010 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
3011 ASSERT_EQ(pid, waitpid(pid, NULL, 0));
3012
3013 /* Past here must not use ASSERT or child process is never killed. */
3014
3015 md.filter_off = 0;
3016 errno = 0;
3017 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
3018 EXPECT_EQ(sizeof(md), ret) {
3019 if (errno == EINVAL)
3020 XFAIL(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)");
3021 }
3022
3023 EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
3024 EXPECT_EQ(md.filter_off, 0);
3025
3026 md.filter_off = 1;
3027 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
3028 EXPECT_EQ(sizeof(md), ret);
3029 EXPECT_EQ(md.flags, 0);
3030 EXPECT_EQ(md.filter_off, 1);
3031
3032 skip:
3033 ASSERT_EQ(0, kill(pid, SIGKILL));
3034 }
3035
3036 static int user_trap_syscall(int nr, unsigned int flags)
3037 {
3038 struct sock_filter filter[] = {
3039 BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
3040 offsetof(struct seccomp_data, nr)),
3041 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1),
3042 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_USER_NOTIF),
3043 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
3044 };
3045
3046 struct sock_fprog prog = {
3047 .len = (unsigned short)ARRAY_SIZE(filter),
3048 .filter = filter,
3049 };
3050
3051 return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
3052 }
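/*
 * Usage sketch, as exercised by the tests below: with flags == 0 the
 * return value is the usual 0/-1 seccomp() result; with
 * SECCOMP_FILTER_FLAG_NEW_LISTENER a successful call returns the
 * notification file descriptor instead, e.g.
 *
 *	listener = user_trap_syscall(__NR_getpid,
 *				     SECCOMP_FILTER_FLAG_NEW_LISTENER);
 */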
3053
3054 #define USER_NOTIF_MAGIC 116983961184613L
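/*
 * The notification round-trip exercised below (sketch): the supervisor
 * poll()s the listener fd, reads a request with
 * SECCOMP_IOCTL_NOTIF_RECV, fills a response that echoes req.id and
 * sets error/val, and replies with SECCOMP_IOCTL_NOTIF_SEND; the
 * child's blocked getpid() then returns USER_NOTIF_MAGIC.
 */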
3055 TEST(user_notification_basic)
3056 {
3057 pid_t pid;
3058 long ret;
3059 int status, listener;
3060 struct seccomp_notif req = {};
3061 struct seccomp_notif_resp resp = {};
3062 struct pollfd pollfd;
3063
3064 struct sock_filter filter[] = {
3065 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3066 };
3067 struct sock_fprog prog = {
3068 .len = (unsigned short)ARRAY_SIZE(filter),
3069 .filter = filter,
3070 };
3071
3072 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3073 ASSERT_EQ(0, ret) {
3074 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3075 }
3076
3077 pid = fork();
3078 ASSERT_GE(pid, 0);
3079
3080 /* Check that we get -ENOSYS with no listener attached */
3081 if (pid == 0) {
3082 if (user_trap_syscall(__NR_getpid, 0) < 0)
3083 exit(1);
3084 ret = syscall(__NR_getpid);
3085 exit(ret >= 0 || errno != ENOSYS);
3086 }
3087
3088 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3089 EXPECT_EQ(true, WIFEXITED(status));
3090 EXPECT_EQ(0, WEXITSTATUS(status));
3091
3092 /* Add some no-op filters for grins. */
3093 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3094 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3095 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3096 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3097
3098 /* Check that the basic notification machinery works */
3099 listener = user_trap_syscall(__NR_getpid,
3100 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3101 ASSERT_GE(listener, 0);
3102
3103 /* Installing a second listener in the chain should fail with EBUSY. */
3104 EXPECT_EQ(user_trap_syscall(__NR_getpid,
3105 SECCOMP_FILTER_FLAG_NEW_LISTENER),
3106 -1);
3107 EXPECT_EQ(errno, EBUSY);
3108
3109 pid = fork();
3110 ASSERT_GE(pid, 0);
3111
3112 if (pid == 0) {
3113 ret = syscall(__NR_getpid);
3114 exit(ret != USER_NOTIF_MAGIC);
3115 }
3116
3117 pollfd.fd = listener;
3118 pollfd.events = POLLIN | POLLOUT;
3119
3120 EXPECT_GT(poll(&pollfd, 1, -1), 0);
3121 EXPECT_EQ(pollfd.revents, POLLIN);
3122
3123 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3124
3125 pollfd.fd = listener;
3126 pollfd.events = POLLIN | POLLOUT;
3127
3128 EXPECT_GT(poll(&pollfd, 1, -1), 0);
3129 EXPECT_EQ(pollfd.revents, POLLOUT);
3130
3131 EXPECT_EQ(req.data.nr, __NR_getpid);
3132
3133 resp.id = req.id;
3134 resp.error = 0;
3135 resp.val = USER_NOTIF_MAGIC;
3136
3137 /* Check that the kernel rejects a non-zero resp.flags. */
3138 resp.flags = 1;
3139 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3140 EXPECT_EQ(errno, EINVAL);
3141
3142 resp.flags = 0;
3143 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3144
3145 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3146 EXPECT_EQ(true, WIFEXITED(status));
3147 EXPECT_EQ(0, WEXITSTATUS(status));
3148 }
3149
3150 TEST(user_notification_kill_in_middle)
3151 {
3152 pid_t pid;
3153 long ret;
3154 int listener;
3155 struct seccomp_notif req = {};
3156 struct seccomp_notif_resp resp = {};
3157
3158 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3159 ASSERT_EQ(0, ret) {
3160 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3161 }
3162
3163 listener = user_trap_syscall(__NR_getpid,
3164 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3165 ASSERT_GE(listener, 0);
3166
3167 /*
3168 * Check that nothing bad happens when we kill the task in the middle
3169 * of a syscall.
3170 */
3171 pid = fork();
3172 ASSERT_GE(pid, 0);
3173
3174 if (pid == 0) {
3175 ret = syscall(__NR_getpid);
3176 exit(ret != USER_NOTIF_MAGIC);
3177 }
3178
3179 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3180 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0);
3181
3182 EXPECT_EQ(kill(pid, SIGKILL), 0);
3183 EXPECT_EQ(waitpid(pid, NULL, 0), pid);
3184
3185 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1);
3186
3187 resp.id = req.id;
3188 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
3189 EXPECT_EQ(ret, -1);
3190 EXPECT_EQ(errno, ENOENT);
3191 }
3192
3193 static int handled = -1;
3194
3195 static void signal_handler(int signal)
3196 {
3197 if (write(handled, "c", 1) != 1)
3198 perror("write from signal");
3199 }
3200
3201 TEST(user_notification_signal)
3202 {
3203 pid_t pid;
3204 long ret;
3205 int status, listener, sk_pair[2];
3206 struct seccomp_notif req = {};
3207 struct seccomp_notif_resp resp = {};
3208 char c;
3209
3210 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3211 ASSERT_EQ(0, ret) {
3212 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3213 }
3214
3215 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
3216
3217 listener = user_trap_syscall(__NR_gettid,
3218 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3219 ASSERT_GE(listener, 0);
3220
3221 pid = fork();
3222 ASSERT_GE(pid, 0);
3223
3224 if (pid == 0) {
3225 close(sk_pair[0]);
3226 handled = sk_pair[1];
3227 if (signal(SIGUSR1, signal_handler) == SIG_ERR) {
3228 perror("signal");
3229 exit(1);
3230 }
3231 /*
3232 * ERESTARTSYS behavior is a bit hard to test, because we need
3233 * to rely on a signal that has not yet been handled. Let's at
3234 * least check that the error code gets propagated through, and
3235 * hope that it doesn't break when there is actually a signal :)
3236 */
3237 ret = syscall(__NR_gettid);
3238 exit(!(ret == -1 && errno == 512));
3239 }
3240
3241 close(sk_pair[1]);
3242
3243 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3244
3245 EXPECT_EQ(kill(pid, SIGUSR1), 0);
3246
3247 /*
3248 * Make sure the signal really is delivered, which means we're not
3249 * stuck in the user notification code any more and the notification
3250 * should be dead.
3251 */
3252 EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
3253
3254 resp.id = req.id;
3255 resp.error = -EPERM;
3256 resp.val = 0;
3257
3258 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3259 EXPECT_EQ(errno, ENOENT);
3260
3261 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3262
3263 resp.id = req.id;
3264 resp.error = -512; /* -ERESTARTSYS */
3265 resp.val = 0;
3266
3267 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3268
3269 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3270 EXPECT_EQ(true, WIFEXITED(status));
3271 EXPECT_EQ(0, WEXITSTATUS(status));
3272 }
3273
3274 TEST(user_notification_closed_listener)
3275 {
3276 pid_t pid;
3277 long ret;
3278 int status, listener;
3279
3280 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3281 ASSERT_EQ(0, ret) {
3282 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3283 }
3284
3285 listener = user_trap_syscall(__NR_getpid,
3286 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3287 ASSERT_GE(listener, 0);
3288
3289 /*
3290 * Check that we get ENOSYS when the listener is closed.
3291 */
3292 pid = fork();
3293 ASSERT_GE(pid, 0);
3294 if (pid == 0) {
3295 close(listener);
3296 ret = syscall(__NR_getpid);
3297 exit(ret != -1 || errno != ENOSYS);
3298 }
3299
3300 close(listener);
3301
3302 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3303 EXPECT_EQ(true, WIFEXITED(status));
3304 EXPECT_EQ(0, WEXITSTATUS(status));
3305 }
3306
3307 /*
3308 * Check that a pid in a child namespace still shows up as valid in ours.
3309 */
3310 TEST(user_notification_child_pid_ns)
3311 {
3312 pid_t pid;
3313 int status, listener;
3314 struct seccomp_notif req = {};
3315 struct seccomp_notif_resp resp = {};
3316
3317 ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0);
3318
3319 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3320 ASSERT_GE(listener, 0);
3321
3322 pid = fork();
3323 ASSERT_GE(pid, 0);
3324
3325 if (pid == 0)
3326 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
3327
3328 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3329 EXPECT_EQ(req.pid, pid);
3330
3331 resp.id = req.id;
3332 resp.error = 0;
3333 resp.val = USER_NOTIF_MAGIC;
3334
3335 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3336
3337 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3338 EXPECT_EQ(true, WIFEXITED(status));
3339 EXPECT_EQ(0, WEXITSTATUS(status));
3340 close(listener);
3341 }
3342
3343 /*
3344 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e.
3345 * invalid.
3346 */
3347 TEST(user_notification_sibling_pid_ns)
3348 {
3349 pid_t pid, pid2;
3350 int status, listener;
3351 struct seccomp_notif req = {};
3352 struct seccomp_notif_resp resp = {};
3353
3354 ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
3355 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3356 }
3357
3358 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3359 ASSERT_GE(listener, 0);
3360
3361 pid = fork();
3362 ASSERT_GE(pid, 0);
3363
3364 if (pid == 0) {
3365 ASSERT_EQ(unshare(CLONE_NEWPID), 0);
3366
3367 pid2 = fork();
3368 ASSERT_GE(pid2, 0);
3369
3370 if (pid2 == 0)
3371 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
3372
3373 EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
3374 EXPECT_EQ(true, WIFEXITED(status));
3375 EXPECT_EQ(0, WEXITSTATUS(status));
3376 exit(WEXITSTATUS(status));
3377 }
3378
3379 /* Create the sibling ns, and sibling in it. */
3380 EXPECT_EQ(unshare(CLONE_NEWPID), 0);
3381 EXPECT_EQ(errno, 0);
3382
3383 pid2 = fork();
3384 EXPECT_GE(pid2, 0);
3385
3386 if (pid2 == 0) {
3387 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3388 /*
3389 * The pid should be 0, i.e. the task is in some namespace that
3390 * we can't "see".
3391 */
3392 ASSERT_EQ(req.pid, 0);
3393
3394 resp.id = req.id;
3395 resp.error = 0;
3396 resp.val = USER_NOTIF_MAGIC;
3397
3398 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3399 exit(0);
3400 }
3401
3402 close(listener);
3403
3404 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3405 EXPECT_EQ(true, WIFEXITED(status));
3406 EXPECT_EQ(0, WEXITSTATUS(status));
3407
3408 EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
3409 EXPECT_EQ(true, WIFEXITED(status));
3410 EXPECT_EQ(0, WEXITSTATUS(status));
3411 }
3412
3413 TEST(user_notification_fault_recv)
3414 {
3415 pid_t pid;
3416 int status, listener;
3417 struct seccomp_notif req = {};
3418 struct seccomp_notif_resp resp = {};
3419
3420 ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
3421
3422 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3423 ASSERT_GE(listener, 0);
3424
3425 pid = fork();
3426 ASSERT_GE(pid, 0);
3427
3428 if (pid == 0)
3429 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
3430
3431 /* Do a bad recv() */
3432 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
3433 EXPECT_EQ(errno, EFAULT);
3434
3435 /* We should still be able to receive this notification, though. */
3436 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3437 EXPECT_EQ(req.pid, pid);
3438
3439 resp.id = req.id;
3440 resp.error = 0;
3441 resp.val = USER_NOTIF_MAGIC;
3442
3443 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3444
3445 EXPECT_EQ(waitpid(pid, &status, 0), pid);
3446 EXPECT_EQ(true, WIFEXITED(status));
3447 EXPECT_EQ(0, WEXITSTATUS(status));
3448 }
3449
3450 TEST(seccomp_get_notif_sizes)
3451 {
3452 struct seccomp_notif_sizes sizes;
3453
3454 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
3455 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
3456 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
3457 }
3458
3459 /*
3460 * TODO:
3461 * - add microbenchmarks
3462 * - expand NNP testing
3463 * - better arch-specific TRACE and TRAP handlers.
3464 * - endianness checking when appropriate
3465 * - 64-bit arg prodding
3466 * - arch value testing (x86 modes especially)
3467 * - verify that FILTER_FLAG_LOG filters generate log messages
3468 * - verify that RET_LOG generates log messages
3469 * - ...
3470 */
3471
3472 TEST_HARNESS_MAIN