2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
6 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
14 #include <asm/types.h>
15 #include <linux/types.h>
28 #include <sys/capability.h>
30 #include <linux/unistd.h>
31 #include <linux/filter.h>
32 #include <linux/bpf_perf_event.h>
33 #include <linux/bpf.h>
34 #include <linux/if_ether.h>
35 #include <linux/btf.h>
38 #include <bpf/libbpf.h>
41 # include "autoconf.h"
43 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
44 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
47 #include "bpf_rlimit.h"
51 #include "../../../include/linux/filter.h"
53 #define MAX_INSNS BPF_MAXINSNS
54 #define MAX_TEST_INSNS 1000000
56 #define MAX_NR_MAPS 18
57 #define MAX_TEST_RUNS 8
58 #define POINTER_VALUE 0xcafe4all
59 #define TEST_DATA_LEN 64
61 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
62 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
64 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
/* Set to true when the kernel.unprivileged_bpf_disabled sysctl reads
 * non-zero (see get_unpriv_disabled()); unprivileged test runs are
 * then skipped.
 */
65 static bool unpriv_disabled
= false;
70 struct bpf_insn insns
[MAX_INSNS
];
71 struct bpf_insn
*fill_insns
;
72 int fixup_map_hash_8b
[MAX_FIXUPS
];
73 int fixup_map_hash_48b
[MAX_FIXUPS
];
74 int fixup_map_hash_16b
[MAX_FIXUPS
];
75 int fixup_map_array_48b
[MAX_FIXUPS
];
76 int fixup_map_sockmap
[MAX_FIXUPS
];
77 int fixup_map_sockhash
[MAX_FIXUPS
];
78 int fixup_map_xskmap
[MAX_FIXUPS
];
79 int fixup_map_stacktrace
[MAX_FIXUPS
];
80 int fixup_prog1
[MAX_FIXUPS
];
81 int fixup_prog2
[MAX_FIXUPS
];
82 int fixup_map_in_map
[MAX_FIXUPS
];
83 int fixup_cgroup_storage
[MAX_FIXUPS
];
84 int fixup_percpu_cgroup_storage
[MAX_FIXUPS
];
85 int fixup_map_spin_lock
[MAX_FIXUPS
];
86 int fixup_map_array_ro
[MAX_FIXUPS
];
87 int fixup_map_array_wo
[MAX_FIXUPS
];
88 int fixup_map_array_small
[MAX_FIXUPS
];
89 int fixup_sk_storage_map
[MAX_FIXUPS
];
91 const char *errstr_unpriv
;
92 uint32_t retval
, retval_unpriv
, insn_processed
;
98 } result
, result_unpriv
;
99 enum bpf_prog_type prog_type
;
101 __u8 data
[TEST_DATA_LEN
];
102 void (*fill_helper
)(struct bpf_test
*self
);
105 uint32_t retval
, retval_unpriv
;
107 __u8 data
[TEST_DATA_LEN
];
108 __u64 data64
[TEST_DATA_LEN
/ 8];
110 } retvals
[MAX_TEST_RUNS
];
113 /* Note we want this to be 64 bit aligned so that the end of our array is
114 * actually the end of the structure.
116 #define MAX_ENTRIES 11
120 int foo
[MAX_ENTRIES
];
128 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test
*self
)
130 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
132 /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
133 unsigned int len
= (1 << 15) - PUSH_CNT
* 2 * 5 * 6;
134 struct bpf_insn
*insn
= self
->fill_insns
;
137 insn
[i
++] = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
139 for (j
= 0; j
< PUSH_CNT
; j
++) {
140 insn
[i
++] = BPF_LD_ABS(BPF_B
, 0);
141 insn
[i
] = BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0x34, len
- i
- 2);
143 insn
[i
++] = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
);
144 insn
[i
++] = BPF_MOV64_IMM(BPF_REG_2
, 1);
145 insn
[i
++] = BPF_MOV64_IMM(BPF_REG_3
, 2);
146 insn
[i
++] = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
147 BPF_FUNC_skb_vlan_push
),
148 insn
[i
] = BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, len
- i
- 2);
152 for (j
= 0; j
< PUSH_CNT
; j
++) {
153 insn
[i
++] = BPF_LD_ABS(BPF_B
, 0);
154 insn
[i
] = BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0x34, len
- i
- 2);
156 insn
[i
++] = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
);
157 insn
[i
++] = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
158 BPF_FUNC_skb_vlan_pop
),
159 insn
[i
] = BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, len
- i
- 2);
165 for (; i
< len
- 1; i
++)
166 insn
[i
] = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_0
, 0xbef);
167 insn
[len
- 1] = BPF_EXIT_INSN();
168 self
->prog_len
= len
;
/* Fill-helper: emits a conditional jump whose target lies past a long
 * run of BPF_LD_ABS instructions, exercising the 16-bit jump-offset
 * limit (each ld_abs is internally expanded to ~6 insns, hence
 * len = (1 << 15) / 6).
 * NOTE(review): some interior lines (index declaration, the fill loop
 * header, i++ after the jump) are not visible in this chunk; comments
 * describe only what is shown.
 */
171 static void bpf_fill_jump_around_ld_abs(struct bpf_test
*self
)
173 struct bpf_insn
*insn
= self
->fill_insns
;
174 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */
175 unsigned int len
= (1 << 15) / 6;
/* prologue: keep ctx in R6, load first packet byte into R0 */
178 insn
[i
++] = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
179 insn
[i
++] = BPF_LD_ABS(BPF_B
, 0);
/* long forward jump over the ld_abs run when the byte equals 10 */
180 insn
[i
] = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 10, len
- i
- 2);
183 insn
[i
++] = BPF_LD_ABS(BPF_B
, 1);
184 insn
[i
] = BPF_EXIT_INSN();
185 self
->prog_len
= i
+ 1;
/* Fill-helper: XOR-accumulates pseudo-random 64-bit immediates
 * (BPF_LD_IMM64) into R0, then folds the high 32 bits into the low
 * ones so the 32-bit prog_test_run retval can be compared against the
 * host-side accumulator.
 * NOTE(review): interior lines (copying 'tmp' into insn[], the running
 * 'res' accumulator, loop index init) are not visible in this chunk.
 */
188 static void bpf_fill_rand_ld_dw(struct bpf_test
*self
)
190 struct bpf_insn
*insn
= self
->fill_insns
;
194 insn
[i
++] = BPF_MOV32_IMM(BPF_REG_0
, 0);
/* self->retval doubles as the requested number of insns on entry */
195 while (i
< self
->retval
) {
196 uint64_t val
= bpf_semi_rand_get();
197 struct bpf_insn tmp
[2] = { BPF_LD_IMM64(BPF_REG_1
, val
) };
202 insn
[i
++] = BPF_ALU64_REG(BPF_XOR
, BPF_REG_0
, BPF_REG_1
);
/* epilogue: fold upper 32 bits of the accumulator into the lower 32 */
204 insn
[i
++] = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
);
205 insn
[i
++] = BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 32);
206 insn
[i
++] = BPF_ALU64_REG(BPF_XOR
, BPF_REG_0
, BPF_REG_1
);
207 insn
[i
] = BPF_EXIT_INSN();
208 self
->prog_len
= i
+ 1;
/* on exit self->retval holds the expected 32-bit run result */
210 self
->retval
= (uint32_t)res
;
213 /* test the sequence of 1k jumps */
/* Fill-helper: builds a program of random conditional jumps plus stack
 * stores, padded with MOVs so insn_processed lands exactly at the 1M
 * verifier limit.
 * NOTE(review): interior lines (index init, the jump-emitting loop
 * header, the STX offset expression) are not visible in this chunk.
 */
214 static void bpf_fill_scale1(struct bpf_test
*self
)
216 struct bpf_insn
*insn
= self
->fill_insns
;
219 insn
[i
++] = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
220 /* test to check that the sequence of 1024 jumps is acceptable */
222 insn
[i
++] = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
223 BPF_FUNC_get_prandom_u32
);
224 insn
[i
++] = BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, bpf_semi_rand_get(), 2);
225 insn
[i
++] = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
);
226 insn
[i
++] = BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
,
229 /* every jump adds 1024 steps to insn_processed, so to stay exactly
230 * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
232 while (i
< MAX_TEST_INSNS
- 1025)
233 insn
[i
++] = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_0
, 42);
234 insn
[i
] = BPF_EXIT_INSN();
235 self
->prog_len
= i
+ 1;
239 /* test the sequence of 1k jumps in inner most function (function depth 8)*/
/* Fill-helper: same jump/store pattern as bpf_fill_scale1(), but nested
 * FUNC_NEST levels deep via BPF_CALL_REL(1)/EXIT pairs emitted first.
 * NOTE(review): interior lines (i/k declarations, loop headers) are not
 * visible in this chunk.
 */
240 static void bpf_fill_scale2(struct bpf_test
*self
)
242 struct bpf_insn
*insn
= self
->fill_insns
;
/* emit FUNC_NEST call/exit pairs to reach the target function depth */
246 for (k
= 0; k
< FUNC_NEST
; k
++) {
247 insn
[i
++] = BPF_CALL_REL(1);
248 insn
[i
++] = BPF_EXIT_INSN();
250 insn
[i
++] = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
251 /* test to check that the sequence of 1024 jumps is acceptable */
253 insn
[i
++] = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
254 BPF_FUNC_get_prandom_u32
);
255 insn
[i
++] = BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, bpf_semi_rand_get(), 2);
256 insn
[i
++] = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
);
/* store to a stack slot chosen to stay within the frame for this depth */
257 insn
[i
++] = BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
,
258 -8 * (k
% (64 - 4 * FUNC_NEST
) + 1));
260 /* every jump adds 1024 steps to insn_processed, so to stay exactly
261 * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
263 while (i
< MAX_TEST_INSNS
- 1025)
264 insn
[i
++] = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_0
, 42);
265 insn
[i
] = BPF_EXIT_INSN();
266 self
->prog_len
= i
+ 1;
/* Dispatcher: selects the scale fill variant based on self->retval.
 * NOTE(review): the case labels and default branch are not visible in
 * this chunk — presumably 1 -> scale1, 2 -> scale2; confirm upstream.
 */
270 static void bpf_fill_scale(struct bpf_test
*self
)
272 switch (self
->retval
) {
274 return bpf_fill_scale1(self
);
276 return bpf_fill_scale2(self
);
283 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
284 #define BPF_SK_LOOKUP(func) \
285 /* struct bpf_sock_tuple tuple = {} */ \
286 BPF_MOV64_IMM(BPF_REG_2, 0), \
287 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
288 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
289 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
290 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
291 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
292 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
293 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
296 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
297 BPF_MOV64_IMM(BPF_REG_4, 0), \
298 BPF_MOV64_IMM(BPF_REG_5, 0), \
299 BPF_EMIT_CALL(BPF_FUNC_ ## func)
301 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
302 * value into 0 and does necessary preparation for direct packet access
303 * through r2. The allowed access range is 8 bytes.
305 #define BPF_DIRECT_PKT_R2 \
306 BPF_MOV64_IMM(BPF_REG_0, 0), \
307 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
308 offsetof(struct __sk_buff, data)), \
309 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
310 offsetof(struct __sk_buff, data_end)), \
311 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
313 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
316 /* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random
317 * positive u32, and zero-extend it into 64-bit.
319 #define BPF_RAND_UEXT_R7 \
320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
321 BPF_FUNC_get_prandom_u32), \
322 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
323 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
324 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
326 /* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random
327 * negative u32, and sign-extend it into 64-bit.
329 #define BPF_RAND_SEXT_R7 \
330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
331 BPF_FUNC_get_prandom_u32), \
332 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
333 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
334 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
335 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
337 static struct bpf_test tests
[] = {
339 #include <verifier/tests.h>
/* Returns the program length by scanning backwards from MAX_INSNS for
 * the last instruction whose code or imm field is non-zero.
 * NOTE(review): the 'len' declaration and the break/return lines are
 * not visible in this chunk.
 */
343 static int probe_filter_length(const struct bpf_insn
*fp
)
347 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
348 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
/* Returns true (and prints a SKIP notice) when the running kernel does
 * not support the given map type, per bpf_probe_map_type().
 */
353 static bool skip_unsupported_map(enum bpf_map_type map_type
)
355 if (!bpf_probe_map_type(map_type
, 0)) {
356 printf("SKIP (unsupported map type %d)\n", map_type
);
/* Creates a map of the given type/geometry; hash maps additionally get
 * BPF_F_NO_PREALLOC.  On failure prints a diagnostic unless the map
 * type is simply unsupported on this kernel.  Returns the map fd (or
 * the negative result from bpf_create_map on error).
 */
363 static int __create_map(uint32_t type
, uint32_t size_key
,
364 uint32_t size_value
, uint32_t max_elem
,
365 uint32_t extra_flags
)
369 fd
= bpf_create_map(type
, size_key
, size_value
, max_elem
,
370 (type
== BPF_MAP_TYPE_HASH
?
371 BPF_F_NO_PREALLOC
: 0) | extra_flags
);
373 if (skip_unsupported_map(type
))
375 printf("Failed to create hash map '%s'!\n", strerror(errno
));
/* Convenience wrapper around __create_map() with no extra flags. */
381 static int create_map(uint32_t type
, uint32_t size_key
,
382 uint32_t size_value
, uint32_t max_elem
)
384 return __create_map(type
, size_key
, size_value
, max_elem
, 0);
/* Stores a fixed test_val pattern at the given index; aborts via
 * assert() if the map update fails.
 */
387 static void update_map(int fd
, int index
)
389 struct test_val value
= {
390 .index
= (6 + 1) * sizeof(int),
391 .foo
[6] = 0xabcdef12,
394 assert(!bpf_map_update_elem(fd
, &index
, &value
, 0));
/* Loads a minimal program (return 42) of the given type; used as a
 * tail-call target in create_prog_array().  Returns the prog fd.
 */
397 static int create_prog_dummy1(enum bpf_prog_type prog_type
)
399 struct bpf_insn prog
[] = {
400 BPF_MOV64_IMM(BPF_REG_0
, 42),
404 return bpf_load_program(prog_type
, prog
,
405 ARRAY_SIZE(prog
), "GPL", 0, NULL
, 0);
/* Loads a program that tail-calls into prog-array 'mfd' at index 'idx'
 * and falls back to returning 41 when the tail call fails.
 * NOTE(review): the BPF_FUNC_tail_call operand line of the RAW_INSN is
 * not visible in this chunk.
 */
408 static int create_prog_dummy2(enum bpf_prog_type prog_type
, int mfd
, int idx
)
410 struct bpf_insn prog
[] = {
411 BPF_MOV64_IMM(BPF_REG_3
, idx
),
412 BPF_LD_MAP_FD(BPF_REG_2
, mfd
),
413 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
415 BPF_MOV64_IMM(BPF_REG_0
, 41),
419 return bpf_load_program(prog_type
, prog
,
420 ARRAY_SIZE(prog
), "GPL", 0, NULL
, 0);
/* Creates a PROG_ARRAY of max_elem slots and populates it with the two
 * dummy programs (dummy1 at p1key, dummy2 at p2key).  Returns the map
 * fd, or an error value on failure.
 * NOTE(review): the second parameter line (p1key/p2key), error cleanup
 * paths and the final return are not visible in this chunk.
 */
423 static int create_prog_array(enum bpf_prog_type prog_type
, uint32_t max_elem
,
429 mfd
= bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY
, sizeof(int),
430 sizeof(int), max_elem
, 0);
432 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY
))
434 printf("Failed to create prog array '%s'!\n", strerror(errno
));
438 p1fd
= create_prog_dummy1(prog_type
);
439 p2fd
= create_prog_dummy2(prog_type
, mfd
, p2key
);
440 if (p1fd
< 0 || p2fd
< 0)
442 if (bpf_map_update_elem(mfd
, &p1key
, &p1fd
, BPF_ANY
) < 0)
444 if (bpf_map_update_elem(mfd
, &p2key
, &p2fd
, BPF_ANY
) < 0)
/* Creates a one-element ARRAY_OF_MAPS whose inner map is a small array.
 * Returns the outer map fd.
 * NOTE(review): the inner-map size arguments, the close() of the inner
 * fd and the final return are not visible in this chunk.
 */
457 static int create_map_in_map(void)
459 int inner_map_fd
, outer_map_fd
;
461 inner_map_fd
= bpf_create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
463 if (inner_map_fd
< 0) {
464 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY
))
466 printf("Failed to create array '%s'!\n", strerror(errno
));
470 outer_map_fd
= bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
, NULL
,
471 sizeof(int), inner_map_fd
, 1, 0);
472 if (outer_map_fd
< 0) {
473 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
))
475 printf("Failed to create array of maps '%s'!\n",
/* Creates a (percpu) cgroup-storage map keyed by bpf_cgroup_storage_key
 * with TEST_DATA_LEN-byte values.  Returns the map fd.
 */
484 static int create_cgroup_storage(bool percpu
)
486 enum bpf_map_type type
= percpu
? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
:
487 BPF_MAP_TYPE_CGROUP_STORAGE
;
490 fd
= bpf_create_map(type
, sizeof(struct bpf_cgroup_storage_key
),
491 TEST_DATA_LEN
, 0, 0);
493 if (skip_unsupported_map(type
))
495 printf("Failed to create cgroup storage '%s'!\n",
502 /* struct bpf_spin_lock {
507 * struct bpf_spin_lock l;
510 static const char btf_str_sec
[] = "\0bpf_spin_lock\0val\0cnt\0l";
511 static __u32 btf_raw_types
[] = {
513 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED
, 0, 32, 4), /* [1] */
514 /* struct bpf_spin_lock */ /* [2] */
515 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT
, 0, 1), 4),
516 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
517 /* struct val */ /* [3] */
518 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT
, 0, 2), 8),
519 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
520 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
/* Assembles a raw BTF blob (header + btf_raw_types + btf_str_sec) in a
 * malloc'd buffer and loads it via bpf_load_btf().  Returns the BTF fd.
 * NOTE(review): the 'ptr' advance lines between the memcpy()s, the
 * free() of raw_btf and the return are not visible in this chunk.
 */
523 static int load_btf(void)
525 struct btf_header hdr
= {
527 .version
= BTF_VERSION
,
528 .hdr_len
= sizeof(struct btf_header
),
529 .type_len
= sizeof(btf_raw_types
),
530 .str_off
= sizeof(btf_raw_types
),
531 .str_len
= sizeof(btf_str_sec
),
/* single allocation sized for header + type section + string section */
536 ptr
= raw_btf
= malloc(sizeof(hdr
) + sizeof(btf_raw_types
) +
537 sizeof(btf_str_sec
));
539 memcpy(ptr
, &hdr
, sizeof(hdr
));
541 memcpy(ptr
, btf_raw_types
, hdr
.type_len
);
543 memcpy(ptr
, btf_str_sec
, hdr
.str_len
);
/* ptr - raw_btf is the total size written */
546 btf_fd
= bpf_load_btf(raw_btf
, ptr
- raw_btf
, 0, 0, 0);
/* Creates a one-element array map whose value type (BTF id 3, "struct
 * val") embeds a bpf_spin_lock, using the BTF loaded by load_btf().
 * Returns the map fd.
 * NOTE(review): key/value size fields of attr, the load_btf() call and
 * error handling are not visible in this chunk.
 */
553 static int create_map_spin_lock(void)
555 struct bpf_create_map_attr attr
= {
557 .map_type
= BPF_MAP_TYPE_ARRAY
,
561 .btf_key_type_id
= 1,
562 .btf_value_type_id
= 3,
569 attr
.btf_fd
= btf_fd
;
570 fd
= bpf_create_map_xattr(&attr
);
572 printf("Failed to create map with spin_lock\n");
/* Creates an SK_STORAGE map (BPF_F_NO_PREALLOC required) with BTF key
 * id 1 / value id 3 from load_btf().  Returns the map fd.
 * NOTE(review): key/value size fields, the load_btf() call, the
 * close(btf_fd) and error handling are not visible in this chunk.
 */
576 static int create_sk_storage_map(void)
578 struct bpf_create_map_attr attr
= {
580 .map_type
= BPF_MAP_TYPE_SK_STORAGE
,
584 .map_flags
= BPF_F_NO_PREALLOC
,
585 .btf_key_type_id
= 1,
586 .btf_value_type_id
= 3,
593 attr
.btf_fd
= btf_fd
;
594 fd
= bpf_create_map_xattr(&attr
);
597 printf("Failed to create sk_storage_map\n");
601 static char bpf_vlog
[UINT_MAX
>> 8];
/* Resolves a test's fixup tables: for every non-zero index in each
 * fixup_* array, creates the corresponding map (once per kind, cached
 * in map_fds[0..17]) and patches the map fd into prog[idx].imm.  Also
 * runs the test's fill_helper, if any, into a freshly calloc'd insns
 * buffer (freed later by do_test_single()).
 * NOTE(review): the 'do {' headers of the patch loops, several
 * fixup_*++ advance lines and the map geometry arguments of some
 * create_map() calls are not visible in this chunk.
 */
603 static void do_test_fixup(struct bpf_test
*test
, enum bpf_prog_type prog_type
,
604 struct bpf_insn
*prog
, int *map_fds
)
606 int *fixup_map_hash_8b
= test
->fixup_map_hash_8b
;
607 int *fixup_map_hash_48b
= test
->fixup_map_hash_48b
;
608 int *fixup_map_hash_16b
= test
->fixup_map_hash_16b
;
609 int *fixup_map_array_48b
= test
->fixup_map_array_48b
;
610 int *fixup_map_sockmap
= test
->fixup_map_sockmap
;
611 int *fixup_map_sockhash
= test
->fixup_map_sockhash
;
612 int *fixup_map_xskmap
= test
->fixup_map_xskmap
;
613 int *fixup_map_stacktrace
= test
->fixup_map_stacktrace
;
614 int *fixup_prog1
= test
->fixup_prog1
;
615 int *fixup_prog2
= test
->fixup_prog2
;
616 int *fixup_map_in_map
= test
->fixup_map_in_map
;
617 int *fixup_cgroup_storage
= test
->fixup_cgroup_storage
;
618 int *fixup_percpu_cgroup_storage
= test
->fixup_percpu_cgroup_storage
;
619 int *fixup_map_spin_lock
= test
->fixup_map_spin_lock
;
620 int *fixup_map_array_ro
= test
->fixup_map_array_ro
;
621 int *fixup_map_array_wo
= test
->fixup_map_array_wo
;
622 int *fixup_map_array_small
= test
->fixup_map_array_small
;
623 int *fixup_sk_storage_map
= test
->fixup_sk_storage_map
;
/* generate the program body first, so patch indices below are valid */
625 if (test
->fill_helper
) {
626 test
->fill_insns
= calloc(MAX_TEST_INSNS
, sizeof(struct bpf_insn
));
627 test
->fill_helper(test
);
630 /* Allocating HTs with 1 elem is fine here, since we only test
631 * for verifier and not do a runtime lookup, so the only thing
632 * that really matters is value size in this case.
634 if (*fixup_map_hash_8b
) {
635 map_fds
[0] = create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
636 sizeof(long long), 1);
638 prog
[*fixup_map_hash_8b
].imm
= map_fds
[0];
640 } while (*fixup_map_hash_8b
);
643 if (*fixup_map_hash_48b
) {
644 map_fds
[1] = create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
645 sizeof(struct test_val
), 1);
647 prog
[*fixup_map_hash_48b
].imm
= map_fds
[1];
648 fixup_map_hash_48b
++;
649 } while (*fixup_map_hash_48b
);
652 if (*fixup_map_hash_16b
) {
653 map_fds
[2] = create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
654 sizeof(struct other_val
), 1);
656 prog
[*fixup_map_hash_16b
].imm
= map_fds
[2];
657 fixup_map_hash_16b
++;
658 } while (*fixup_map_hash_16b
);
661 if (*fixup_map_array_48b
) {
662 map_fds
[3] = create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
663 sizeof(struct test_val
), 1);
664 update_map(map_fds
[3], 0);
666 prog
[*fixup_map_array_48b
].imm
= map_fds
[3];
667 fixup_map_array_48b
++;
668 } while (*fixup_map_array_48b
);
/* prog-array fixups for tail-call tests */
672 map_fds
[4] = create_prog_array(prog_type
, 4, 0);
674 prog
[*fixup_prog1
].imm
= map_fds
[4];
676 } while (*fixup_prog1
);
680 map_fds
[5] = create_prog_array(prog_type
, 8, 7);
682 prog
[*fixup_prog2
].imm
= map_fds
[5];
684 } while (*fixup_prog2
);
687 if (*fixup_map_in_map
) {
688 map_fds
[6] = create_map_in_map();
690 prog
[*fixup_map_in_map
].imm
= map_fds
[6];
692 } while (*fixup_map_in_map
);
695 if (*fixup_cgroup_storage
) {
696 map_fds
[7] = create_cgroup_storage(false);
698 prog
[*fixup_cgroup_storage
].imm
= map_fds
[7];
699 fixup_cgroup_storage
++;
700 } while (*fixup_cgroup_storage
);
703 if (*fixup_percpu_cgroup_storage
) {
704 map_fds
[8] = create_cgroup_storage(true);
706 prog
[*fixup_percpu_cgroup_storage
].imm
= map_fds
[8];
707 fixup_percpu_cgroup_storage
++;
708 } while (*fixup_percpu_cgroup_storage
);
710 if (*fixup_map_sockmap
) {
711 map_fds
[9] = create_map(BPF_MAP_TYPE_SOCKMAP
, sizeof(int),
714 prog
[*fixup_map_sockmap
].imm
= map_fds
[9];
716 } while (*fixup_map_sockmap
);
718 if (*fixup_map_sockhash
) {
719 map_fds
[10] = create_map(BPF_MAP_TYPE_SOCKHASH
, sizeof(int),
722 prog
[*fixup_map_sockhash
].imm
= map_fds
[10];
723 fixup_map_sockhash
++;
724 } while (*fixup_map_sockhash
);
726 if (*fixup_map_xskmap
) {
727 map_fds
[11] = create_map(BPF_MAP_TYPE_XSKMAP
, sizeof(int),
730 prog
[*fixup_map_xskmap
].imm
= map_fds
[11];
732 } while (*fixup_map_xskmap
);
734 if (*fixup_map_stacktrace
) {
735 map_fds
[12] = create_map(BPF_MAP_TYPE_STACK_TRACE
, sizeof(u32
),
738 prog
[*fixup_map_stacktrace
].imm
= map_fds
[12];
739 fixup_map_stacktrace
++;
740 } while (*fixup_map_stacktrace
);
742 if (*fixup_map_spin_lock
) {
743 map_fds
[13] = create_map_spin_lock();
745 prog
[*fixup_map_spin_lock
].imm
= map_fds
[13];
746 fixup_map_spin_lock
++;
747 } while (*fixup_map_spin_lock
);
/* read-only / write-only array variants use __create_map extra flags */
749 if (*fixup_map_array_ro
) {
750 map_fds
[14] = __create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
751 sizeof(struct test_val
), 1,
753 update_map(map_fds
[14], 0);
755 prog
[*fixup_map_array_ro
].imm
= map_fds
[14];
756 fixup_map_array_ro
++;
757 } while (*fixup_map_array_ro
);
759 if (*fixup_map_array_wo
) {
760 map_fds
[15] = __create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
761 sizeof(struct test_val
), 1,
763 update_map(map_fds
[15], 0);
765 prog
[*fixup_map_array_wo
].imm
= map_fds
[15];
766 fixup_map_array_wo
++;
767 } while (*fixup_map_array_wo
);
769 if (*fixup_map_array_small
) {
770 map_fds
[16] = __create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
772 update_map(map_fds
[16], 0);
774 prog
[*fixup_map_array_small
].imm
= map_fds
[16];
775 fixup_map_array_small
++;
776 } while (*fixup_map_array_small
);
778 if (*fixup_sk_storage_map
) {
779 map_fds
[17] = create_sk_storage_map();
781 prog
[*fixup_sk_storage_map
].imm
= map_fds
[17];
782 fixup_sk_storage_map
++;
783 } while (*fixup_sk_storage_map
);
/* Raises or drops the effective CAP_SYS_ADMIN capability of the current
 * process (used to flip between privileged and unprivileged test runs).
 * Returns 0 on success; error paths and cap_free() are not visible in
 * this chunk.
 */
787 static int set_admin(bool admin
)
790 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
793 caps
= cap_get_proc();
795 perror("cap_get_proc");
798 if (cap_set_flag(caps
, CAP_EFFECTIVE
, 1, &cap_val
,
799 admin
? CAP_SET
: CAP_CLEAR
)) {
800 perror("cap_set_flag");
803 if (cap_set_proc(caps
)) {
804 perror("cap_set_proc");
/* Executes the loaded program once via bpf_prog_test_run() and compares
 * its return value against expected_val.  EPERM and ENOTSUPP (524) are
 * tolerated (e.g. unprivileged runs / kernels without test_run).
 * A POINTER_VALUE expectation skips the exact-retval comparison.
 * NOTE(review): the set_admin() toggling around the run and the return
 * statements are not visible in this chunk.
 */
814 static int do_prog_test_run(int fd_prog
, bool unpriv
, uint32_t expected_val
,
815 void *data
, size_t size_data
)
817 __u8 tmp
[TEST_DATA_LEN
<< 2];
818 __u32 size_tmp
= sizeof(tmp
);
824 err
= bpf_prog_test_run(fd_prog
, 1, data
, size_data
,
825 tmp
, &size_tmp
, &retval
, NULL
);
/* 524 is ENOTSUPP, which has no userspace errno constant */
828 if (err
&& errno
!= 524/*ENOTSUPP*/ && errno
!= EPERM
) {
829 printf("Unexpected bpf_prog_test_run error ");
832 if (!err
&& retval
!= expected_val
&&
833 expected_val
!= POINTER_VALUE
) {
834 printf("FAIL retval %d != %d ", retval
, expected_val
);
/* Runs one verifier test case: fixes up maps, loads the program with
 * the requested alignment flags, compares the verifier verdict and log
 * against the (priv/unpriv-specific) expectations, optionally checks
 * insn_processed, and executes the program via do_prog_test_run() for
 * each configured retval.  Updates *passes / *errors accordingly.
 * NOTE(review): many interior lines (declarations of i/pflags/skips,
 * goto targets, close_fds cleanup, the else branches) are not visible
 * in this chunk; comments cover only what is shown.
 */
841 static void do_test_single(struct bpf_test
*test
, bool unpriv
,
842 int *passes
, int *errors
)
844 int fd_prog
, expected_ret
, alignment_prevented_execution
;
845 int prog_len
, prog_type
= test
->prog_type
;
846 struct bpf_insn
*prog
= test
->insns
;
847 int run_errs
, run_successes
;
848 int map_fds
[MAX_NR_MAPS
];
849 const char *expected_err
;
854 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
/* default prog type when the test does not specify one */
858 prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
;
860 do_test_fixup(test
, prog_type
, prog
, map_fds
);
861 if (test
->fill_insns
) {
862 prog
= test
->fill_insns
;
863 prog_len
= test
->prog_len
;
865 prog_len
= probe_filter_length(prog
);
867 /* If there were some map skips during fixup due to missing bpf
868 * features, skip this test.
870 if (fixup_skips
!= skips
)
874 if (test
->flags
& F_LOAD_WITH_STRICT_ALIGNMENT
)
875 pflags
|= BPF_F_STRICT_ALIGNMENT
;
876 if (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
)
877 pflags
|= BPF_F_ANY_ALIGNMENT
;
/* log level 4: capture the verifier log into bpf_vlog */
878 fd_prog
= bpf_verify_program(prog_type
, prog
, prog_len
, pflags
,
879 "GPL", 0, bpf_vlog
, sizeof(bpf_vlog
), 4);
880 if (fd_prog
< 0 && !bpf_probe_prog_type(prog_type
, 0)) {
881 printf("SKIP (unsupported program type %d)\n", prog_type
);
/* unprivileged expectations fall back to the privileged ones */
886 expected_ret
= unpriv
&& test
->result_unpriv
!= UNDEF
?
887 test
->result_unpriv
: test
->result
;
888 expected_err
= unpriv
&& test
->errstr_unpriv
?
889 test
->errstr_unpriv
: test
->errstr
;
891 alignment_prevented_execution
= 0;
893 if (expected_ret
== ACCEPT
) {
895 printf("FAIL\nFailed to load prog '%s'!\n",
899 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
901 (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
))
902 alignment_prevented_execution
= 1;
906 printf("FAIL\nUnexpected success to load!\n");
909 if (!strstr(bpf_vlog
, expected_err
)) {
910 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
911 expected_err
, bpf_vlog
);
/* optional check of the verifier's "processed N insns" counter */
916 if (test
->insn_processed
) {
917 uint32_t insn_processed
;
920 proc
= strstr(bpf_vlog
, "processed ");
921 insn_processed
= atoi(proc
+ 10);
922 if (test
->insn_processed
!= insn_processed
) {
923 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
924 insn_processed
, test
->insn_processed
);
931 if (!alignment_prevented_execution
&& fd_prog
>= 0) {
932 uint32_t expected_val
;
936 expected_val
= unpriv
&& test
->retval_unpriv
?
937 test
->retval_unpriv
: test
->retval
;
939 err
= do_prog_test_run(fd_prog
, unpriv
, expected_val
,
940 test
->data
, sizeof(test
->data
));
/* multi-run tests supply per-run data and expected retvals */
947 for (i
= 0; i
< test
->runs
; i
++) {
948 if (unpriv
&& test
->retvals
[i
].retval_unpriv
)
949 expected_val
= test
->retvals
[i
].retval_unpriv
;
951 expected_val
= test
->retvals
[i
].retval
;
953 err
= do_prog_test_run(fd_prog
, unpriv
, expected_val
,
954 test
->retvals
[i
].data
,
955 sizeof(test
->retvals
[i
].data
));
957 printf("(run %d/%d) ", i
+ 1, test
->runs
);
967 if (run_successes
> 1)
968 printf("%d cases ", run_successes
);
970 if (alignment_prevented_execution
)
971 printf(" (NOTE: not executed due to unknown alignment)");
/* cleanup: release fill buffer and all map fds */
978 if (test
->fill_insns
)
979 free(test
->fill_insns
);
981 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
987 printf("%s", bpf_vlog
);
/* Returns true when the process currently has effective CAP_SYS_ADMIN.
 * NOTE(review): the CAP_IS_SUPPORTED probe tests CAP_SETFCAP while the
 * rest of the function deals with CAP_SYS_ADMIN, and the perror text
 * "cap_get_flag" on that path is misleading (no cap_get_flag call has
 * failed yet) — looks intentional upstream, but worth confirming.
 */
991 static bool is_admin(void)
994 cap_flag_value_t sysadmin
= CAP_CLEAR
;
995 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
997 #ifdef CAP_IS_SUPPORTED
998 if (!CAP_IS_SUPPORTED(CAP_SETFCAP
)) {
999 perror("cap_get_flag");
1003 caps
= cap_get_proc();
1005 perror("cap_get_proc");
1008 if (cap_get_flag(caps
, cap_val
, CAP_EFFECTIVE
, &sysadmin
))
1009 perror("cap_get_flag");
1012 return (sysadmin
== CAP_SET
);
/* Reads /proc/sys/kernel/unprivileged_bpf_disabled and sets the global
 * unpriv_disabled flag when the sysctl is unreadable or non-zero.
 * NOTE(review): the 'buf'/'fd' declarations, the early return after
 * the fopen failure and the fclose(fd) are not visible in this chunk.
 */
1015 static void get_unpriv_disabled()
1020 fd
= fopen("/proc/sys/"UNPRIV_SYSCTL
, "r");
1022 perror("fopen /proc/sys/"UNPRIV_SYSCTL
);
1023 unpriv_disabled
= true;
/* reading 2 bytes is enough: the sysctl value is a single digit */
1026 if (fgets(buf
, 2, fd
) == buf
&& atoi(buf
))
1027 unpriv_disabled
= true;
/* True when the test's program type can also be loaded without
 * privileges: unspecified (0), SOCKET_FILTER or CGROUP_SKB.
 */
1031 static bool test_as_unpriv(struct bpf_test
*test
)
1033 return !test
->prog_type
||
1034 test
->prog_type
== BPF_PROG_TYPE_SOCKET_FILTER
||
1035 test
->prog_type
== BPF_PROG_TYPE_CGROUP_SKB
;
/* Iterates over tests[from..to): runs each case unprivileged ("/u",
 * via set_admin(false)) when the type allows it, then privileged
 * ("/p") when running as root.  Prints a summary and returns
 * EXIT_FAILURE if any case failed.
 * NOTE(review): the skips counter, the set_admin(true/false) calls and
 * the surrounding else branches are not visible in this chunk.
 */
1038 static int do_test(bool unpriv
, unsigned int from
, unsigned int to
)
1040 int i
, passes
= 0, errors
= 0;
1042 for (i
= from
; i
< to
; i
++) {
1043 struct bpf_test
*test
= &tests
[i
];
1045 /* Program types that are not supported by non-root we
1048 if (test_as_unpriv(test
) && unpriv_disabled
) {
1049 printf("#%d/u %s SKIP\n", i
, test
->descr
);
1051 } else if (test_as_unpriv(test
)) {
1054 printf("#%d/u %s ", i
, test
->descr
);
1055 do_test_single(test
, true, &passes
, &errors
);
1061 printf("#%d/p %s SKIP\n", i
, test
->descr
);
1064 printf("#%d/p %s ", i
, test
->descr
);
1065 do_test_single(test
, false, &passes
, &errors
);
1069 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes
,
1071 return errors
? EXIT_FAILURE
: EXIT_SUCCESS
;
/* Entry point.  Optional argv: two numbers select an inclusive test
 * range [l, u]; a single number selects one test.  Refuses to run
 * unprivileged when the unprivileged_bpf_disabled sysctl is set.
 * NOTE(review): range-assignment lines inside the argc branches are
 * not visible in this chunk.  atoi() silently maps bad input to 0 —
 * strtol() with validation would be stricter; confirm before relying
 * on argument errors being reported.
 */
1074 int main(int argc
, char **argv
)
1076 unsigned int from
= 0, to
= ARRAY_SIZE(tests
);
1077 bool unpriv
= !is_admin();
1080 unsigned int l
= atoi(argv
[argc
- 2]);
1081 unsigned int u
= atoi(argv
[argc
- 1]);
1083 if (l
< to
&& u
< to
) {
1087 } else if (argc
== 2) {
1088 unsigned int t
= atoi(argv
[argc
- 1]);
1096 get_unpriv_disabled();
1097 if (unpriv
&& unpriv_disabled
) {
1098 printf("Cannot run as unprivileged user with sysctl %s.\n",
1100 return EXIT_FAILURE
;
1103 bpf_semi_rand_init();
1104 return do_test(unpriv
, from
, to
);