/* Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <linux/bpf.h>
14 #include <linux/unistd.h>
16 #include <linux/filter.h>
/* Number of elements in a true array (not valid on a decayed pointer). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
24 struct bpf_insn insns
[MAX_INSNS
];
33 static struct bpf_test tests
[] = {
37 BPF_MOV64_IMM(BPF_REG_1
, 1),
38 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
39 BPF_MOV64_IMM(BPF_REG_2
, 3),
40 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
41 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
42 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
43 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
54 .errstr
= "unreachable",
60 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
61 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
64 .errstr
= "unreachable",
70 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
73 .errstr
= "jump out of range",
79 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
82 .errstr
= "jump out of range",
88 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
89 BPF_LD_IMM64(BPF_REG_0
, 0),
90 BPF_LD_IMM64(BPF_REG_0
, 0),
91 BPF_LD_IMM64(BPF_REG_0
, 1),
92 BPF_LD_IMM64(BPF_REG_0
, 1),
93 BPF_MOV64_IMM(BPF_REG_0
, 2),
96 .errstr
= "invalid BPF_LD_IMM insn",
102 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
103 BPF_LD_IMM64(BPF_REG_0
, 0),
104 BPF_LD_IMM64(BPF_REG_0
, 0),
105 BPF_LD_IMM64(BPF_REG_0
, 1),
106 BPF_LD_IMM64(BPF_REG_0
, 1),
109 .errstr
= "invalid BPF_LD_IMM insn",
115 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
116 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
117 BPF_LD_IMM64(BPF_REG_0
, 0),
118 BPF_LD_IMM64(BPF_REG_0
, 0),
119 BPF_LD_IMM64(BPF_REG_0
, 1),
120 BPF_LD_IMM64(BPF_REG_0
, 1),
123 .errstr
= "invalid bpf_ld_imm64 insn",
129 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
132 .errstr
= "invalid bpf_ld_imm64 insn",
138 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
140 .errstr
= "invalid bpf_ld_imm64 insn",
146 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
148 .errstr
= "jump out of range",
154 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
157 .errstr
= "back-edge",
163 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
164 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
165 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
166 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
169 .errstr
= "back-edge",
175 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
176 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
177 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
178 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
181 .errstr
= "back-edge",
185 "read uninitialized register",
187 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
190 .errstr
= "R2 !read_ok",
194 "read invalid register",
196 BPF_MOV64_REG(BPF_REG_0
, -1),
199 .errstr
= "R15 is invalid",
203 "program doesn't init R0 before exit",
205 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
208 .errstr
= "R0 !read_ok",
212 "program doesn't init R0 before exit in all branches",
214 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
215 BPF_MOV64_IMM(BPF_REG_0
, 1),
216 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
219 .errstr
= "R0 !read_ok",
223 "stack out of bounds",
225 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
228 .errstr
= "invalid stack",
232 "invalid call insn1",
234 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
237 .errstr
= "BPF_CALL uses reserved",
241 "invalid call insn2",
243 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
246 .errstr
= "BPF_CALL uses reserved",
250 "invalid function call",
252 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
255 .errstr
= "invalid func 1234567",
259 "uninitialized stack1",
261 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
262 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
263 BPF_LD_MAP_FD(BPF_REG_1
, 0),
264 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
268 .errstr
= "invalid indirect read from stack",
272 "uninitialized stack2",
274 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
275 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
278 .errstr
= "invalid read from stack",
282 "check valid spill/fill",
284 /* spill R1(ctx) into stack */
285 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
287 /* fill it back into R2 */
288 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
290 /* should be able to access R0 = *(R2 + 8) */
291 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
292 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
298 "check corrupted spill/fill",
300 /* spill R1(ctx) into stack */
301 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
303 /* mess up with R1 pointer on stack */
304 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
306 /* fill back into R0 should fail */
307 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
311 .errstr
= "corrupted spill",
315 "invalid src register in STX",
317 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
320 .errstr
= "R15 is invalid",
324 "invalid dst register in STX",
326 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
329 .errstr
= "R14 is invalid",
333 "invalid dst register in ST",
335 BPF_ST_MEM(BPF_B
, 14, -1, -1),
338 .errstr
= "R14 is invalid",
342 "invalid src register in LDX",
344 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
347 .errstr
= "R12 is invalid",
351 "invalid dst register in LDX",
353 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
356 .errstr
= "R11 is invalid",
362 BPF_RAW_INSN(0, 0, 0, 0, 0),
365 .errstr
= "invalid BPF_LD_IMM",
371 BPF_RAW_INSN(1, 0, 0, 0, 0),
374 .errstr
= "BPF_LDX uses reserved fields",
380 BPF_RAW_INSN(-1, 0, 0, 0, 0),
383 .errstr
= "invalid BPF_ALU opcode f0",
389 BPF_RAW_INSN(-1, -1, -1, -1, -1),
392 .errstr
= "invalid BPF_ALU opcode f0",
398 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
401 .errstr
= "BPF_ALU uses reserved fields",
405 "misaligned read from stack",
407 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
408 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
411 .errstr
= "misaligned access",
415 "invalid map_fd for function call",
417 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
418 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
419 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
420 BPF_LD_MAP_FD(BPF_REG_1
, 0),
421 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
424 .errstr
= "fd 0 is not pointing to valid bpf_map",
428 "don't check return value before access",
430 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
431 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
432 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
433 BPF_LD_MAP_FD(BPF_REG_1
, 0),
434 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
435 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
439 .errstr
= "R0 invalid mem access 'map_value_or_null'",
443 "access memory with incorrect alignment",
445 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
446 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
447 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
448 BPF_LD_MAP_FD(BPF_REG_1
, 0),
449 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
450 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
451 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
455 .errstr
= "misaligned access",
459 "sometimes access memory with incorrect alignment",
461 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
462 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
463 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
464 BPF_LD_MAP_FD(BPF_REG_1
, 0),
465 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
466 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
467 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
469 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
473 .errstr
= "R0 invalid mem access",
479 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
480 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
481 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
482 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
483 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
484 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
485 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
486 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
487 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
488 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
489 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
490 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
491 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
492 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
493 BPF_MOV64_IMM(BPF_REG_0
, 0),
501 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
502 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
503 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
504 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
505 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
506 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
507 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
508 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
509 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
510 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
511 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
512 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
513 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
514 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
515 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
516 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
517 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
518 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
519 BPF_MOV64_IMM(BPF_REG_0
, 0),
527 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
528 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
529 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
530 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
531 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
532 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
533 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
534 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
535 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
536 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
537 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
538 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
539 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
540 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
541 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
542 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
543 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
544 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
545 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
546 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
547 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
548 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
549 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
550 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
551 BPF_LD_MAP_FD(BPF_REG_1
, 0),
552 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
561 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
562 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
563 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
564 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
565 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
566 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
567 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
568 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
569 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
570 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
571 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
572 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
573 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
574 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
575 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
576 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
577 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
578 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
579 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
580 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
581 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
582 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
583 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
584 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
585 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
586 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
587 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
588 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
589 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
590 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
591 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
592 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
593 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
594 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
595 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
596 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
597 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
598 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
599 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
600 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
601 BPF_MOV64_IMM(BPF_REG_0
, 0),
609 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
610 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
611 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
612 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
613 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
614 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
615 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
616 BPF_MOV64_IMM(BPF_REG_0
, 0),
617 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
618 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
619 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
620 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
621 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
622 BPF_MOV64_IMM(BPF_REG_0
, 0),
623 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
624 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
625 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
626 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
627 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
628 BPF_MOV64_IMM(BPF_REG_0
, 0),
629 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
630 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
631 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
632 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
633 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
634 BPF_MOV64_IMM(BPF_REG_0
, 0),
635 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
636 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
637 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
638 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
639 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
640 BPF_MOV64_IMM(BPF_REG_0
, 0),
647 static int probe_filter_length(struct bpf_insn
*fp
)
651 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
652 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
658 static int create_map(void)
660 long long key
, value
= 0;
663 map_fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(key
), sizeof(value
), 1024);
665 printf("failed to create map '%s'\n", strerror(errno
));
671 static int test(void)
673 int prog_fd
, i
, pass_cnt
= 0, err_cnt
= 0;
675 for (i
= 0; i
< ARRAY_SIZE(tests
); i
++) {
676 struct bpf_insn
*prog
= tests
[i
].insns
;
677 int prog_len
= probe_filter_length(prog
);
678 int *fixup
= tests
[i
].fixup
;
682 map_fd
= create_map();
685 prog
[*fixup
].imm
= map_fd
;
689 printf("#%d %s ", i
, tests
[i
].descr
);
691 prog_fd
= bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER
, prog
,
692 prog_len
* sizeof(struct bpf_insn
),
695 if (tests
[i
].result
== ACCEPT
) {
697 printf("FAIL\nfailed to load prog '%s'\n",
699 printf("%s", bpf_log_buf
);
705 printf("FAIL\nunexpected success to load\n");
706 printf("%s", bpf_log_buf
);
710 if (strstr(bpf_log_buf
, tests
[i
].errstr
) == 0) {
711 printf("FAIL\nunexpected error message: %s",
726 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt
, err_cnt
);