/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <asm/types.h>
13 #include <linux/types.h>
26 #include <sys/capability.h>
27 #include <sys/resource.h>
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
37 # include "autoconf.h"
39 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
40 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
44 #include "../../../include/linux/filter.h"
47 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
54 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
55 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
59 struct bpf_insn insns
[MAX_INSNS
];
60 int fixup_map1
[MAX_FIXUPS
];
61 int fixup_map2
[MAX_FIXUPS
];
62 int fixup_prog
[MAX_FIXUPS
];
63 int fixup_map_in_map
[MAX_FIXUPS
];
65 const char *errstr_unpriv
;
70 } result
, result_unpriv
;
71 enum bpf_prog_type prog_type
;
75 /* Note we want this to be 64 bit aligned so that the end of our array is
76 * actually the end of the structure.
78 #define MAX_ENTRIES 11
85 static struct bpf_test tests
[] = {
89 BPF_MOV64_IMM(BPF_REG_1
, 1),
90 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
91 BPF_MOV64_IMM(BPF_REG_2
, 3),
92 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
93 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
94 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
95 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
106 .errstr
= "unreachable",
112 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
113 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
116 .errstr
= "unreachable",
122 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
125 .errstr
= "jump out of range",
129 "out of range jump2",
131 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
134 .errstr
= "jump out of range",
140 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
141 BPF_LD_IMM64(BPF_REG_0
, 0),
142 BPF_LD_IMM64(BPF_REG_0
, 0),
143 BPF_LD_IMM64(BPF_REG_0
, 1),
144 BPF_LD_IMM64(BPF_REG_0
, 1),
145 BPF_MOV64_IMM(BPF_REG_0
, 2),
148 .errstr
= "invalid BPF_LD_IMM insn",
149 .errstr_unpriv
= "R1 pointer comparison",
155 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
156 BPF_LD_IMM64(BPF_REG_0
, 0),
157 BPF_LD_IMM64(BPF_REG_0
, 0),
158 BPF_LD_IMM64(BPF_REG_0
, 1),
159 BPF_LD_IMM64(BPF_REG_0
, 1),
162 .errstr
= "invalid BPF_LD_IMM insn",
163 .errstr_unpriv
= "R1 pointer comparison",
169 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
170 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
171 BPF_LD_IMM64(BPF_REG_0
, 0),
172 BPF_LD_IMM64(BPF_REG_0
, 0),
173 BPF_LD_IMM64(BPF_REG_0
, 1),
174 BPF_LD_IMM64(BPF_REG_0
, 1),
177 .errstr
= "invalid bpf_ld_imm64 insn",
183 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
186 .errstr
= "invalid bpf_ld_imm64 insn",
192 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
194 .errstr
= "invalid bpf_ld_imm64 insn",
200 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
201 BPF_RAW_INSN(0, 0, 0, 0, 0),
209 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
210 BPF_RAW_INSN(0, 0, 0, 0, 1),
218 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 1, 1),
219 BPF_RAW_INSN(0, 0, 0, 0, 1),
222 .errstr
= "uses reserved fields",
228 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
229 BPF_RAW_INSN(0, 0, 0, 1, 1),
232 .errstr
= "invalid bpf_ld_imm64 insn",
238 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
239 BPF_RAW_INSN(0, BPF_REG_1
, 0, 0, 1),
242 .errstr
= "invalid bpf_ld_imm64 insn",
248 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
249 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
252 .errstr
= "invalid bpf_ld_imm64 insn",
258 BPF_MOV64_IMM(BPF_REG_1
, 0),
259 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
260 BPF_RAW_INSN(0, 0, 0, 0, 1),
263 .errstr
= "not pointing to valid bpf_map",
269 BPF_MOV64_IMM(BPF_REG_1
, 0),
270 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
271 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
274 .errstr
= "invalid bpf_ld_imm64 insn",
280 BPF_MOV64_IMM(BPF_REG_0
, 1),
281 BPF_ALU32_IMM(BPF_ARSH
, BPF_REG_0
, 5),
285 .errstr
= "BPF_ARSH not supported for 32 bit ALU",
290 BPF_MOV64_IMM(BPF_REG_0
, 1),
291 BPF_MOV64_IMM(BPF_REG_1
, 5),
292 BPF_ALU32_REG(BPF_ARSH
, BPF_REG_0
, BPF_REG_1
),
296 .errstr
= "BPF_ARSH not supported for 32 bit ALU",
301 BPF_MOV64_IMM(BPF_REG_0
, 1),
302 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_0
, 5),
310 BPF_MOV64_IMM(BPF_REG_0
, 1),
311 BPF_MOV64_IMM(BPF_REG_1
, 5),
312 BPF_ALU64_REG(BPF_ARSH
, BPF_REG_0
, BPF_REG_1
),
320 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
322 .errstr
= "jump out of range",
328 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
331 .errstr
= "back-edge",
337 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
338 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
339 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
340 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
343 .errstr
= "back-edge",
349 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
350 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
351 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
352 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
355 .errstr
= "back-edge",
359 "read uninitialized register",
361 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
364 .errstr
= "R2 !read_ok",
368 "read invalid register",
370 BPF_MOV64_REG(BPF_REG_0
, -1),
373 .errstr
= "R15 is invalid",
377 "program doesn't init R0 before exit",
379 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
382 .errstr
= "R0 !read_ok",
386 "program doesn't init R0 before exit in all branches",
388 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
389 BPF_MOV64_IMM(BPF_REG_0
, 1),
390 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
393 .errstr
= "R0 !read_ok",
394 .errstr_unpriv
= "R1 pointer comparison",
398 "stack out of bounds",
400 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
403 .errstr
= "invalid stack",
407 "invalid call insn1",
409 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
412 .errstr
= "BPF_CALL uses reserved",
416 "invalid call insn2",
418 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
421 .errstr
= "BPF_CALL uses reserved",
425 "invalid function call",
427 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
430 .errstr
= "invalid func unknown#1234567",
434 "uninitialized stack1",
436 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
438 BPF_LD_MAP_FD(BPF_REG_1
, 0),
439 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
440 BPF_FUNC_map_lookup_elem
),
444 .errstr
= "invalid indirect read from stack",
448 "uninitialized stack2",
450 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
451 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
454 .errstr
= "invalid read from stack",
458 "invalid fp arithmetic",
459 /* If this gets ever changed, make sure JITs can deal with it. */
461 BPF_MOV64_IMM(BPF_REG_0
, 0),
462 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
463 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 8),
464 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
467 .errstr
= "R1 subtraction from stack pointer",
471 "non-invalid fp arithmetic",
473 BPF_MOV64_IMM(BPF_REG_0
, 0),
474 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
480 "invalid argument register",
482 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
483 BPF_FUNC_get_cgroup_classid
),
484 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
485 BPF_FUNC_get_cgroup_classid
),
488 .errstr
= "R1 !read_ok",
490 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
493 "non-invalid argument register",
495 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
496 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
497 BPF_FUNC_get_cgroup_classid
),
498 BPF_ALU64_REG(BPF_MOV
, BPF_REG_1
, BPF_REG_6
),
499 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
500 BPF_FUNC_get_cgroup_classid
),
504 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
507 "check valid spill/fill",
509 /* spill R1(ctx) into stack */
510 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
511 /* fill it back into R2 */
512 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
513 /* should be able to access R0 = *(R2 + 8) */
514 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
515 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
518 .errstr_unpriv
= "R0 leaks addr",
520 .result_unpriv
= REJECT
,
523 "check valid spill/fill, skb mark",
525 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
526 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
527 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
528 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
529 offsetof(struct __sk_buff
, mark
)),
533 .result_unpriv
= ACCEPT
,
536 "check corrupted spill/fill",
538 /* spill R1(ctx) into stack */
539 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
540 /* mess up with R1 pointer on stack */
541 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
542 /* fill back into R0 should fail */
543 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
546 .errstr_unpriv
= "attempt to corrupt spilled",
547 .errstr
= "corrupted spill",
551 "invalid src register in STX",
553 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
556 .errstr
= "R15 is invalid",
560 "invalid dst register in STX",
562 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
565 .errstr
= "R14 is invalid",
569 "invalid dst register in ST",
571 BPF_ST_MEM(BPF_B
, 14, -1, -1),
574 .errstr
= "R14 is invalid",
578 "invalid src register in LDX",
580 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
583 .errstr
= "R12 is invalid",
587 "invalid dst register in LDX",
589 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
592 .errstr
= "R11 is invalid",
598 BPF_RAW_INSN(0, 0, 0, 0, 0),
601 .errstr
= "invalid BPF_LD_IMM",
607 BPF_RAW_INSN(1, 0, 0, 0, 0),
610 .errstr
= "BPF_LDX uses reserved fields",
616 BPF_RAW_INSN(-1, 0, 0, 0, 0),
619 .errstr
= "invalid BPF_ALU opcode f0",
625 BPF_RAW_INSN(-1, -1, -1, -1, -1),
628 .errstr
= "invalid BPF_ALU opcode f0",
634 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
637 .errstr
= "BPF_ALU uses reserved fields",
641 "misaligned read from stack",
643 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
644 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
647 .errstr
= "misaligned stack access",
651 "invalid map_fd for function call",
653 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
654 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
655 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
656 BPF_LD_MAP_FD(BPF_REG_1
, 0),
657 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
658 BPF_FUNC_map_delete_elem
),
661 .errstr
= "fd 0 is not pointing to valid bpf_map",
665 "don't check return value before access",
667 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
668 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
669 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
670 BPF_LD_MAP_FD(BPF_REG_1
, 0),
671 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
672 BPF_FUNC_map_lookup_elem
),
673 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
677 .errstr
= "R0 invalid mem access 'map_value_or_null'",
681 "access memory with incorrect alignment",
683 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
684 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
685 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
686 BPF_LD_MAP_FD(BPF_REG_1
, 0),
687 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
688 BPF_FUNC_map_lookup_elem
),
689 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
690 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
694 .errstr
= "misaligned value access",
696 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
699 "sometimes access memory with incorrect alignment",
701 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
702 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
703 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
704 BPF_LD_MAP_FD(BPF_REG_1
, 0),
705 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
706 BPF_FUNC_map_lookup_elem
),
707 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
708 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
710 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
714 .errstr
= "R0 invalid mem access",
715 .errstr_unpriv
= "R0 leaks addr",
717 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
722 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
723 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
724 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
725 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
726 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
727 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
728 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
729 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
730 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
731 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
732 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
733 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
734 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
735 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
736 BPF_MOV64_IMM(BPF_REG_0
, 0),
739 .errstr_unpriv
= "R1 pointer comparison",
740 .result_unpriv
= REJECT
,
746 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
747 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
748 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
749 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
750 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
751 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
752 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
753 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
754 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
755 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
756 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
757 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
758 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
759 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
760 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
761 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
762 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
763 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
764 BPF_MOV64_IMM(BPF_REG_0
, 0),
767 .errstr_unpriv
= "R1 pointer comparison",
768 .result_unpriv
= REJECT
,
774 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
775 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
776 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
777 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
778 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
779 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
780 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
781 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
782 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
783 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
784 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
785 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
786 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
787 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
788 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
789 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
790 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
791 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
792 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
793 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
794 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
795 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
796 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
797 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
798 BPF_LD_MAP_FD(BPF_REG_1
, 0),
799 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
800 BPF_FUNC_map_delete_elem
),
803 .fixup_map1
= { 24 },
804 .errstr_unpriv
= "R1 pointer comparison",
805 .result_unpriv
= REJECT
,
811 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
812 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
813 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
814 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
815 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
816 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
817 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
818 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
819 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
820 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
821 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
822 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
823 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
824 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
825 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
826 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
827 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
828 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
829 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
830 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
831 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
832 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
833 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
834 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
835 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
836 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
837 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
838 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
839 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
840 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
841 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
842 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
843 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
844 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
845 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
846 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
847 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
848 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
849 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
850 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
851 BPF_MOV64_IMM(BPF_REG_0
, 0),
854 .errstr_unpriv
= "R1 pointer comparison",
855 .result_unpriv
= REJECT
,
861 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
862 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
863 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
864 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
865 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
866 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
867 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
868 BPF_MOV64_IMM(BPF_REG_0
, 0),
869 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
870 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
871 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
872 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
873 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
874 BPF_MOV64_IMM(BPF_REG_0
, 0),
875 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
876 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
877 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
878 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
879 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
880 BPF_MOV64_IMM(BPF_REG_0
, 0),
881 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
882 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
883 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
884 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
885 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
886 BPF_MOV64_IMM(BPF_REG_0
, 0),
887 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
888 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
889 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
890 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
891 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
892 BPF_MOV64_IMM(BPF_REG_0
, 0),
895 .errstr_unpriv
= "R1 pointer comparison",
896 .result_unpriv
= REJECT
,
900 "access skb fields ok",
902 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
903 offsetof(struct __sk_buff
, len
)),
904 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
905 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
906 offsetof(struct __sk_buff
, mark
)),
907 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
908 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
909 offsetof(struct __sk_buff
, pkt_type
)),
910 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
911 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
912 offsetof(struct __sk_buff
, queue_mapping
)),
913 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
914 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
915 offsetof(struct __sk_buff
, protocol
)),
916 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
917 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
918 offsetof(struct __sk_buff
, vlan_present
)),
919 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
920 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
921 offsetof(struct __sk_buff
, vlan_tci
)),
922 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
923 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
924 offsetof(struct __sk_buff
, napi_id
)),
925 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
931 "access skb fields bad1",
933 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
936 .errstr
= "invalid bpf_context access",
940 "access skb fields bad2",
942 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
943 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
944 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
945 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
946 BPF_LD_MAP_FD(BPF_REG_1
, 0),
947 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
948 BPF_FUNC_map_lookup_elem
),
949 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
951 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
952 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
953 offsetof(struct __sk_buff
, pkt_type
)),
957 .errstr
= "different pointers",
958 .errstr_unpriv
= "R1 pointer comparison",
962 "access skb fields bad3",
964 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
965 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
966 offsetof(struct __sk_buff
, pkt_type
)),
968 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
969 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
970 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
971 BPF_LD_MAP_FD(BPF_REG_1
, 0),
972 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
973 BPF_FUNC_map_lookup_elem
),
974 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
976 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
977 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
980 .errstr
= "different pointers",
981 .errstr_unpriv
= "R1 pointer comparison",
985 "access skb fields bad4",
987 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
988 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
989 offsetof(struct __sk_buff
, len
)),
990 BPF_MOV64_IMM(BPF_REG_0
, 0),
992 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
993 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
994 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
995 BPF_LD_MAP_FD(BPF_REG_1
, 0),
996 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
997 BPF_FUNC_map_lookup_elem
),
998 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
1000 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
1001 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
1003 .fixup_map1
= { 7 },
1004 .errstr
= "different pointers",
1005 .errstr_unpriv
= "R1 pointer comparison",
1009 "invalid access __sk_buff family",
1011 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1012 offsetof(struct __sk_buff
, family
)),
1015 .errstr
= "invalid bpf_context access",
1019 "invalid access __sk_buff remote_ip4",
1021 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1022 offsetof(struct __sk_buff
, remote_ip4
)),
1025 .errstr
= "invalid bpf_context access",
1029 "invalid access __sk_buff local_ip4",
1031 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1032 offsetof(struct __sk_buff
, local_ip4
)),
1035 .errstr
= "invalid bpf_context access",
1039 "invalid access __sk_buff remote_ip6",
1041 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1042 offsetof(struct __sk_buff
, remote_ip6
)),
1045 .errstr
= "invalid bpf_context access",
1049 "invalid access __sk_buff local_ip6",
1051 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1052 offsetof(struct __sk_buff
, local_ip6
)),
1055 .errstr
= "invalid bpf_context access",
1059 "invalid access __sk_buff remote_port",
1061 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1062 offsetof(struct __sk_buff
, remote_port
)),
1065 .errstr
= "invalid bpf_context access",
1069 "invalid access __sk_buff remote_port",
1071 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1072 offsetof(struct __sk_buff
, local_port
)),
1075 .errstr
= "invalid bpf_context access",
1079 "valid access __sk_buff family",
1081 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1082 offsetof(struct __sk_buff
, family
)),
1086 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1089 "valid access __sk_buff remote_ip4",
1091 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1092 offsetof(struct __sk_buff
, remote_ip4
)),
1096 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1099 "valid access __sk_buff local_ip4",
1101 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1102 offsetof(struct __sk_buff
, local_ip4
)),
1106 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1109 "valid access __sk_buff remote_ip6",
1111 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1112 offsetof(struct __sk_buff
, remote_ip6
[0])),
1113 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1114 offsetof(struct __sk_buff
, remote_ip6
[1])),
1115 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1116 offsetof(struct __sk_buff
, remote_ip6
[2])),
1117 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1118 offsetof(struct __sk_buff
, remote_ip6
[3])),
1122 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1125 "valid access __sk_buff local_ip6",
1127 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1128 offsetof(struct __sk_buff
, local_ip6
[0])),
1129 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1130 offsetof(struct __sk_buff
, local_ip6
[1])),
1131 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1132 offsetof(struct __sk_buff
, local_ip6
[2])),
1133 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1134 offsetof(struct __sk_buff
, local_ip6
[3])),
1138 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1141 "valid access __sk_buff remote_port",
1143 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1144 offsetof(struct __sk_buff
, remote_port
)),
1148 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1151 "valid access __sk_buff remote_port",
1153 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1154 offsetof(struct __sk_buff
, local_port
)),
1158 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1161 "invalid access of tc_classid for SK_SKB",
1163 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1164 offsetof(struct __sk_buff
, tc_classid
)),
1168 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1169 .errstr
= "invalid bpf_context access",
1172 "invalid access of skb->mark for SK_SKB",
1174 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1175 offsetof(struct __sk_buff
, mark
)),
1179 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1180 .errstr
= "invalid bpf_context access",
1183 "check skb->mark is not writeable by SK_SKB",
1185 BPF_MOV64_IMM(BPF_REG_0
, 0),
1186 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1187 offsetof(struct __sk_buff
, mark
)),
1191 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1192 .errstr
= "invalid bpf_context access",
1195 "check skb->tc_index is writeable by SK_SKB",
1197 BPF_MOV64_IMM(BPF_REG_0
, 0),
1198 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1199 offsetof(struct __sk_buff
, tc_index
)),
1203 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1206 "check skb->priority is writeable by SK_SKB",
1208 BPF_MOV64_IMM(BPF_REG_0
, 0),
1209 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1210 offsetof(struct __sk_buff
, priority
)),
1214 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1217 "direct packet read for SK_SKB",
1219 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1220 offsetof(struct __sk_buff
, data
)),
1221 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1222 offsetof(struct __sk_buff
, data_end
)),
1223 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1225 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1226 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
1227 BPF_MOV64_IMM(BPF_REG_0
, 0),
1231 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1234 "direct packet write for SK_SKB",
1236 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1237 offsetof(struct __sk_buff
, data
)),
1238 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1239 offsetof(struct __sk_buff
, data_end
)),
1240 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1241 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1242 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1243 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
1244 BPF_MOV64_IMM(BPF_REG_0
, 0),
1248 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1251 "overlapping checks for direct packet access SK_SKB",
1253 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1254 offsetof(struct __sk_buff
, data
)),
1255 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1256 offsetof(struct __sk_buff
, data_end
)),
1257 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1258 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1259 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
1260 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
1261 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
1262 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
1263 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
1264 BPF_MOV64_IMM(BPF_REG_0
, 0),
1268 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1271 "check skb->mark is not writeable by sockets",
1273 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1274 offsetof(struct __sk_buff
, mark
)),
1277 .errstr
= "invalid bpf_context access",
1278 .errstr_unpriv
= "R1 leaks addr",
1282 "check skb->tc_index is not writeable by sockets",
1284 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1285 offsetof(struct __sk_buff
, tc_index
)),
1288 .errstr
= "invalid bpf_context access",
1289 .errstr_unpriv
= "R1 leaks addr",
1293 "check cb access: byte",
1295 BPF_MOV64_IMM(BPF_REG_0
, 0),
1296 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1297 offsetof(struct __sk_buff
, cb
[0])),
1298 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1299 offsetof(struct __sk_buff
, cb
[0]) + 1),
1300 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1301 offsetof(struct __sk_buff
, cb
[0]) + 2),
1302 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1303 offsetof(struct __sk_buff
, cb
[0]) + 3),
1304 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1305 offsetof(struct __sk_buff
, cb
[1])),
1306 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1307 offsetof(struct __sk_buff
, cb
[1]) + 1),
1308 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1309 offsetof(struct __sk_buff
, cb
[1]) + 2),
1310 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1311 offsetof(struct __sk_buff
, cb
[1]) + 3),
1312 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1313 offsetof(struct __sk_buff
, cb
[2])),
1314 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1315 offsetof(struct __sk_buff
, cb
[2]) + 1),
1316 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1317 offsetof(struct __sk_buff
, cb
[2]) + 2),
1318 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1319 offsetof(struct __sk_buff
, cb
[2]) + 3),
1320 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1321 offsetof(struct __sk_buff
, cb
[3])),
1322 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1323 offsetof(struct __sk_buff
, cb
[3]) + 1),
1324 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1325 offsetof(struct __sk_buff
, cb
[3]) + 2),
1326 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1327 offsetof(struct __sk_buff
, cb
[3]) + 3),
1328 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1329 offsetof(struct __sk_buff
, cb
[4])),
1330 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1331 offsetof(struct __sk_buff
, cb
[4]) + 1),
1332 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1333 offsetof(struct __sk_buff
, cb
[4]) + 2),
1334 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1335 offsetof(struct __sk_buff
, cb
[4]) + 3),
1336 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1337 offsetof(struct __sk_buff
, cb
[0])),
1338 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1339 offsetof(struct __sk_buff
, cb
[0]) + 1),
1340 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1341 offsetof(struct __sk_buff
, cb
[0]) + 2),
1342 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1343 offsetof(struct __sk_buff
, cb
[0]) + 3),
1344 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1345 offsetof(struct __sk_buff
, cb
[1])),
1346 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1347 offsetof(struct __sk_buff
, cb
[1]) + 1),
1348 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1349 offsetof(struct __sk_buff
, cb
[1]) + 2),
1350 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1351 offsetof(struct __sk_buff
, cb
[1]) + 3),
1352 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1353 offsetof(struct __sk_buff
, cb
[2])),
1354 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1355 offsetof(struct __sk_buff
, cb
[2]) + 1),
1356 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1357 offsetof(struct __sk_buff
, cb
[2]) + 2),
1358 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1359 offsetof(struct __sk_buff
, cb
[2]) + 3),
1360 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1361 offsetof(struct __sk_buff
, cb
[3])),
1362 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1363 offsetof(struct __sk_buff
, cb
[3]) + 1),
1364 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1365 offsetof(struct __sk_buff
, cb
[3]) + 2),
1366 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1367 offsetof(struct __sk_buff
, cb
[3]) + 3),
1368 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1369 offsetof(struct __sk_buff
, cb
[4])),
1370 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1371 offsetof(struct __sk_buff
, cb
[4]) + 1),
1372 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1373 offsetof(struct __sk_buff
, cb
[4]) + 2),
1374 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1375 offsetof(struct __sk_buff
, cb
[4]) + 3),
1381 "__sk_buff->hash, offset 0, byte store not permitted",
1383 BPF_MOV64_IMM(BPF_REG_0
, 0),
1384 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1385 offsetof(struct __sk_buff
, hash
)),
1388 .errstr
= "invalid bpf_context access",
1392 "__sk_buff->tc_index, offset 3, byte store not permitted",
1394 BPF_MOV64_IMM(BPF_REG_0
, 0),
1395 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1396 offsetof(struct __sk_buff
, tc_index
) + 3),
1399 .errstr
= "invalid bpf_context access",
1403 "check skb->hash byte load permitted",
1405 BPF_MOV64_IMM(BPF_REG_0
, 0),
1406 #if __BYTE_ORDER == __LITTLE_ENDIAN
1407 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1408 offsetof(struct __sk_buff
, hash
)),
1410 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1411 offsetof(struct __sk_buff
, hash
) + 3),
1418 "check skb->hash byte load not permitted 1",
1420 BPF_MOV64_IMM(BPF_REG_0
, 0),
1421 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1422 offsetof(struct __sk_buff
, hash
) + 1),
1425 .errstr
= "invalid bpf_context access",
1429 "check skb->hash byte load not permitted 2",
1431 BPF_MOV64_IMM(BPF_REG_0
, 0),
1432 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1433 offsetof(struct __sk_buff
, hash
) + 2),
1436 .errstr
= "invalid bpf_context access",
1440 "check skb->hash byte load not permitted 3",
1442 BPF_MOV64_IMM(BPF_REG_0
, 0),
1443 #if __BYTE_ORDER == __LITTLE_ENDIAN
1444 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1445 offsetof(struct __sk_buff
, hash
) + 3),
1447 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1448 offsetof(struct __sk_buff
, hash
)),
1452 .errstr
= "invalid bpf_context access",
1456 "check cb access: byte, wrong type",
1458 BPF_MOV64_IMM(BPF_REG_0
, 0),
1459 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1460 offsetof(struct __sk_buff
, cb
[0])),
1463 .errstr
= "invalid bpf_context access",
1465 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1468 "check cb access: half",
1470 BPF_MOV64_IMM(BPF_REG_0
, 0),
1471 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1472 offsetof(struct __sk_buff
, cb
[0])),
1473 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1474 offsetof(struct __sk_buff
, cb
[0]) + 2),
1475 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1476 offsetof(struct __sk_buff
, cb
[1])),
1477 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1478 offsetof(struct __sk_buff
, cb
[1]) + 2),
1479 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1480 offsetof(struct __sk_buff
, cb
[2])),
1481 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1482 offsetof(struct __sk_buff
, cb
[2]) + 2),
1483 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1484 offsetof(struct __sk_buff
, cb
[3])),
1485 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1486 offsetof(struct __sk_buff
, cb
[3]) + 2),
1487 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1488 offsetof(struct __sk_buff
, cb
[4])),
1489 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1490 offsetof(struct __sk_buff
, cb
[4]) + 2),
1491 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1492 offsetof(struct __sk_buff
, cb
[0])),
1493 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1494 offsetof(struct __sk_buff
, cb
[0]) + 2),
1495 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1496 offsetof(struct __sk_buff
, cb
[1])),
1497 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1498 offsetof(struct __sk_buff
, cb
[1]) + 2),
1499 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1500 offsetof(struct __sk_buff
, cb
[2])),
1501 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1502 offsetof(struct __sk_buff
, cb
[2]) + 2),
1503 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1504 offsetof(struct __sk_buff
, cb
[3])),
1505 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1506 offsetof(struct __sk_buff
, cb
[3]) + 2),
1507 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1508 offsetof(struct __sk_buff
, cb
[4])),
1509 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1510 offsetof(struct __sk_buff
, cb
[4]) + 2),
1516 "check cb access: half, unaligned",
1518 BPF_MOV64_IMM(BPF_REG_0
, 0),
1519 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1520 offsetof(struct __sk_buff
, cb
[0]) + 1),
1523 .errstr
= "misaligned context access",
1525 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1528 "check __sk_buff->hash, offset 0, half store not permitted",
1530 BPF_MOV64_IMM(BPF_REG_0
, 0),
1531 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1532 offsetof(struct __sk_buff
, hash
)),
1535 .errstr
= "invalid bpf_context access",
1539 "check __sk_buff->tc_index, offset 2, half store not permitted",
1541 BPF_MOV64_IMM(BPF_REG_0
, 0),
1542 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1543 offsetof(struct __sk_buff
, tc_index
) + 2),
1546 .errstr
= "invalid bpf_context access",
1550 "check skb->hash half load permitted",
1552 BPF_MOV64_IMM(BPF_REG_0
, 0),
1553 #if __BYTE_ORDER == __LITTLE_ENDIAN
1554 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1555 offsetof(struct __sk_buff
, hash
)),
1557 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1558 offsetof(struct __sk_buff
, hash
) + 2),
1565 "check skb->hash half load not permitted",
1567 BPF_MOV64_IMM(BPF_REG_0
, 0),
1568 #if __BYTE_ORDER == __LITTLE_ENDIAN
1569 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1570 offsetof(struct __sk_buff
, hash
) + 2),
1572 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1573 offsetof(struct __sk_buff
, hash
)),
1577 .errstr
= "invalid bpf_context access",
1581 "check cb access: half, wrong type",
1583 BPF_MOV64_IMM(BPF_REG_0
, 0),
1584 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1585 offsetof(struct __sk_buff
, cb
[0])),
1588 .errstr
= "invalid bpf_context access",
1590 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1593 "check cb access: word",
1595 BPF_MOV64_IMM(BPF_REG_0
, 0),
1596 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1597 offsetof(struct __sk_buff
, cb
[0])),
1598 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1599 offsetof(struct __sk_buff
, cb
[1])),
1600 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1601 offsetof(struct __sk_buff
, cb
[2])),
1602 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1603 offsetof(struct __sk_buff
, cb
[3])),
1604 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1605 offsetof(struct __sk_buff
, cb
[4])),
1606 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1607 offsetof(struct __sk_buff
, cb
[0])),
1608 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1609 offsetof(struct __sk_buff
, cb
[1])),
1610 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1611 offsetof(struct __sk_buff
, cb
[2])),
1612 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1613 offsetof(struct __sk_buff
, cb
[3])),
1614 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1615 offsetof(struct __sk_buff
, cb
[4])),
1621 "check cb access: word, unaligned 1",
1623 BPF_MOV64_IMM(BPF_REG_0
, 0),
1624 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1625 offsetof(struct __sk_buff
, cb
[0]) + 2),
1628 .errstr
= "misaligned context access",
1630 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1633 "check cb access: word, unaligned 2",
1635 BPF_MOV64_IMM(BPF_REG_0
, 0),
1636 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1637 offsetof(struct __sk_buff
, cb
[4]) + 1),
1640 .errstr
= "misaligned context access",
1642 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1645 "check cb access: word, unaligned 3",
1647 BPF_MOV64_IMM(BPF_REG_0
, 0),
1648 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1649 offsetof(struct __sk_buff
, cb
[4]) + 2),
1652 .errstr
= "misaligned context access",
1654 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1657 "check cb access: word, unaligned 4",
1659 BPF_MOV64_IMM(BPF_REG_0
, 0),
1660 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1661 offsetof(struct __sk_buff
, cb
[4]) + 3),
1664 .errstr
= "misaligned context access",
1666 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1669 "check cb access: double",
1671 BPF_MOV64_IMM(BPF_REG_0
, 0),
1672 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1673 offsetof(struct __sk_buff
, cb
[0])),
1674 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1675 offsetof(struct __sk_buff
, cb
[2])),
1676 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1677 offsetof(struct __sk_buff
, cb
[0])),
1678 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1679 offsetof(struct __sk_buff
, cb
[2])),
1685 "check cb access: double, unaligned 1",
1687 BPF_MOV64_IMM(BPF_REG_0
, 0),
1688 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1689 offsetof(struct __sk_buff
, cb
[1])),
1692 .errstr
= "misaligned context access",
1694 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1697 "check cb access: double, unaligned 2",
1699 BPF_MOV64_IMM(BPF_REG_0
, 0),
1700 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1701 offsetof(struct __sk_buff
, cb
[3])),
1704 .errstr
= "misaligned context access",
1706 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1709 "check cb access: double, oob 1",
1711 BPF_MOV64_IMM(BPF_REG_0
, 0),
1712 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1713 offsetof(struct __sk_buff
, cb
[4])),
1716 .errstr
= "invalid bpf_context access",
1720 "check cb access: double, oob 2",
1722 BPF_MOV64_IMM(BPF_REG_0
, 0),
1723 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1724 offsetof(struct __sk_buff
, cb
[4])),
1727 .errstr
= "invalid bpf_context access",
1731 "check __sk_buff->ifindex dw store not permitted",
1733 BPF_MOV64_IMM(BPF_REG_0
, 0),
1734 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1735 offsetof(struct __sk_buff
, ifindex
)),
1738 .errstr
= "invalid bpf_context access",
1742 "check __sk_buff->ifindex dw load not permitted",
1744 BPF_MOV64_IMM(BPF_REG_0
, 0),
1745 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1746 offsetof(struct __sk_buff
, ifindex
)),
1749 .errstr
= "invalid bpf_context access",
1753 "check cb access: double, wrong type",
1755 BPF_MOV64_IMM(BPF_REG_0
, 0),
1756 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1757 offsetof(struct __sk_buff
, cb
[0])),
1760 .errstr
= "invalid bpf_context access",
1762 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1765 "check out of range skb->cb access",
1767 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1768 offsetof(struct __sk_buff
, cb
[0]) + 256),
1771 .errstr
= "invalid bpf_context access",
1772 .errstr_unpriv
= "",
1774 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
1777 "write skb fields from socket prog",
1779 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1780 offsetof(struct __sk_buff
, cb
[4])),
1781 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1782 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1783 offsetof(struct __sk_buff
, mark
)),
1784 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1785 offsetof(struct __sk_buff
, tc_index
)),
1786 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1787 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1788 offsetof(struct __sk_buff
, cb
[0])),
1789 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1790 offsetof(struct __sk_buff
, cb
[2])),
1794 .errstr_unpriv
= "R1 leaks addr",
1795 .result_unpriv
= REJECT
,
1798 "write skb fields from tc_cls_act prog",
1800 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1801 offsetof(struct __sk_buff
, cb
[0])),
1802 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1803 offsetof(struct __sk_buff
, mark
)),
1804 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1805 offsetof(struct __sk_buff
, tc_index
)),
1806 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1807 offsetof(struct __sk_buff
, tc_index
)),
1808 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1809 offsetof(struct __sk_buff
, cb
[3])),
1812 .errstr_unpriv
= "",
1813 .result_unpriv
= REJECT
,
1815 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1818 "PTR_TO_STACK store/load",
1820 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1821 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1822 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1823 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1829 "PTR_TO_STACK store/load - bad alignment on off",
1831 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1832 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1833 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1834 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1838 .errstr
= "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1841 "PTR_TO_STACK store/load - bad alignment on reg",
1843 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1844 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1845 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1846 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1850 .errstr
= "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1853 "PTR_TO_STACK store/load - out of bounds low",
1855 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1856 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
1857 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1858 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1862 .errstr
= "invalid stack off=-79992 size=8",
1863 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
1866 "PTR_TO_STACK store/load - out of bounds high",
1868 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1869 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1870 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1871 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1875 .errstr
= "invalid stack off=0 size=8",
1878 "unpriv: return pointer",
1880 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
1884 .result_unpriv
= REJECT
,
1885 .errstr_unpriv
= "R0 leaks addr",
1888 "unpriv: add const to pointer",
1890 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
1891 BPF_MOV64_IMM(BPF_REG_0
, 0),
1897 "unpriv: add pointer to pointer",
1899 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
1900 BPF_MOV64_IMM(BPF_REG_0
, 0),
1904 .errstr
= "R1 pointer += pointer",
1907 "unpriv: neg pointer",
1909 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
1910 BPF_MOV64_IMM(BPF_REG_0
, 0),
1914 .result_unpriv
= REJECT
,
1915 .errstr_unpriv
= "R1 pointer arithmetic",
1918 "unpriv: cmp pointer with const",
1920 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1921 BPF_MOV64_IMM(BPF_REG_0
, 0),
1925 .result_unpriv
= REJECT
,
1926 .errstr_unpriv
= "R1 pointer comparison",
1929 "unpriv: cmp pointer with pointer",
1931 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
1932 BPF_MOV64_IMM(BPF_REG_0
, 0),
1936 .result_unpriv
= REJECT
,
1937 .errstr_unpriv
= "R10 pointer comparison",
1940 "unpriv: check that printk is disallowed",
1942 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1943 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1944 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1945 BPF_MOV64_IMM(BPF_REG_2
, 8),
1946 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1947 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1948 BPF_FUNC_trace_printk
),
1949 BPF_MOV64_IMM(BPF_REG_0
, 0),
1952 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
1953 .result_unpriv
= REJECT
,
1957 "unpriv: pass pointer to helper function",
1959 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1960 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1961 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1962 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1963 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
1964 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
1965 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1966 BPF_FUNC_map_update_elem
),
1967 BPF_MOV64_IMM(BPF_REG_0
, 0),
1970 .fixup_map1
= { 3 },
1971 .errstr_unpriv
= "R4 leaks addr",
1972 .result_unpriv
= REJECT
,
1976 "unpriv: indirectly pass pointer on stack to helper function",
1978 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1979 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1980 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1981 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1982 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1983 BPF_FUNC_map_lookup_elem
),
1984 BPF_MOV64_IMM(BPF_REG_0
, 0),
1987 .fixup_map1
= { 3 },
1988 .errstr
= "invalid indirect read from stack off -8+0 size 8",
1992 "unpriv: mangle pointer on stack 1",
1994 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1995 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
1996 BPF_MOV64_IMM(BPF_REG_0
, 0),
1999 .errstr_unpriv
= "attempt to corrupt spilled",
2000 .result_unpriv
= REJECT
,
2004 "unpriv: mangle pointer on stack 2",
2006 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2007 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
2008 BPF_MOV64_IMM(BPF_REG_0
, 0),
2011 .errstr_unpriv
= "attempt to corrupt spilled",
2012 .result_unpriv
= REJECT
,
2016 "unpriv: read pointer from stack in small chunks",
2018 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
2019 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
2020 BPF_MOV64_IMM(BPF_REG_0
, 0),
2023 .errstr
= "invalid size",
2027 "unpriv: write pointer into ctx",
2029 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
2030 BPF_MOV64_IMM(BPF_REG_0
, 0),
2033 .errstr_unpriv
= "R1 leaks addr",
2034 .result_unpriv
= REJECT
,
2035 .errstr
= "invalid bpf_context access",
2039 "unpriv: spill/fill of ctx",
2041 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2042 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2043 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2044 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2045 BPF_MOV64_IMM(BPF_REG_0
, 0),
2051 "unpriv: spill/fill of ctx 2",
2053 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2054 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2055 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2056 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2057 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2058 BPF_FUNC_get_hash_recalc
),
2062 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2065 "unpriv: spill/fill of ctx 3",
2067 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2068 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2069 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2070 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2071 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2072 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2073 BPF_FUNC_get_hash_recalc
),
2077 .errstr
= "R1 type=fp expected=ctx",
2078 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2081 "unpriv: spill/fill of ctx 4",
2083 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2084 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2085 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2086 BPF_MOV64_IMM(BPF_REG_0
, 1),
2087 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
,
2089 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2090 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2091 BPF_FUNC_get_hash_recalc
),
2095 .errstr
= "R1 type=inv expected=ctx",
2096 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2099 "unpriv: spill/fill of different pointers stx",
2101 BPF_MOV64_IMM(BPF_REG_3
, 42),
2102 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2103 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2104 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2105 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2106 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
2107 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2108 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2109 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2110 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2111 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
2112 offsetof(struct __sk_buff
, mark
)),
2113 BPF_MOV64_IMM(BPF_REG_0
, 0),
2117 .errstr
= "same insn cannot be used with different pointers",
2118 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2121 "unpriv: spill/fill of different pointers ldx",
2123 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2124 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2125 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2126 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2127 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
2128 -(__s32
)offsetof(struct bpf_perf_event_data
,
2129 sample_period
) - 8),
2130 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2131 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2132 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2133 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2134 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
2135 offsetof(struct bpf_perf_event_data
,
2137 BPF_MOV64_IMM(BPF_REG_0
, 0),
2141 .errstr
= "same insn cannot be used with different pointers",
2142 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
2145 "unpriv: write pointer into map elem value",
2147 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2148 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2149 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2150 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2151 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2152 BPF_FUNC_map_lookup_elem
),
2153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
2154 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
2157 .fixup_map1
= { 3 },
2158 .errstr_unpriv
= "R0 leaks addr",
2159 .result_unpriv
= REJECT
,
2163 "alu32: mov u32 const",
2165 BPF_MOV32_IMM(BPF_REG_7
, 0),
2166 BPF_ALU32_IMM(BPF_AND
, BPF_REG_7
, 1),
2167 BPF_MOV32_REG(BPF_REG_0
, BPF_REG_7
),
2168 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
2169 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_7
, 0),
2175 "unpriv: partial copy of pointer",
2177 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
2178 BPF_MOV64_IMM(BPF_REG_0
, 0),
2181 .errstr_unpriv
= "R10 partial copy",
2182 .result_unpriv
= REJECT
,
2186 "unpriv: pass pointer to tail_call",
2188 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
2189 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2190 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2191 BPF_FUNC_tail_call
),
2192 BPF_MOV64_IMM(BPF_REG_0
, 0),
2195 .fixup_prog
= { 1 },
2196 .errstr_unpriv
= "R3 leaks addr into helper",
2197 .result_unpriv
= REJECT
,
2201 "unpriv: cmp map pointer with zero",
2203 BPF_MOV64_IMM(BPF_REG_1
, 0),
2204 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2205 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
2206 BPF_MOV64_IMM(BPF_REG_0
, 0),
2209 .fixup_map1
= { 1 },
2210 .errstr_unpriv
= "R1 pointer comparison",
2211 .result_unpriv
= REJECT
,
2215 "unpriv: write into frame pointer",
2217 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
2218 BPF_MOV64_IMM(BPF_REG_0
, 0),
2221 .errstr
= "frame pointer is read only",
2225 "unpriv: spill/fill frame pointer",
2227 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2228 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2229 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2230 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
2231 BPF_MOV64_IMM(BPF_REG_0
, 0),
2234 .errstr
= "frame pointer is read only",
2238 "unpriv: cmp of frame pointer",
2240 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
2241 BPF_MOV64_IMM(BPF_REG_0
, 0),
2244 .errstr_unpriv
= "R10 pointer comparison",
2245 .result_unpriv
= REJECT
,
2249 "unpriv: adding of fp",
2251 BPF_MOV64_IMM(BPF_REG_0
, 0),
2252 BPF_MOV64_IMM(BPF_REG_1
, 0),
2253 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
2254 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
2257 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2258 .result_unpriv
= REJECT
,
2262 "unpriv: cmp of stack pointer",
2264 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2265 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2266 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
2267 BPF_MOV64_IMM(BPF_REG_0
, 0),
2270 .errstr_unpriv
= "R2 pointer comparison",
2271 .result_unpriv
= REJECT
,
2275 "runtime/jit: pass negative index to tail_call",
2277 BPF_MOV64_IMM(BPF_REG_3
, -1),
2278 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2279 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2280 BPF_FUNC_tail_call
),
2281 BPF_MOV64_IMM(BPF_REG_0
, 0),
2284 .fixup_prog
= { 1 },
2288 "runtime/jit: pass > 32bit index to tail_call",
2290 BPF_LD_IMM64(BPF_REG_3
, 0x100000000ULL
),
2291 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2292 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2293 BPF_FUNC_tail_call
),
2294 BPF_MOV64_IMM(BPF_REG_0
, 0),
2297 .fixup_prog
= { 2 },
2301 "PTR_TO_STACK check high 1",
2303 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2304 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
2305 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2306 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2312 "PTR_TO_STACK check high 2",
2314 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2315 BPF_ST_MEM(BPF_B
, BPF_REG_1
, -1, 42),
2316 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, -1),
2322 "PTR_TO_STACK check high 3",
2324 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2325 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0),
2326 BPF_ST_MEM(BPF_B
, BPF_REG_1
, -1, 42),
2327 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, -1),
2330 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2331 .result_unpriv
= REJECT
,
2335 "PTR_TO_STACK check high 4",
2337 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2338 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0),
2339 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2340 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2343 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2344 .errstr
= "invalid stack off=0 size=1",
2348 "PTR_TO_STACK check high 5",
2350 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2351 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, (1 << 29) - 1),
2352 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2353 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2357 .errstr
= "invalid stack off",
2360 "PTR_TO_STACK check high 6",
2362 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2363 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, (1 << 29) - 1),
2364 BPF_ST_MEM(BPF_B
, BPF_REG_1
, SHRT_MAX
, 42),
2365 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, SHRT_MAX
),
2369 .errstr
= "invalid stack off",
2372 "PTR_TO_STACK check high 7",
2374 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2375 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, (1 << 29) - 1),
2376 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, (1 << 29) - 1),
2377 BPF_ST_MEM(BPF_B
, BPF_REG_1
, SHRT_MAX
, 42),
2378 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, SHRT_MAX
),
2382 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2383 .errstr
= "fp pointer offset",
2386 "PTR_TO_STACK check low 1",
2388 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2389 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -512),
2390 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2391 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2397 "PTR_TO_STACK check low 2",
2399 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2400 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -513),
2401 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 1, 42),
2402 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 1),
2405 .result_unpriv
= REJECT
,
2406 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2410 "PTR_TO_STACK check low 3",
2412 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2413 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -513),
2414 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2415 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2418 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2419 .errstr
= "invalid stack off=-513 size=1",
2423 "PTR_TO_STACK check low 4",
2425 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2426 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, INT_MIN
),
2427 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2428 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2432 .errstr
= "math between fp pointer",
2435 "PTR_TO_STACK check low 5",
2437 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2438 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -((1 << 29) - 1)),
2439 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2440 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2444 .errstr
= "invalid stack off",
2447 "PTR_TO_STACK check low 6",
2449 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2450 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -((1 << 29) - 1)),
2451 BPF_ST_MEM(BPF_B
, BPF_REG_1
, SHRT_MIN
, 42),
2452 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, SHRT_MIN
),
2456 .errstr
= "invalid stack off",
2459 "PTR_TO_STACK check low 7",
2461 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2462 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -((1 << 29) - 1)),
2463 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -((1 << 29) - 1)),
2464 BPF_ST_MEM(BPF_B
, BPF_REG_1
, SHRT_MIN
, 42),
2465 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, SHRT_MIN
),
2469 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
2470 .errstr
= "fp pointer offset",
2473 "PTR_TO_STACK mixed reg/k, 1",
2475 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2476 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -3),
2477 BPF_MOV64_IMM(BPF_REG_2
, -3),
2478 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
2479 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2480 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2486 "PTR_TO_STACK mixed reg/k, 2",
2488 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2489 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, 0),
2490 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2491 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -3),
2492 BPF_MOV64_IMM(BPF_REG_2
, -3),
2493 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
2494 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2495 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_10
),
2496 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_5
, -6),
2502 "PTR_TO_STACK mixed reg/k, 3",
2504 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2505 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -3),
2506 BPF_MOV64_IMM(BPF_REG_2
, -3),
2507 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
2508 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2509 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2517 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
2518 BPF_MOV64_IMM(BPF_REG_2
, -3),
2519 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
2520 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 42),
2521 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
, 0),
2524 .result_unpriv
= REJECT
,
2525 .errstr_unpriv
= "invalid stack off=0 size=1",
2529 "stack pointer arithmetic",
2531 BPF_MOV64_IMM(BPF_REG_1
, 4),
2532 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
2533 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_10
),
2534 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2535 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2536 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2537 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_1
),
2538 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2539 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2540 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2541 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2542 BPF_MOV64_IMM(BPF_REG_0
, 0),
2548 "raw_stack: no skb_load_bytes",
2550 BPF_MOV64_IMM(BPF_REG_2
, 4),
2551 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2552 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2553 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2554 BPF_MOV64_IMM(BPF_REG_4
, 8),
2555 /* Call to skb_load_bytes() omitted. */
2556 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2560 .errstr
= "invalid read from stack off -8+0 size 8",
2561 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2564 "raw_stack: skb_load_bytes, negative len",
2566 BPF_MOV64_IMM(BPF_REG_2
, 4),
2567 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2569 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2570 BPF_MOV64_IMM(BPF_REG_4
, -8),
2571 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2572 BPF_FUNC_skb_load_bytes
),
2573 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2577 .errstr
= "R4 min value is negative",
2578 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2581 "raw_stack: skb_load_bytes, negative len 2",
2583 BPF_MOV64_IMM(BPF_REG_2
, 4),
2584 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2585 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2586 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2587 BPF_MOV64_IMM(BPF_REG_4
, ~0),
2588 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2589 BPF_FUNC_skb_load_bytes
),
2590 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2594 .errstr
= "R4 min value is negative",
2595 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2598 "raw_stack: skb_load_bytes, zero len",
2600 BPF_MOV64_IMM(BPF_REG_2
, 4),
2601 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2602 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2603 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2604 BPF_MOV64_IMM(BPF_REG_4
, 0),
2605 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2606 BPF_FUNC_skb_load_bytes
),
2607 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2611 .errstr
= "invalid stack type R3",
2612 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2615 "raw_stack: skb_load_bytes, no init",
2617 BPF_MOV64_IMM(BPF_REG_2
, 4),
2618 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2619 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2620 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2621 BPF_MOV64_IMM(BPF_REG_4
, 8),
2622 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2623 BPF_FUNC_skb_load_bytes
),
2624 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2628 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2631 "raw_stack: skb_load_bytes, init",
2633 BPF_MOV64_IMM(BPF_REG_2
, 4),
2634 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2635 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2636 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
2637 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2638 BPF_MOV64_IMM(BPF_REG_4
, 8),
2639 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2640 BPF_FUNC_skb_load_bytes
),
2641 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2645 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2648 "raw_stack: skb_load_bytes, spilled regs around bounds",
2650 BPF_MOV64_IMM(BPF_REG_2
, 4),
2651 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2652 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2653 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2654 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2655 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2656 BPF_MOV64_IMM(BPF_REG_4
, 8),
2657 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2658 BPF_FUNC_skb_load_bytes
),
2659 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2660 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2661 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2662 offsetof(struct __sk_buff
, mark
)),
2663 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2664 offsetof(struct __sk_buff
, priority
)),
2665 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2669 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2672 "raw_stack: skb_load_bytes, spilled regs corruption",
2674 BPF_MOV64_IMM(BPF_REG_2
, 4),
2675 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2676 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2677 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2678 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2679 BPF_MOV64_IMM(BPF_REG_4
, 8),
2680 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2681 BPF_FUNC_skb_load_bytes
),
2682 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2683 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2684 offsetof(struct __sk_buff
, mark
)),
2688 .errstr
= "R0 invalid mem access 'inv'",
2689 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2692 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2694 BPF_MOV64_IMM(BPF_REG_2
, 4),
2695 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2696 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2697 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2698 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2699 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2700 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2701 BPF_MOV64_IMM(BPF_REG_4
, 8),
2702 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2703 BPF_FUNC_skb_load_bytes
),
2704 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2705 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2706 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2707 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2708 offsetof(struct __sk_buff
, mark
)),
2709 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2710 offsetof(struct __sk_buff
, priority
)),
2711 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2712 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
2713 offsetof(struct __sk_buff
, pkt_type
)),
2714 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2718 .errstr
= "R3 invalid mem access 'inv'",
2719 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2722 "raw_stack: skb_load_bytes, spilled regs + data",
2724 BPF_MOV64_IMM(BPF_REG_2
, 4),
2725 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2727 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2728 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2729 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2730 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2731 BPF_MOV64_IMM(BPF_REG_4
, 8),
2732 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2733 BPF_FUNC_skb_load_bytes
),
2734 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2735 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2736 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2737 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2738 offsetof(struct __sk_buff
, mark
)),
2739 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2740 offsetof(struct __sk_buff
, priority
)),
2741 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2742 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2746 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2749 "raw_stack: skb_load_bytes, invalid access 1",
2751 BPF_MOV64_IMM(BPF_REG_2
, 4),
2752 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2753 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
2754 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2755 BPF_MOV64_IMM(BPF_REG_4
, 8),
2756 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2757 BPF_FUNC_skb_load_bytes
),
2758 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2762 .errstr
= "invalid stack type R3 off=-513 access_size=8",
2763 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2766 "raw_stack: skb_load_bytes, invalid access 2",
2768 BPF_MOV64_IMM(BPF_REG_2
, 4),
2769 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2770 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2771 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2772 BPF_MOV64_IMM(BPF_REG_4
, 8),
2773 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2774 BPF_FUNC_skb_load_bytes
),
2775 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2779 .errstr
= "invalid stack type R3 off=-1 access_size=8",
2780 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2783 "raw_stack: skb_load_bytes, invalid access 3",
2785 BPF_MOV64_IMM(BPF_REG_2
, 4),
2786 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2787 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
2788 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2789 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2790 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2791 BPF_FUNC_skb_load_bytes
),
2792 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2796 .errstr
= "R4 min value is negative",
2797 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2800 "raw_stack: skb_load_bytes, invalid access 4",
2802 BPF_MOV64_IMM(BPF_REG_2
, 4),
2803 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2804 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2805 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2806 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2807 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2808 BPF_FUNC_skb_load_bytes
),
2809 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2813 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2814 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2817 "raw_stack: skb_load_bytes, invalid access 5",
2819 BPF_MOV64_IMM(BPF_REG_2
, 4),
2820 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2821 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2822 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2823 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2824 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2825 BPF_FUNC_skb_load_bytes
),
2826 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2830 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2831 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2834 "raw_stack: skb_load_bytes, invalid access 6",
2836 BPF_MOV64_IMM(BPF_REG_2
, 4),
2837 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2838 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2839 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2840 BPF_MOV64_IMM(BPF_REG_4
, 0),
2841 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2842 BPF_FUNC_skb_load_bytes
),
2843 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2847 .errstr
= "invalid stack type R3 off=-512 access_size=0",
2848 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2851 "raw_stack: skb_load_bytes, large access",
2853 BPF_MOV64_IMM(BPF_REG_2
, 4),
2854 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2855 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2856 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2857 BPF_MOV64_IMM(BPF_REG_4
, 512),
2858 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2859 BPF_FUNC_skb_load_bytes
),
2860 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2864 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2867 "context stores via ST",
2869 BPF_MOV64_IMM(BPF_REG_0
, 0),
2870 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, offsetof(struct __sk_buff
, mark
), 0),
2873 .errstr
= "BPF_ST stores into R1 context is not allowed",
2875 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2878 "context stores via XADD",
2880 BPF_MOV64_IMM(BPF_REG_0
, 0),
2881 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_W
, BPF_REG_1
,
2882 BPF_REG_0
, offsetof(struct __sk_buff
, mark
), 0),
2885 .errstr
= "BPF_XADD stores into R1 context is not allowed",
2887 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2890 "direct packet access: test1",
2892 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2893 offsetof(struct __sk_buff
, data
)),
2894 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2895 offsetof(struct __sk_buff
, data_end
)),
2896 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2898 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2899 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2900 BPF_MOV64_IMM(BPF_REG_0
, 0),
2904 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2907 "direct packet access: test2",
2909 BPF_MOV64_IMM(BPF_REG_0
, 1),
2910 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
2911 offsetof(struct __sk_buff
, data_end
)),
2912 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2913 offsetof(struct __sk_buff
, data
)),
2914 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2915 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 14),
2916 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 15),
2917 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 7),
2918 BPF_LDX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_3
, 12),
2919 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_4
, 14),
2920 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2921 offsetof(struct __sk_buff
, data
)),
2922 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_4
),
2923 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2924 offsetof(struct __sk_buff
, len
)),
2925 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 49),
2926 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 49),
2927 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_2
),
2928 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_3
),
2929 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2930 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2931 offsetof(struct __sk_buff
, data_end
)),
2932 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
2933 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_3
, 4),
2934 BPF_MOV64_IMM(BPF_REG_0
, 0),
2938 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2941 "direct packet access: test3",
2943 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2944 offsetof(struct __sk_buff
, data
)),
2945 BPF_MOV64_IMM(BPF_REG_0
, 0),
2948 .errstr
= "invalid bpf_context access off=76",
2950 .prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
,
2953 "direct packet access: test4 (write)",
2955 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2956 offsetof(struct __sk_buff
, data
)),
2957 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2958 offsetof(struct __sk_buff
, data_end
)),
2959 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2960 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2961 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2962 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2963 BPF_MOV64_IMM(BPF_REG_0
, 0),
2967 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2970 "direct packet access: test5 (pkt_end >= reg, good access)",
2972 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2973 offsetof(struct __sk_buff
, data
)),
2974 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2975 offsetof(struct __sk_buff
, data_end
)),
2976 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2977 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2978 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2979 BPF_MOV64_IMM(BPF_REG_0
, 1),
2981 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2982 BPF_MOV64_IMM(BPF_REG_0
, 0),
2986 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2989 "direct packet access: test6 (pkt_end >= reg, bad access)",
2991 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2992 offsetof(struct __sk_buff
, data
)),
2993 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2994 offsetof(struct __sk_buff
, data_end
)),
2995 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2996 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2997 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2998 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2999 BPF_MOV64_IMM(BPF_REG_0
, 1),
3001 BPF_MOV64_IMM(BPF_REG_0
, 0),
3004 .errstr
= "invalid access to packet",
3006 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3009 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3011 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3012 offsetof(struct __sk_buff
, data
)),
3013 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3014 offsetof(struct __sk_buff
, data_end
)),
3015 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3016 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3017 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
3018 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3019 BPF_MOV64_IMM(BPF_REG_0
, 1),
3021 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3022 BPF_MOV64_IMM(BPF_REG_0
, 0),
3025 .errstr
= "invalid access to packet",
3027 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3030 "direct packet access: test8 (double test, variant 1)",
3032 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3033 offsetof(struct __sk_buff
, data
)),
3034 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3035 offsetof(struct __sk_buff
, data_end
)),
3036 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3037 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3038 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 4),
3039 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3040 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3041 BPF_MOV64_IMM(BPF_REG_0
, 1),
3043 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3044 BPF_MOV64_IMM(BPF_REG_0
, 0),
3048 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3051 "direct packet access: test9 (double test, variant 2)",
3053 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3054 offsetof(struct __sk_buff
, data
)),
3055 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3056 offsetof(struct __sk_buff
, data_end
)),
3057 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3058 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3059 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
3060 BPF_MOV64_IMM(BPF_REG_0
, 1),
3062 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3063 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3064 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3065 BPF_MOV64_IMM(BPF_REG_0
, 0),
3069 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3072 "direct packet access: test10 (write invalid)",
3074 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3075 offsetof(struct __sk_buff
, data
)),
3076 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3077 offsetof(struct __sk_buff
, data_end
)),
3078 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3079 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3080 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3081 BPF_MOV64_IMM(BPF_REG_0
, 0),
3083 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3084 BPF_MOV64_IMM(BPF_REG_0
, 0),
3087 .errstr
= "invalid access to packet",
3089 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3092 "direct packet access: test11 (shift, good access)",
3094 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3095 offsetof(struct __sk_buff
, data
)),
3096 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3097 offsetof(struct __sk_buff
, data_end
)),
3098 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3099 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3100 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3101 BPF_MOV64_IMM(BPF_REG_3
, 144),
3102 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3103 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3104 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 3),
3105 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3106 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3107 BPF_MOV64_IMM(BPF_REG_0
, 1),
3109 BPF_MOV64_IMM(BPF_REG_0
, 0),
3113 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3116 "direct packet access: test12 (and, good access)",
3118 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3119 offsetof(struct __sk_buff
, data
)),
3120 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3121 offsetof(struct __sk_buff
, data_end
)),
3122 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3123 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3124 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3125 BPF_MOV64_IMM(BPF_REG_3
, 144),
3126 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3127 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3128 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
3129 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3130 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3131 BPF_MOV64_IMM(BPF_REG_0
, 1),
3133 BPF_MOV64_IMM(BPF_REG_0
, 0),
3137 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3140 "direct packet access: test13 (branches, good access)",
3142 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3143 offsetof(struct __sk_buff
, data
)),
3144 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3145 offsetof(struct __sk_buff
, data_end
)),
3146 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3147 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3148 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 13),
3149 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3150 offsetof(struct __sk_buff
, mark
)),
3151 BPF_MOV64_IMM(BPF_REG_4
, 1),
3152 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_4
, 2),
3153 BPF_MOV64_IMM(BPF_REG_3
, 14),
3154 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
3155 BPF_MOV64_IMM(BPF_REG_3
, 24),
3156 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
3157 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
3158 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
3159 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3160 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3161 BPF_MOV64_IMM(BPF_REG_0
, 1),
3163 BPF_MOV64_IMM(BPF_REG_0
, 0),
3167 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3170 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3172 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3173 offsetof(struct __sk_buff
, data
)),
3174 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3175 offsetof(struct __sk_buff
, data_end
)),
3176 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3177 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
3178 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
3179 BPF_MOV64_IMM(BPF_REG_5
, 12),
3180 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 4),
3181 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
3182 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
3183 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_6
, 0),
3184 BPF_MOV64_IMM(BPF_REG_0
, 1),
3186 BPF_MOV64_IMM(BPF_REG_0
, 0),
3190 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3193 "direct packet access: test15 (spill with xadd)",
3195 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3196 offsetof(struct __sk_buff
, data
)),
3197 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3198 offsetof(struct __sk_buff
, data_end
)),
3199 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3200 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3201 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
3202 BPF_MOV64_IMM(BPF_REG_5
, 4096),
3203 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
3204 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
3205 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
3206 BPF_STX_XADD(BPF_DW
, BPF_REG_4
, BPF_REG_5
, 0),
3207 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
3208 BPF_STX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_5
, 0),
3209 BPF_MOV64_IMM(BPF_REG_0
, 0),
3212 .errstr
= "R2 invalid mem access 'inv'",
3214 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3217 "direct packet access: test16 (arith on data_end)",
3219 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3220 offsetof(struct __sk_buff
, data
)),
3221 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3222 offsetof(struct __sk_buff
, data_end
)),
3223 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3225 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 16),
3226 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3227 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3228 BPF_MOV64_IMM(BPF_REG_0
, 0),
3231 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
3233 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3236 "direct packet access: test17 (pruning, alignment)",
3238 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3239 offsetof(struct __sk_buff
, data
)),
3240 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3241 offsetof(struct __sk_buff
, data_end
)),
3242 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3243 offsetof(struct __sk_buff
, mark
)),
3244 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3245 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 14),
3246 BPF_JMP_IMM(BPF_JGT
, BPF_REG_7
, 1, 4),
3247 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3248 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, -4),
3249 BPF_MOV64_IMM(BPF_REG_0
, 0),
3251 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 1),
3254 .errstr
= "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3256 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3257 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
3260 "direct packet access: test18 (imm += pkt_ptr, 1)",
3262 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3263 offsetof(struct __sk_buff
, data
)),
3264 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3265 offsetof(struct __sk_buff
, data_end
)),
3266 BPF_MOV64_IMM(BPF_REG_0
, 8),
3267 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3268 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3269 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3270 BPF_MOV64_IMM(BPF_REG_0
, 0),
3274 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3277 "direct packet access: test19 (imm += pkt_ptr, 2)",
3279 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3280 offsetof(struct __sk_buff
, data
)),
3281 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3282 offsetof(struct __sk_buff
, data_end
)),
3283 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3284 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3285 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
3286 BPF_MOV64_IMM(BPF_REG_4
, 4),
3287 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3288 BPF_STX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_4
, 0),
3289 BPF_MOV64_IMM(BPF_REG_0
, 0),
3293 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3296 "direct packet access: test20 (x += pkt_ptr, 1)",
3298 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3299 offsetof(struct __sk_buff
, data
)),
3300 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3301 offsetof(struct __sk_buff
, data_end
)),
3302 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3303 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3304 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3305 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0x7fff),
3306 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3307 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3308 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3309 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3310 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3311 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3312 BPF_MOV64_IMM(BPF_REG_0
, 0),
3315 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3319 "direct packet access: test21 (x += pkt_ptr, 2)",
3321 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3322 offsetof(struct __sk_buff
, data
)),
3323 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3324 offsetof(struct __sk_buff
, data_end
)),
3325 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3326 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3327 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 9),
3328 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3329 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3330 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3331 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, 0x7fff),
3332 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3333 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3334 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3335 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3336 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3337 BPF_MOV64_IMM(BPF_REG_0
, 0),
3340 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3344 "direct packet access: test22 (x += pkt_ptr, 3)",
3346 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3347 offsetof(struct __sk_buff
, data
)),
3348 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3349 offsetof(struct __sk_buff
, data_end
)),
3350 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3351 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3352 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -8),
3353 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_3
, -16),
3354 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_10
, -16),
3355 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 11),
3356 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
3357 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3358 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3359 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3360 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_4
, 49),
3361 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3362 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
3363 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
3364 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3365 BPF_MOV64_IMM(BPF_REG_2
, 1),
3366 BPF_STX_MEM(BPF_H
, BPF_REG_4
, BPF_REG_2
, 0),
3367 BPF_MOV64_IMM(BPF_REG_0
, 0),
3370 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3374 "direct packet access: test23 (x += pkt_ptr, 4)",
3376 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3377 offsetof(struct __sk_buff
, data
)),
3378 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3379 offsetof(struct __sk_buff
, data_end
)),
3380 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3381 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3382 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3383 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xffff),
3384 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3385 BPF_MOV64_IMM(BPF_REG_0
, 31),
3386 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3387 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3388 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3389 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0xffff - 1),
3390 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3391 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3392 BPF_MOV64_IMM(BPF_REG_0
, 0),
3395 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3397 .errstr
= "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3400 "direct packet access: test24 (x += pkt_ptr, 5)",
3402 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3403 offsetof(struct __sk_buff
, data
)),
3404 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3405 offsetof(struct __sk_buff
, data_end
)),
3406 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3407 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3408 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3409 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xff),
3410 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3411 BPF_MOV64_IMM(BPF_REG_0
, 64),
3412 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3413 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3414 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3415 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7fff - 1),
3416 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3417 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3418 BPF_MOV64_IMM(BPF_REG_0
, 0),
3421 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3425 "direct packet access: test25 (marking on <, good access)",
3427 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3428 offsetof(struct __sk_buff
, data
)),
3429 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3430 offsetof(struct __sk_buff
, data_end
)),
3431 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3432 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3433 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 2),
3434 BPF_MOV64_IMM(BPF_REG_0
, 0),
3436 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3437 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3440 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3443 "direct packet access: test26 (marking on <, bad access)",
3445 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3446 offsetof(struct __sk_buff
, data
)),
3447 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3448 offsetof(struct __sk_buff
, data_end
)),
3449 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3450 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3451 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 3),
3452 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3453 BPF_MOV64_IMM(BPF_REG_0
, 0),
3455 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
3458 .errstr
= "invalid access to packet",
3459 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3462 "direct packet access: test27 (marking on <=, good access)",
3464 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3465 offsetof(struct __sk_buff
, data
)),
3466 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3467 offsetof(struct __sk_buff
, data_end
)),
3468 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3469 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3470 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 1),
3471 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3472 BPF_MOV64_IMM(BPF_REG_0
, 1),
3476 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3479 "direct packet access: test28 (marking on <=, bad access)",
3481 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3482 offsetof(struct __sk_buff
, data
)),
3483 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3484 offsetof(struct __sk_buff
, data_end
)),
3485 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3486 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3487 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 2),
3488 BPF_MOV64_IMM(BPF_REG_0
, 1),
3490 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3491 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3494 .errstr
= "invalid access to packet",
3495 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3498 "helper access to packet: test1, valid packet_ptr range",
3500 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3501 offsetof(struct xdp_md
, data
)),
3502 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3503 offsetof(struct xdp_md
, data_end
)),
3504 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3505 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3506 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3507 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3508 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3509 BPF_MOV64_IMM(BPF_REG_4
, 0),
3510 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3511 BPF_FUNC_map_update_elem
),
3512 BPF_MOV64_IMM(BPF_REG_0
, 0),
3515 .fixup_map1
= { 5 },
3516 .result_unpriv
= ACCEPT
,
3518 .prog_type
= BPF_PROG_TYPE_XDP
,
3521 "helper access to packet: test2, unchecked packet_ptr",
3523 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3524 offsetof(struct xdp_md
, data
)),
3525 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3526 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3527 BPF_FUNC_map_lookup_elem
),
3528 BPF_MOV64_IMM(BPF_REG_0
, 0),
3531 .fixup_map1
= { 1 },
3533 .errstr
= "invalid access to packet",
3534 .prog_type
= BPF_PROG_TYPE_XDP
,
3537 "helper access to packet: test3, variable add",
3539 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3540 offsetof(struct xdp_md
, data
)),
3541 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3542 offsetof(struct xdp_md
, data_end
)),
3543 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3544 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3545 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3546 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3547 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3548 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3549 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3550 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3551 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3552 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3553 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3554 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3555 BPF_FUNC_map_lookup_elem
),
3556 BPF_MOV64_IMM(BPF_REG_0
, 0),
3559 .fixup_map1
= { 11 },
3561 .prog_type
= BPF_PROG_TYPE_XDP
,
3564 "helper access to packet: test4, packet_ptr with bad range",
3566 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3567 offsetof(struct xdp_md
, data
)),
3568 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3569 offsetof(struct xdp_md
, data_end
)),
3570 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3571 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3572 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3573 BPF_MOV64_IMM(BPF_REG_0
, 0),
3575 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3576 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3577 BPF_FUNC_map_lookup_elem
),
3578 BPF_MOV64_IMM(BPF_REG_0
, 0),
3581 .fixup_map1
= { 7 },
3583 .errstr
= "invalid access to packet",
3584 .prog_type
= BPF_PROG_TYPE_XDP
,
3587 "helper access to packet: test5, packet_ptr with too short range",
3589 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3590 offsetof(struct xdp_md
, data
)),
3591 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3592 offsetof(struct xdp_md
, data_end
)),
3593 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3594 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3595 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3596 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3597 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3598 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3599 BPF_FUNC_map_lookup_elem
),
3600 BPF_MOV64_IMM(BPF_REG_0
, 0),
3603 .fixup_map1
= { 6 },
3605 .errstr
= "invalid access to packet",
3606 .prog_type
= BPF_PROG_TYPE_XDP
,
3609 "helper access to packet: test6, cls valid packet_ptr range",
3611 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3612 offsetof(struct __sk_buff
, data
)),
3613 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3614 offsetof(struct __sk_buff
, data_end
)),
3615 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3616 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3617 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3618 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3619 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3620 BPF_MOV64_IMM(BPF_REG_4
, 0),
3621 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3622 BPF_FUNC_map_update_elem
),
3623 BPF_MOV64_IMM(BPF_REG_0
, 0),
3626 .fixup_map1
= { 5 },
3628 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3631 "helper access to packet: test7, cls unchecked packet_ptr",
3633 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3634 offsetof(struct __sk_buff
, data
)),
3635 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3636 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3637 BPF_FUNC_map_lookup_elem
),
3638 BPF_MOV64_IMM(BPF_REG_0
, 0),
3641 .fixup_map1
= { 1 },
3643 .errstr
= "invalid access to packet",
3644 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3647 "helper access to packet: test8, cls variable add",
3649 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3650 offsetof(struct __sk_buff
, data
)),
3651 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3652 offsetof(struct __sk_buff
, data_end
)),
3653 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3654 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3655 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3656 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3657 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3658 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3659 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3660 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3661 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3662 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3663 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3664 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3665 BPF_FUNC_map_lookup_elem
),
3666 BPF_MOV64_IMM(BPF_REG_0
, 0),
3669 .fixup_map1
= { 11 },
3671 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3674 "helper access to packet: test9, cls packet_ptr with bad range",
3676 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3677 offsetof(struct __sk_buff
, data
)),
3678 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3679 offsetof(struct __sk_buff
, data_end
)),
3680 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3681 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3682 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3683 BPF_MOV64_IMM(BPF_REG_0
, 0),
3685 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3686 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3687 BPF_FUNC_map_lookup_elem
),
3688 BPF_MOV64_IMM(BPF_REG_0
, 0),
3691 .fixup_map1
= { 7 },
3693 .errstr
= "invalid access to packet",
3694 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3697 "helper access to packet: test10, cls packet_ptr with too short range",
3699 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3700 offsetof(struct __sk_buff
, data
)),
3701 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3702 offsetof(struct __sk_buff
, data_end
)),
3703 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3704 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3705 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3706 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3707 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3708 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3709 BPF_FUNC_map_lookup_elem
),
3710 BPF_MOV64_IMM(BPF_REG_0
, 0),
3713 .fixup_map1
= { 6 },
3715 .errstr
= "invalid access to packet",
3716 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3719 "helper access to packet: test11, cls unsuitable helper 1",
3721 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3722 offsetof(struct __sk_buff
, data
)),
3723 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3724 offsetof(struct __sk_buff
, data_end
)),
3725 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3726 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3727 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 7),
3728 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_7
, 4),
3729 BPF_MOV64_IMM(BPF_REG_2
, 0),
3730 BPF_MOV64_IMM(BPF_REG_4
, 42),
3731 BPF_MOV64_IMM(BPF_REG_5
, 0),
3732 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3733 BPF_FUNC_skb_store_bytes
),
3734 BPF_MOV64_IMM(BPF_REG_0
, 0),
3738 .errstr
= "helper access to the packet",
3739 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3742 "helper access to packet: test12, cls unsuitable helper 2",
3744 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3745 offsetof(struct __sk_buff
, data
)),
3746 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3747 offsetof(struct __sk_buff
, data_end
)),
3748 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3749 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
3750 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_7
, 3),
3751 BPF_MOV64_IMM(BPF_REG_2
, 0),
3752 BPF_MOV64_IMM(BPF_REG_4
, 4),
3753 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3754 BPF_FUNC_skb_load_bytes
),
3755 BPF_MOV64_IMM(BPF_REG_0
, 0),
3759 .errstr
= "helper access to the packet",
3760 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3763 "helper access to packet: test13, cls helper ok",
3765 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3766 offsetof(struct __sk_buff
, data
)),
3767 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3768 offsetof(struct __sk_buff
, data_end
)),
3769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3770 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3771 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3772 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3773 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3774 BPF_MOV64_IMM(BPF_REG_2
, 4),
3775 BPF_MOV64_IMM(BPF_REG_3
, 0),
3776 BPF_MOV64_IMM(BPF_REG_4
, 0),
3777 BPF_MOV64_IMM(BPF_REG_5
, 0),
3778 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3779 BPF_FUNC_csum_diff
),
3780 BPF_MOV64_IMM(BPF_REG_0
, 0),
3784 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3787 "helper access to packet: test14, cls helper ok sub",
3789 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3790 offsetof(struct __sk_buff
, data
)),
3791 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3792 offsetof(struct __sk_buff
, data_end
)),
3793 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3794 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3795 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3796 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3797 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 4),
3798 BPF_MOV64_IMM(BPF_REG_2
, 4),
3799 BPF_MOV64_IMM(BPF_REG_3
, 0),
3800 BPF_MOV64_IMM(BPF_REG_4
, 0),
3801 BPF_MOV64_IMM(BPF_REG_5
, 0),
3802 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3803 BPF_FUNC_csum_diff
),
3804 BPF_MOV64_IMM(BPF_REG_0
, 0),
3808 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3811 "helper access to packet: test15, cls helper fail sub",
3813 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3814 offsetof(struct __sk_buff
, data
)),
3815 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3816 offsetof(struct __sk_buff
, data_end
)),
3817 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3818 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3819 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3820 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3821 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 12),
3822 BPF_MOV64_IMM(BPF_REG_2
, 4),
3823 BPF_MOV64_IMM(BPF_REG_3
, 0),
3824 BPF_MOV64_IMM(BPF_REG_4
, 0),
3825 BPF_MOV64_IMM(BPF_REG_5
, 0),
3826 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3827 BPF_FUNC_csum_diff
),
3828 BPF_MOV64_IMM(BPF_REG_0
, 0),
3832 .errstr
= "invalid access to packet",
3833 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3836 "helper access to packet: test16, cls helper fail range 1",
3838 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3839 offsetof(struct __sk_buff
, data
)),
3840 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3841 offsetof(struct __sk_buff
, data_end
)),
3842 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3843 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3844 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3845 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3846 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3847 BPF_MOV64_IMM(BPF_REG_2
, 8),
3848 BPF_MOV64_IMM(BPF_REG_3
, 0),
3849 BPF_MOV64_IMM(BPF_REG_4
, 0),
3850 BPF_MOV64_IMM(BPF_REG_5
, 0),
3851 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3852 BPF_FUNC_csum_diff
),
3853 BPF_MOV64_IMM(BPF_REG_0
, 0),
3857 .errstr
= "invalid access to packet",
3858 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3861 "helper access to packet: test17, cls helper fail range 2",
3863 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3864 offsetof(struct __sk_buff
, data
)),
3865 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3866 offsetof(struct __sk_buff
, data_end
)),
3867 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3868 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3869 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3870 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3871 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3872 BPF_MOV64_IMM(BPF_REG_2
, -9),
3873 BPF_MOV64_IMM(BPF_REG_3
, 0),
3874 BPF_MOV64_IMM(BPF_REG_4
, 0),
3875 BPF_MOV64_IMM(BPF_REG_5
, 0),
3876 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3877 BPF_FUNC_csum_diff
),
3878 BPF_MOV64_IMM(BPF_REG_0
, 0),
3882 .errstr
= "R2 min value is negative",
3883 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3886 "helper access to packet: test18, cls helper fail range 3",
3888 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3889 offsetof(struct __sk_buff
, data
)),
3890 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3891 offsetof(struct __sk_buff
, data_end
)),
3892 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3893 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3894 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3895 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3896 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3897 BPF_MOV64_IMM(BPF_REG_2
, ~0),
3898 BPF_MOV64_IMM(BPF_REG_3
, 0),
3899 BPF_MOV64_IMM(BPF_REG_4
, 0),
3900 BPF_MOV64_IMM(BPF_REG_5
, 0),
3901 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3902 BPF_FUNC_csum_diff
),
3903 BPF_MOV64_IMM(BPF_REG_0
, 0),
3907 .errstr
= "R2 min value is negative",
3908 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3911 "helper access to packet: test19, cls helper range zero",
3913 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3914 offsetof(struct __sk_buff
, data
)),
3915 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3916 offsetof(struct __sk_buff
, data_end
)),
3917 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3918 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3919 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3920 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3921 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3922 BPF_MOV64_IMM(BPF_REG_2
, 0),
3923 BPF_MOV64_IMM(BPF_REG_3
, 0),
3924 BPF_MOV64_IMM(BPF_REG_4
, 0),
3925 BPF_MOV64_IMM(BPF_REG_5
, 0),
3926 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3927 BPF_FUNC_csum_diff
),
3928 BPF_MOV64_IMM(BPF_REG_0
, 0),
3932 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3935 "helper access to packet: test20, pkt end as input",
3937 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3938 offsetof(struct __sk_buff
, data
)),
3939 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3940 offsetof(struct __sk_buff
, data_end
)),
3941 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3942 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3943 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3944 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3945 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
3946 BPF_MOV64_IMM(BPF_REG_2
, 4),
3947 BPF_MOV64_IMM(BPF_REG_3
, 0),
3948 BPF_MOV64_IMM(BPF_REG_4
, 0),
3949 BPF_MOV64_IMM(BPF_REG_5
, 0),
3950 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3951 BPF_FUNC_csum_diff
),
3952 BPF_MOV64_IMM(BPF_REG_0
, 0),
3956 .errstr
= "R1 type=pkt_end expected=fp",
3957 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3960 "helper access to packet: test21, wrong reg",
3962 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3963 offsetof(struct __sk_buff
, data
)),
3964 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3965 offsetof(struct __sk_buff
, data_end
)),
3966 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3967 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3968 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3969 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3970 BPF_MOV64_IMM(BPF_REG_2
, 4),
3971 BPF_MOV64_IMM(BPF_REG_3
, 0),
3972 BPF_MOV64_IMM(BPF_REG_4
, 0),
3973 BPF_MOV64_IMM(BPF_REG_5
, 0),
3974 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3975 BPF_FUNC_csum_diff
),
3976 BPF_MOV64_IMM(BPF_REG_0
, 0),
3980 .errstr
= "invalid access to packet",
3981 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3984 "valid map access into an array with a constant",
3986 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3987 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3988 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3989 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3990 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3991 BPF_FUNC_map_lookup_elem
),
3992 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3993 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3994 offsetof(struct test_val
, foo
)),
3997 .fixup_map2
= { 3 },
3998 .errstr_unpriv
= "R0 leaks addr",
3999 .result_unpriv
= REJECT
,
4003 "valid map access into an array with a register",
4005 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4006 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4007 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4008 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4009 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4010 BPF_FUNC_map_lookup_elem
),
4011 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4012 BPF_MOV64_IMM(BPF_REG_1
, 4),
4013 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4014 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4015 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4016 offsetof(struct test_val
, foo
)),
4019 .fixup_map2
= { 3 },
4020 .errstr_unpriv
= "R0 leaks addr",
4021 .result_unpriv
= REJECT
,
4023 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4026 "valid map access into an array with a variable",
4028 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4029 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4030 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4031 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4032 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4033 BPF_FUNC_map_lookup_elem
),
4034 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4035 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4036 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 3),
4037 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4038 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4039 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4040 offsetof(struct test_val
, foo
)),
4043 .fixup_map2
= { 3 },
4044 .errstr_unpriv
= "R0 leaks addr",
4045 .result_unpriv
= REJECT
,
4047 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4050 "valid map access into an array with a signed variable",
4052 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4053 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4054 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4055 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4056 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4057 BPF_FUNC_map_lookup_elem
),
4058 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
4059 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4060 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 0xffffffff, 1),
4061 BPF_MOV32_IMM(BPF_REG_1
, 0),
4062 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
4063 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
4064 BPF_MOV32_IMM(BPF_REG_1
, 0),
4065 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4066 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4067 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4068 offsetof(struct test_val
, foo
)),
4071 .fixup_map2
= { 3 },
4072 .errstr_unpriv
= "R0 leaks addr",
4073 .result_unpriv
= REJECT
,
4075 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4078 "invalid map access into an array with a constant",
4080 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4081 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4082 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4083 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4084 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4085 BPF_FUNC_map_lookup_elem
),
4086 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4087 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, (MAX_ENTRIES
+ 1) << 2,
4088 offsetof(struct test_val
, foo
)),
4091 .fixup_map2
= { 3 },
4092 .errstr
= "invalid access to map value, value_size=48 off=48 size=8",
4096 "invalid map access into an array with a register",
4098 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4099 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4100 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4101 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4102 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4103 BPF_FUNC_map_lookup_elem
),
4104 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4105 BPF_MOV64_IMM(BPF_REG_1
, MAX_ENTRIES
+ 1),
4106 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4107 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4108 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4109 offsetof(struct test_val
, foo
)),
4112 .fixup_map2
= { 3 },
4113 .errstr
= "R0 min value is outside of the array range",
4115 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4118 "invalid map access into an array with a variable",
4120 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4121 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4122 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4123 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4124 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4125 BPF_FUNC_map_lookup_elem
),
4126 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4127 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4128 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4129 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4130 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4131 offsetof(struct test_val
, foo
)),
4134 .fixup_map2
= { 3 },
4135 .errstr
= "R0 unbounded memory access, make sure to bounds check any array access into a map",
4137 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4140 "invalid map access into an array with no floor check",
4142 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4143 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4144 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4145 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4146 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4147 BPF_FUNC_map_lookup_elem
),
4148 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4149 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
4150 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
4151 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
4152 BPF_MOV32_IMM(BPF_REG_1
, 0),
4153 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4154 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4155 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4156 offsetof(struct test_val
, foo
)),
4159 .fixup_map2
= { 3 },
4160 .errstr_unpriv
= "R0 leaks addr",
4161 .errstr
= "R0 unbounded memory access",
4162 .result_unpriv
= REJECT
,
4164 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4167 "invalid map access into an array with a invalid max check",
4169 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4170 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4171 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4172 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4173 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4174 BPF_FUNC_map_lookup_elem
),
4175 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4176 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4177 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
+ 1),
4178 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
4179 BPF_MOV32_IMM(BPF_REG_1
, 0),
4180 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
4181 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4182 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
4183 offsetof(struct test_val
, foo
)),
4186 .fixup_map2
= { 3 },
4187 .errstr_unpriv
= "R0 leaks addr",
4188 .errstr
= "invalid access to map value, value_size=48 off=44 size=8",
4189 .result_unpriv
= REJECT
,
4191 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4194 "invalid map access into an array with a invalid max check",
4196 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4197 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4198 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4199 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4200 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4201 BPF_FUNC_map_lookup_elem
),
4202 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
4203 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
4204 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4205 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4206 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4207 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4208 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4209 BPF_FUNC_map_lookup_elem
),
4210 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4211 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
4212 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
4213 offsetof(struct test_val
, foo
)),
4216 .fixup_map2
= { 3, 11 },
4217 .errstr
= "R0 pointer += pointer",
4219 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4222 "multiple registers share map_lookup_elem result",
4224 BPF_MOV64_IMM(BPF_REG_1
, 10),
4225 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4226 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4228 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4229 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4230 BPF_FUNC_map_lookup_elem
),
4231 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4232 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4233 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4236 .fixup_map1
= { 4 },
4238 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4241 "alu ops on ptr_to_map_value_or_null, 1",
4243 BPF_MOV64_IMM(BPF_REG_1
, 10),
4244 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4245 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4246 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4247 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4248 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4249 BPF_FUNC_map_lookup_elem
),
4250 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4251 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -2),
4252 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 2),
4253 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4254 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4257 .fixup_map1
= { 4 },
4258 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4260 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4263 "alu ops on ptr_to_map_value_or_null, 2",
4265 BPF_MOV64_IMM(BPF_REG_1
, 10),
4266 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4267 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4268 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4269 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4270 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4271 BPF_FUNC_map_lookup_elem
),
4272 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4273 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, -1),
4274 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4275 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4278 .fixup_map1
= { 4 },
4279 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4281 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4284 "alu ops on ptr_to_map_value_or_null, 3",
4286 BPF_MOV64_IMM(BPF_REG_1
, 10),
4287 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4288 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4289 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4290 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4291 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4292 BPF_FUNC_map_lookup_elem
),
4293 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4294 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_4
, 1),
4295 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4296 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4299 .fixup_map1
= { 4 },
4300 .errstr
= "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4302 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4305 "invalid memory access with multiple map_lookup_elem calls",
4307 BPF_MOV64_IMM(BPF_REG_1
, 10),
4308 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4309 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4310 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4311 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4312 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4313 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4314 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4315 BPF_FUNC_map_lookup_elem
),
4316 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4317 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4318 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4319 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4320 BPF_FUNC_map_lookup_elem
),
4321 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4322 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4325 .fixup_map1
= { 4 },
4327 .errstr
= "R4 !read_ok",
4328 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4331 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4333 BPF_MOV64_IMM(BPF_REG_1
, 10),
4334 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4335 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4336 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4337 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4338 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4339 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4340 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4341 BPF_FUNC_map_lookup_elem
),
4342 BPF_MOV64_IMM(BPF_REG_2
, 10),
4343 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 0, 3),
4344 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4345 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4346 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4347 BPF_FUNC_map_lookup_elem
),
4348 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4349 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4350 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4353 .fixup_map1
= { 4 },
4355 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4358 "invalid map access from else condition",
4360 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4361 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4362 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4363 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4364 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
4365 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4366 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4367 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
-1, 1),
4368 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 1),
4369 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4370 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4371 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, offsetof(struct test_val
, foo
)),
4374 .fixup_map2
= { 3 },
4375 .errstr
= "R0 unbounded memory access",
4377 .errstr_unpriv
= "R0 leaks addr",
4378 .result_unpriv
= REJECT
,
4379 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4382 "constant register |= constant should keep constant type",
4384 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4385 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4386 BPF_MOV64_IMM(BPF_REG_2
, 34),
4387 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 13),
4388 BPF_MOV64_IMM(BPF_REG_3
, 0),
4389 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4393 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4396 "constant register |= constant should not bypass stack boundary checks",
4398 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4399 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4400 BPF_MOV64_IMM(BPF_REG_2
, 34),
4401 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 24),
4402 BPF_MOV64_IMM(BPF_REG_3
, 0),
4403 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4406 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4408 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4411 "constant register |= constant register should keep constant type",
4413 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4414 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4415 BPF_MOV64_IMM(BPF_REG_2
, 34),
4416 BPF_MOV64_IMM(BPF_REG_4
, 13),
4417 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4418 BPF_MOV64_IMM(BPF_REG_3
, 0),
4419 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4423 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4426 "constant register |= constant register should not bypass stack boundary checks",
4428 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4429 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4430 BPF_MOV64_IMM(BPF_REG_2
, 34),
4431 BPF_MOV64_IMM(BPF_REG_4
, 24),
4432 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4433 BPF_MOV64_IMM(BPF_REG_3
, 0),
4434 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4437 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4439 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4442 "invalid direct packet write for LWT_IN",
4444 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4445 offsetof(struct __sk_buff
, data
)),
4446 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4447 offsetof(struct __sk_buff
, data_end
)),
4448 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4449 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4450 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4451 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4452 BPF_MOV64_IMM(BPF_REG_0
, 0),
4455 .errstr
= "cannot write into packet",
4457 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4460 "invalid direct packet write for LWT_OUT",
4462 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4463 offsetof(struct __sk_buff
, data
)),
4464 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4465 offsetof(struct __sk_buff
, data_end
)),
4466 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4467 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4468 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4469 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4470 BPF_MOV64_IMM(BPF_REG_0
, 0),
4473 .errstr
= "cannot write into packet",
4475 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4478 "direct packet write for LWT_XMIT",
4480 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4481 offsetof(struct __sk_buff
, data
)),
4482 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4483 offsetof(struct __sk_buff
, data_end
)),
4484 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4485 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4486 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4487 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4488 BPF_MOV64_IMM(BPF_REG_0
, 0),
4492 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4495 "direct packet read for LWT_IN",
4497 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4498 offsetof(struct __sk_buff
, data
)),
4499 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4500 offsetof(struct __sk_buff
, data_end
)),
4501 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4502 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4503 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4504 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4505 BPF_MOV64_IMM(BPF_REG_0
, 0),
4509 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4512 "direct packet read for LWT_OUT",
4514 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4515 offsetof(struct __sk_buff
, data
)),
4516 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4517 offsetof(struct __sk_buff
, data_end
)),
4518 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4519 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4520 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4521 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4522 BPF_MOV64_IMM(BPF_REG_0
, 0),
4526 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4529 "direct packet read for LWT_XMIT",
4531 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4532 offsetof(struct __sk_buff
, data
)),
4533 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4534 offsetof(struct __sk_buff
, data_end
)),
4535 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4536 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4537 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4538 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4539 BPF_MOV64_IMM(BPF_REG_0
, 0),
4543 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4546 "overlapping checks for direct packet access",
4548 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4549 offsetof(struct __sk_buff
, data
)),
4550 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4551 offsetof(struct __sk_buff
, data_end
)),
4552 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4553 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4554 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
4555 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
4556 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
4557 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
4558 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
4559 BPF_MOV64_IMM(BPF_REG_0
, 0),
4563 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4566 "invalid access of tc_classid for LWT_IN",
4568 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4569 offsetof(struct __sk_buff
, tc_classid
)),
4573 .errstr
= "invalid bpf_context access",
4576 "invalid access of tc_classid for LWT_OUT",
4578 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4579 offsetof(struct __sk_buff
, tc_classid
)),
4583 .errstr
= "invalid bpf_context access",
4586 "invalid access of tc_classid for LWT_XMIT",
4588 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4589 offsetof(struct __sk_buff
, tc_classid
)),
4593 .errstr
= "invalid bpf_context access",
4596 "leak pointer into ctx 1",
4598 BPF_MOV64_IMM(BPF_REG_0
, 0),
4599 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4600 offsetof(struct __sk_buff
, cb
[0])),
4601 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4602 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4603 offsetof(struct __sk_buff
, cb
[0])),
4606 .fixup_map1
= { 2 },
4607 .errstr_unpriv
= "R2 leaks addr into mem",
4608 .result_unpriv
= REJECT
,
4610 .errstr
= "BPF_XADD stores into R1 context is not allowed",
4613 "leak pointer into ctx 2",
4615 BPF_MOV64_IMM(BPF_REG_0
, 0),
4616 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4617 offsetof(struct __sk_buff
, cb
[0])),
4618 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_10
,
4619 offsetof(struct __sk_buff
, cb
[0])),
4622 .errstr_unpriv
= "R10 leaks addr into mem",
4623 .result_unpriv
= REJECT
,
4625 .errstr
= "BPF_XADD stores into R1 context is not allowed",
4628 "leak pointer into ctx 3",
4630 BPF_MOV64_IMM(BPF_REG_0
, 0),
4631 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4632 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4633 offsetof(struct __sk_buff
, cb
[0])),
4636 .fixup_map1
= { 1 },
4637 .errstr_unpriv
= "R2 leaks addr into ctx",
4638 .result_unpriv
= REJECT
,
4642 "leak pointer into map val",
4644 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
4645 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4646 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4647 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4648 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4649 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4650 BPF_FUNC_map_lookup_elem
),
4651 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4652 BPF_MOV64_IMM(BPF_REG_3
, 0),
4653 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
4654 BPF_STX_XADD(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
4655 BPF_MOV64_IMM(BPF_REG_0
, 0),
4658 .fixup_map1
= { 4 },
4659 .errstr_unpriv
= "R6 leaks addr into mem",
4660 .result_unpriv
= REJECT
,
4664 "helper access to map: full range",
4666 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4667 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4668 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4669 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4670 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4671 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4672 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4673 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
4674 BPF_MOV64_IMM(BPF_REG_3
, 0),
4675 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4678 .fixup_map2
= { 3 },
4680 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4683 "helper access to map: partial range",
4685 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4686 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4687 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4688 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4690 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4691 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4692 BPF_MOV64_IMM(BPF_REG_2
, 8),
4693 BPF_MOV64_IMM(BPF_REG_3
, 0),
4694 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4697 .fixup_map2
= { 3 },
4699 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4702 "helper access to map: empty range",
4704 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4705 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4706 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4707 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4708 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4709 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4710 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4711 BPF_MOV64_IMM(BPF_REG_2
, 0),
4712 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
4715 .fixup_map2
= { 3 },
4716 .errstr
= "invalid access to map value, value_size=48 off=0 size=0",
4718 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4721 "helper access to map: out-of-bound range",
4723 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4724 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4725 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4726 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4727 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4728 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4729 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4730 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
) + 8),
4731 BPF_MOV64_IMM(BPF_REG_3
, 0),
4732 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4735 .fixup_map2
= { 3 },
4736 .errstr
= "invalid access to map value, value_size=48 off=0 size=56",
4738 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4741 "helper access to map: negative range",
4743 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4744 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4745 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4746 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4747 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4748 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4749 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4750 BPF_MOV64_IMM(BPF_REG_2
, -8),
4751 BPF_MOV64_IMM(BPF_REG_3
, 0),
4752 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4755 .fixup_map2
= { 3 },
4756 .errstr
= "R2 min value is negative",
4758 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4761 "helper access to adjusted map (via const imm): full range",
4763 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4764 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4765 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4766 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4767 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4768 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4769 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4770 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4771 offsetof(struct test_val
, foo
)),
4772 BPF_MOV64_IMM(BPF_REG_2
,
4773 sizeof(struct test_val
) -
4774 offsetof(struct test_val
, foo
)),
4775 BPF_MOV64_IMM(BPF_REG_3
, 0),
4776 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4779 .fixup_map2
= { 3 },
4781 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4784 "helper access to adjusted map (via const imm): partial range",
4786 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4787 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4788 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4789 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4790 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4791 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4792 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4793 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4794 offsetof(struct test_val
, foo
)),
4795 BPF_MOV64_IMM(BPF_REG_2
, 8),
4796 BPF_MOV64_IMM(BPF_REG_3
, 0),
4797 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4800 .fixup_map2
= { 3 },
4802 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4805 "helper access to adjusted map (via const imm): empty range",
4807 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4808 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4809 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4810 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4812 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4813 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4814 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4815 offsetof(struct test_val
, foo
)),
4816 BPF_MOV64_IMM(BPF_REG_2
, 0),
4817 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
4820 .fixup_map2
= { 3 },
4821 .errstr
= "invalid access to map value, value_size=48 off=4 size=0",
4823 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4826 "helper access to adjusted map (via const imm): out-of-bound range",
4828 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4829 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4830 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4831 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4832 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4833 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4834 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4835 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4836 offsetof(struct test_val
, foo
)),
4837 BPF_MOV64_IMM(BPF_REG_2
,
4838 sizeof(struct test_val
) -
4839 offsetof(struct test_val
, foo
) + 8),
4840 BPF_MOV64_IMM(BPF_REG_3
, 0),
4841 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4844 .fixup_map2
= { 3 },
4845 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4847 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4850 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4852 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4853 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4854 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4855 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4857 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4858 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4859 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4860 offsetof(struct test_val
, foo
)),
4861 BPF_MOV64_IMM(BPF_REG_2
, -8),
4862 BPF_MOV64_IMM(BPF_REG_3
, 0),
4863 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4866 .fixup_map2
= { 3 },
4867 .errstr
= "R2 min value is negative",
4869 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4872 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4874 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4875 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4876 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4877 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4878 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4879 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4880 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4881 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4882 offsetof(struct test_val
, foo
)),
4883 BPF_MOV64_IMM(BPF_REG_2
, -1),
4884 BPF_MOV64_IMM(BPF_REG_3
, 0),
4885 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4888 .fixup_map2
= { 3 },
4889 .errstr
= "R2 min value is negative",
4891 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4894 "helper access to adjusted map (via const reg): full range",
4896 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4898 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4899 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4900 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4901 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4902 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4903 BPF_MOV64_IMM(BPF_REG_3
,
4904 offsetof(struct test_val
, foo
)),
4905 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4906 BPF_MOV64_IMM(BPF_REG_2
,
4907 sizeof(struct test_val
) -
4908 offsetof(struct test_val
, foo
)),
4909 BPF_MOV64_IMM(BPF_REG_3
, 0),
4910 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4913 .fixup_map2
= { 3 },
4915 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4918 "helper access to adjusted map (via const reg): partial range",
4920 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4921 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4922 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4925 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4926 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4927 BPF_MOV64_IMM(BPF_REG_3
,
4928 offsetof(struct test_val
, foo
)),
4929 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4930 BPF_MOV64_IMM(BPF_REG_2
, 8),
4931 BPF_MOV64_IMM(BPF_REG_3
, 0),
4932 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4935 .fixup_map2
= { 3 },
4937 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4940 "helper access to adjusted map (via const reg): empty range",
4942 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4943 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4944 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4945 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4946 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4947 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4948 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4949 BPF_MOV64_IMM(BPF_REG_3
, 0),
4950 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4951 BPF_MOV64_IMM(BPF_REG_2
, 0),
4952 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
4955 .fixup_map2
= { 3 },
4956 .errstr
= "R1 min value is outside of the array range",
4958 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4961 "helper access to adjusted map (via const reg): out-of-bound range",
4963 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4964 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4965 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4966 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4967 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4968 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4969 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4970 BPF_MOV64_IMM(BPF_REG_3
,
4971 offsetof(struct test_val
, foo
)),
4972 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4973 BPF_MOV64_IMM(BPF_REG_2
,
4974 sizeof(struct test_val
) -
4975 offsetof(struct test_val
, foo
) + 8),
4976 BPF_MOV64_IMM(BPF_REG_3
, 0),
4977 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4980 .fixup_map2
= { 3 },
4981 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4983 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4986 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4988 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4989 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4990 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4991 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4993 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4994 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4995 BPF_MOV64_IMM(BPF_REG_3
,
4996 offsetof(struct test_val
, foo
)),
4997 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4998 BPF_MOV64_IMM(BPF_REG_2
, -8),
4999 BPF_MOV64_IMM(BPF_REG_3
, 0),
5000 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5003 .fixup_map2
= { 3 },
5004 .errstr
= "R2 min value is negative",
5006 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5009 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5011 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5012 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5013 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5014 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5015 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5016 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5017 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5018 BPF_MOV64_IMM(BPF_REG_3
,
5019 offsetof(struct test_val
, foo
)),
5020 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5021 BPF_MOV64_IMM(BPF_REG_2
, -1),
5022 BPF_MOV64_IMM(BPF_REG_3
, 0),
5023 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5026 .fixup_map2
= { 3 },
5027 .errstr
= "R2 min value is negative",
5029 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5032 "helper access to adjusted map (via variable): full range",
5034 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5035 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5036 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5037 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5039 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5040 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5041 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5042 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5043 offsetof(struct test_val
, foo
), 4),
5044 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5045 BPF_MOV64_IMM(BPF_REG_2
,
5046 sizeof(struct test_val
) -
5047 offsetof(struct test_val
, foo
)),
5048 BPF_MOV64_IMM(BPF_REG_3
, 0),
5049 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5052 .fixup_map2
= { 3 },
5054 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5057 "helper access to adjusted map (via variable): partial range",
5059 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5060 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5061 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5062 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5063 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5064 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5065 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5066 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5067 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5068 offsetof(struct test_val
, foo
), 4),
5069 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5070 BPF_MOV64_IMM(BPF_REG_2
, 8),
5071 BPF_MOV64_IMM(BPF_REG_3
, 0),
5072 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5075 .fixup_map2
= { 3 },
5077 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5080 "helper access to adjusted map (via variable): empty range",
5082 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5084 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5085 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5087 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5088 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5089 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5090 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5091 offsetof(struct test_val
, foo
), 3),
5092 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5093 BPF_MOV64_IMM(BPF_REG_2
, 0),
5094 BPF_EMIT_CALL(BPF_FUNC_trace_printk
),
5097 .fixup_map2
= { 3 },
5098 .errstr
= "R1 min value is outside of the array range",
5100 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5103 "helper access to adjusted map (via variable): no max check",
5105 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5106 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5107 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5108 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5109 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5110 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5111 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5112 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5113 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5114 BPF_MOV64_IMM(BPF_REG_2
, 1),
5115 BPF_MOV64_IMM(BPF_REG_3
, 0),
5116 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5119 .fixup_map2
= { 3 },
5120 .errstr
= "R1 unbounded memory access",
5122 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5125 "helper access to adjusted map (via variable): wrong max check",
5127 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5128 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5129 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5130 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5131 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5132 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5133 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5134 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5135 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
5136 offsetof(struct test_val
, foo
), 4),
5137 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5138 BPF_MOV64_IMM(BPF_REG_2
,
5139 sizeof(struct test_val
) -
5140 offsetof(struct test_val
, foo
) + 1),
5141 BPF_MOV64_IMM(BPF_REG_3
, 0),
5142 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5145 .fixup_map2
= { 3 },
5146 .errstr
= "invalid access to map value, value_size=48 off=4 size=45",
5148 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5151 "helper access to map: bounds check using <, good access",
5153 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5154 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5155 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5156 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5157 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5158 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5159 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5160 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5161 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 2),
5162 BPF_MOV64_IMM(BPF_REG_0
, 0),
5164 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5165 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5166 BPF_MOV64_IMM(BPF_REG_0
, 0),
5169 .fixup_map2
= { 3 },
5171 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5174 "helper access to map: bounds check using <, bad access",
5176 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5177 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5178 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5179 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5180 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5181 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5182 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5183 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5184 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 4),
5185 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5186 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5187 BPF_MOV64_IMM(BPF_REG_0
, 0),
5189 BPF_MOV64_IMM(BPF_REG_0
, 0),
5192 .fixup_map2
= { 3 },
5194 .errstr
= "R1 unbounded memory access",
5195 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5198 "helper access to map: bounds check using <=, good access",
5200 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5201 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5202 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5203 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5204 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5205 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5206 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5207 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5208 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 2),
5209 BPF_MOV64_IMM(BPF_REG_0
, 0),
5211 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5212 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5213 BPF_MOV64_IMM(BPF_REG_0
, 0),
5216 .fixup_map2
= { 3 },
5218 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5221 "helper access to map: bounds check using <=, bad access",
5223 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5225 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5226 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5227 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5228 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5229 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5230 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5231 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 4),
5232 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5233 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5234 BPF_MOV64_IMM(BPF_REG_0
, 0),
5236 BPF_MOV64_IMM(BPF_REG_0
, 0),
5239 .fixup_map2
= { 3 },
5241 .errstr
= "R1 unbounded memory access",
5242 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5245 "helper access to map: bounds check using s<, good access",
5247 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5248 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5249 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5250 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5251 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5252 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5253 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5254 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5255 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5256 BPF_MOV64_IMM(BPF_REG_0
, 0),
5258 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 0, -3),
5259 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5260 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5261 BPF_MOV64_IMM(BPF_REG_0
, 0),
5264 .fixup_map2
= { 3 },
5266 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5269 "helper access to map: bounds check using s<, good access 2",
5271 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5273 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5274 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5276 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5277 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5278 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5279 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5280 BPF_MOV64_IMM(BPF_REG_0
, 0),
5282 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
5283 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5284 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5285 BPF_MOV64_IMM(BPF_REG_0
, 0),
5288 .fixup_map2
= { 3 },
5290 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5293 "helper access to map: bounds check using s<, bad access",
5295 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5296 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5297 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5298 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5299 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5300 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5301 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5302 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5303 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
5304 BPF_MOV64_IMM(BPF_REG_0
, 0),
5306 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
5307 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5308 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5309 BPF_MOV64_IMM(BPF_REG_0
, 0),
5312 .fixup_map2
= { 3 },
5314 .errstr
= "R1 min value is negative",
5315 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5318 "helper access to map: bounds check using s<=, good access",
5320 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5321 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5322 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5323 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5324 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5325 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5326 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5327 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5328 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5329 BPF_MOV64_IMM(BPF_REG_0
, 0),
5331 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 0, -3),
5332 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5333 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5334 BPF_MOV64_IMM(BPF_REG_0
, 0),
5337 .fixup_map2
= { 3 },
5339 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5342 "helper access to map: bounds check using s<=, good access 2",
5344 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5345 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5346 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5347 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5348 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5349 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5350 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5351 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5352 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5353 BPF_MOV64_IMM(BPF_REG_0
, 0),
5355 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5356 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5357 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5358 BPF_MOV64_IMM(BPF_REG_0
, 0),
5361 .fixup_map2
= { 3 },
5363 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5366 "helper access to map: bounds check using s<=, bad access",
5368 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5369 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5370 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5371 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5372 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5373 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5374 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5375 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5376 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5377 BPF_MOV64_IMM(BPF_REG_0
, 0),
5379 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5380 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5381 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5382 BPF_MOV64_IMM(BPF_REG_0
, 0),
5385 .fixup_map2
= { 3 },
5387 .errstr
= "R1 min value is negative",
5388 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5391 "map element value is preserved across register spilling",
5393 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5394 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5395 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5397 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5398 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5399 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5400 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5401 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5402 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5403 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5404 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5407 .fixup_map2
= { 3 },
5408 .errstr_unpriv
= "R0 leaks addr",
5410 .result_unpriv
= REJECT
,
5413 "map element value or null is marked on register spilling",
5415 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5416 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5417 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5418 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5419 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5420 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5421 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -152),
5422 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5423 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5424 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5425 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5428 .fixup_map2
= { 3 },
5429 .errstr_unpriv
= "R0 leaks addr",
5431 .result_unpriv
= REJECT
,
5434 "map element value store of cleared call register",
5436 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5438 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5439 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5440 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5441 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
5442 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
5445 .fixup_map2
= { 3 },
5446 .errstr_unpriv
= "R1 !read_ok",
5447 .errstr
= "R1 !read_ok",
5449 .result_unpriv
= REJECT
,
5452 "map element value with unaligned store",
5454 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5455 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5456 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5457 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5458 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5459 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 17),
5460 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5461 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5462 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 43),
5463 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, -2, 44),
5464 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5465 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 32),
5466 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 2, 33),
5467 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -2, 34),
5468 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 5),
5469 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 22),
5470 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 4, 23),
5471 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -7, 24),
5472 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_8
),
5473 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, 3),
5474 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 0, 22),
5475 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 4, 23),
5476 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, -4, 24),
5479 .fixup_map2
= { 3 },
5480 .errstr_unpriv
= "R0 leaks addr",
5482 .result_unpriv
= REJECT
,
5483 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5486 "map element value with unaligned load",
5488 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5489 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5490 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5491 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5492 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5493 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5494 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5495 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 9),
5496 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5497 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5498 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 2),
5499 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5500 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 0),
5501 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 2),
5502 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 5),
5503 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5504 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 4),
5507 .fixup_map2
= { 3 },
5508 .errstr_unpriv
= "R0 leaks addr",
5510 .result_unpriv
= REJECT
,
5511 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5514 "map element value illegal alu op, 1",
5516 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5517 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5518 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5519 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5520 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5521 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5522 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 8),
5523 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5526 .fixup_map2
= { 3 },
5527 .errstr
= "R0 bitwise operator &= on pointer",
5531 "map element value illegal alu op, 2",
5533 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5534 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5535 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5536 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5537 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5538 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5539 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_0
, 0),
5540 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5543 .fixup_map2
= { 3 },
5544 .errstr
= "R0 32-bit pointer arithmetic prohibited",
5548 "map element value illegal alu op, 3",
5550 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5551 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5552 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5553 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5554 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5555 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5556 BPF_ALU64_IMM(BPF_DIV
, BPF_REG_0
, 42),
5557 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5560 .fixup_map2
= { 3 },
5561 .errstr
= "R0 pointer arithmetic with /= operator",
5565 "map element value illegal alu op, 4",
5567 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5569 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5570 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5572 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5573 BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_0
, 64),
5574 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5577 .fixup_map2
= { 3 },
5578 .errstr_unpriv
= "R0 pointer arithmetic prohibited",
5579 .errstr
= "invalid mem access 'inv'",
5581 .result_unpriv
= REJECT
,
5584 "map element value illegal alu op, 5",
5586 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5587 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5588 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5589 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5590 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5591 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5592 BPF_MOV64_IMM(BPF_REG_3
, 4096),
5593 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5594 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5595 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
5596 BPF_STX_XADD(BPF_DW
, BPF_REG_2
, BPF_REG_3
, 0),
5597 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 0),
5598 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5601 .fixup_map2
= { 3 },
5602 .errstr
= "R0 invalid mem access 'inv'",
5606 "map element value is preserved across register spilling",
5608 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5609 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5610 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5611 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5612 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5613 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5614 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
,
5615 offsetof(struct test_val
, foo
)),
5616 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5617 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5618 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5619 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5620 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5621 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5624 .fixup_map2
= { 3 },
5625 .errstr_unpriv
= "R0 leaks addr",
5627 .result_unpriv
= REJECT
,
5628 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5631 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5633 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5634 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5635 BPF_MOV64_IMM(BPF_REG_0
, 0),
5636 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5637 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5638 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5639 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5640 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5641 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5642 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5643 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5644 BPF_MOV64_IMM(BPF_REG_2
, 16),
5645 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5646 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5647 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5648 BPF_MOV64_IMM(BPF_REG_4
, 0),
5649 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5650 BPF_MOV64_IMM(BPF_REG_3
, 0),
5651 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5652 BPF_MOV64_IMM(BPF_REG_0
, 0),
5656 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5659 "helper access to variable memory: stack, bitwise AND, zero included",
5661 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5662 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5663 BPF_MOV64_IMM(BPF_REG_2
, 16),
5664 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5665 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5666 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5667 BPF_MOV64_IMM(BPF_REG_3
, 0),
5668 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5671 .errstr
= "invalid indirect read from stack off -64+0 size 64",
5673 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5676 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5678 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5679 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5680 BPF_MOV64_IMM(BPF_REG_2
, 16),
5681 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5682 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5683 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 65),
5684 BPF_MOV64_IMM(BPF_REG_4
, 0),
5685 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5686 BPF_MOV64_IMM(BPF_REG_3
, 0),
5687 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5688 BPF_MOV64_IMM(BPF_REG_0
, 0),
5691 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5693 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5696 "helper access to variable memory: stack, JMP, correct bounds",
5698 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5699 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5700 BPF_MOV64_IMM(BPF_REG_0
, 0),
5701 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5702 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5703 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5704 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5705 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5706 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5707 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5708 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5709 BPF_MOV64_IMM(BPF_REG_2
, 16),
5710 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5711 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5712 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 4),
5713 BPF_MOV64_IMM(BPF_REG_4
, 0),
5714 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5715 BPF_MOV64_IMM(BPF_REG_3
, 0),
5716 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5717 BPF_MOV64_IMM(BPF_REG_0
, 0),
5721 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5724 "helper access to variable memory: stack, JMP (signed), correct bounds",
5726 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5727 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5728 BPF_MOV64_IMM(BPF_REG_0
, 0),
5729 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5730 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5731 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5732 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5733 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5734 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5735 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5736 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5737 BPF_MOV64_IMM(BPF_REG_2
, 16),
5738 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5739 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5740 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 4),
5741 BPF_MOV64_IMM(BPF_REG_4
, 0),
5742 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5743 BPF_MOV64_IMM(BPF_REG_3
, 0),
5744 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5745 BPF_MOV64_IMM(BPF_REG_0
, 0),
5749 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5752 "helper access to variable memory: stack, JMP, bounds + offset",
5754 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5755 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5756 BPF_MOV64_IMM(BPF_REG_2
, 16),
5757 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5758 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5759 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 5),
5760 BPF_MOV64_IMM(BPF_REG_4
, 0),
5761 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 3),
5762 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5763 BPF_MOV64_IMM(BPF_REG_3
, 0),
5764 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5765 BPF_MOV64_IMM(BPF_REG_0
, 0),
5768 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5770 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5773 "helper access to variable memory: stack, JMP, wrong max",
5775 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5776 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5777 BPF_MOV64_IMM(BPF_REG_2
, 16),
5778 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5779 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5780 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 65, 4),
5781 BPF_MOV64_IMM(BPF_REG_4
, 0),
5782 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5783 BPF_MOV64_IMM(BPF_REG_3
, 0),
5784 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5785 BPF_MOV64_IMM(BPF_REG_0
, 0),
5788 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5790 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5793 "helper access to variable memory: stack, JMP, no max check",
5795 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5796 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5797 BPF_MOV64_IMM(BPF_REG_2
, 16),
5798 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5799 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5800 BPF_MOV64_IMM(BPF_REG_4
, 0),
5801 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5802 BPF_MOV64_IMM(BPF_REG_3
, 0),
5803 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5804 BPF_MOV64_IMM(BPF_REG_0
, 0),
5807 /* because max wasn't checked, signed min is negative */
5808 .errstr
= "R2 min value is negative, either use unsigned or 'var &= const'",
5810 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5813 "helper access to variable memory: stack, JMP, no min check",
5815 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5816 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5817 BPF_MOV64_IMM(BPF_REG_2
, 16),
5818 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5819 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5820 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 3),
5821 BPF_MOV64_IMM(BPF_REG_3
, 0),
5822 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5823 BPF_MOV64_IMM(BPF_REG_0
, 0),
5826 .errstr
= "invalid indirect read from stack off -64+0 size 64",
5828 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5831 "helper access to variable memory: stack, JMP (signed), no min check",
5833 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5834 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5835 BPF_MOV64_IMM(BPF_REG_2
, 16),
5836 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5837 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5838 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 3),
5839 BPF_MOV64_IMM(BPF_REG_3
, 0),
5840 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5841 BPF_MOV64_IMM(BPF_REG_0
, 0),
5844 .errstr
= "R2 min value is negative",
5846 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5849 "helper access to variable memory: map, JMP, correct bounds",
5851 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5852 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5853 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5854 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5855 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5856 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5857 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5858 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5859 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5860 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5861 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5862 sizeof(struct test_val
), 4),
5863 BPF_MOV64_IMM(BPF_REG_4
, 0),
5864 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5865 BPF_MOV64_IMM(BPF_REG_3
, 0),
5866 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5867 BPF_MOV64_IMM(BPF_REG_0
, 0),
5870 .fixup_map2
= { 3 },
5872 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5875 "helper access to variable memory: map, JMP, wrong max",
5877 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5878 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5879 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5880 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5881 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5882 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5883 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5884 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5885 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5886 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5887 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5888 sizeof(struct test_val
) + 1, 4),
5889 BPF_MOV64_IMM(BPF_REG_4
, 0),
5890 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5891 BPF_MOV64_IMM(BPF_REG_3
, 0),
5892 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5893 BPF_MOV64_IMM(BPF_REG_0
, 0),
5896 .fixup_map2
= { 3 },
5897 .errstr
= "invalid access to map value, value_size=48 off=0 size=49",
5899 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5902 "helper access to variable memory: map adjusted, JMP, correct bounds",
5904 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5905 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5906 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5907 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5908 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5909 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5910 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5911 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5912 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5913 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5914 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5915 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5916 sizeof(struct test_val
) - 20, 4),
5917 BPF_MOV64_IMM(BPF_REG_4
, 0),
5918 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5919 BPF_MOV64_IMM(BPF_REG_3
, 0),
5920 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5921 BPF_MOV64_IMM(BPF_REG_0
, 0),
5924 .fixup_map2
= { 3 },
5926 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5929 "helper access to variable memory: map adjusted, JMP, wrong max",
5931 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5932 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5933 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5934 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5935 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5936 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5937 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5938 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5939 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5940 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5941 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5942 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5943 sizeof(struct test_val
) - 19, 4),
5944 BPF_MOV64_IMM(BPF_REG_4
, 0),
5945 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5946 BPF_MOV64_IMM(BPF_REG_3
, 0),
5947 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5948 BPF_MOV64_IMM(BPF_REG_0
, 0),
5951 .fixup_map2
= { 3 },
5952 .errstr
= "R1 min value is outside of the array range",
5954 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5957 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5959 BPF_MOV64_IMM(BPF_REG_1
, 0),
5960 BPF_MOV64_IMM(BPF_REG_2
, 0),
5961 BPF_MOV64_IMM(BPF_REG_3
, 0),
5962 BPF_MOV64_IMM(BPF_REG_4
, 0),
5963 BPF_MOV64_IMM(BPF_REG_5
, 0),
5964 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5968 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5971 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5973 BPF_MOV64_IMM(BPF_REG_1
, 0),
5974 BPF_MOV64_IMM(BPF_REG_2
, 0),
5975 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5976 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5977 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5978 BPF_MOV64_IMM(BPF_REG_3
, 0),
5979 BPF_MOV64_IMM(BPF_REG_4
, 0),
5980 BPF_MOV64_IMM(BPF_REG_5
, 0),
5981 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5984 .errstr
= "R1 type=inv expected=fp",
5986 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5989 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5991 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5992 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
5993 BPF_MOV64_IMM(BPF_REG_2
, 0),
5994 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
5995 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 8),
5996 BPF_MOV64_IMM(BPF_REG_3
, 0),
5997 BPF_MOV64_IMM(BPF_REG_4
, 0),
5998 BPF_MOV64_IMM(BPF_REG_5
, 0),
5999 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6003 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6006 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6008 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6009 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6010 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6011 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6012 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6013 BPF_FUNC_map_lookup_elem
),
6014 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6015 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6016 BPF_MOV64_IMM(BPF_REG_2
, 0),
6017 BPF_MOV64_IMM(BPF_REG_3
, 0),
6018 BPF_MOV64_IMM(BPF_REG_4
, 0),
6019 BPF_MOV64_IMM(BPF_REG_5
, 0),
6020 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6023 .fixup_map1
= { 3 },
6025 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6028 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6030 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6031 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6032 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6033 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6034 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6035 BPF_FUNC_map_lookup_elem
),
6036 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6037 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6038 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 7),
6039 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6040 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6041 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
6042 BPF_MOV64_IMM(BPF_REG_3
, 0),
6043 BPF_MOV64_IMM(BPF_REG_4
, 0),
6044 BPF_MOV64_IMM(BPF_REG_5
, 0),
6045 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6048 .fixup_map1
= { 3 },
6050 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6053 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6055 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6056 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6057 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6058 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6059 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6060 BPF_FUNC_map_lookup_elem
),
6061 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6062 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6063 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6064 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6065 BPF_MOV64_IMM(BPF_REG_3
, 0),
6066 BPF_MOV64_IMM(BPF_REG_4
, 0),
6067 BPF_MOV64_IMM(BPF_REG_5
, 0),
6068 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6071 .fixup_map1
= { 3 },
6073 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6076 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6078 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
6079 offsetof(struct __sk_buff
, data
)),
6080 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
6081 offsetof(struct __sk_buff
, data_end
)),
6082 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_6
),
6083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
6084 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
6085 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
6086 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 0),
6087 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6088 BPF_MOV64_IMM(BPF_REG_3
, 0),
6089 BPF_MOV64_IMM(BPF_REG_4
, 0),
6090 BPF_MOV64_IMM(BPF_REG_5
, 0),
6091 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
6095 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6098 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6100 BPF_MOV64_IMM(BPF_REG_1
, 0),
6101 BPF_MOV64_IMM(BPF_REG_2
, 0),
6102 BPF_MOV64_IMM(BPF_REG_3
, 0),
6103 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6106 .errstr
= "R1 type=inv expected=fp",
6108 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6111 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6113 BPF_MOV64_IMM(BPF_REG_1
, 0),
6114 BPF_MOV64_IMM(BPF_REG_2
, 1),
6115 BPF_MOV64_IMM(BPF_REG_3
, 0),
6116 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6119 .errstr
= "R1 type=inv expected=fp",
6121 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6124 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6126 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6127 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6128 BPF_MOV64_IMM(BPF_REG_2
, 0),
6129 BPF_MOV64_IMM(BPF_REG_3
, 0),
6130 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6134 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6137 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6139 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6140 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6141 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6142 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6143 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6144 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6145 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6146 BPF_MOV64_IMM(BPF_REG_2
, 0),
6147 BPF_MOV64_IMM(BPF_REG_3
, 0),
6148 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6151 .fixup_map1
= { 3 },
6153 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6156 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6158 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6159 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6160 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6161 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6162 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6163 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6164 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6165 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 4),
6166 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6167 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
6168 BPF_MOV64_IMM(BPF_REG_3
, 0),
6169 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6172 .fixup_map1
= { 3 },
6174 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6177 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6179 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6180 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6181 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6182 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6183 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
6184 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
6185 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6186 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
6187 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 8, 2),
6188 BPF_MOV64_IMM(BPF_REG_3
, 0),
6189 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6192 .fixup_map1
= { 3 },
6194 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6197 "helper access to variable memory: 8 bytes leak",
6199 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6200 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
6201 BPF_MOV64_IMM(BPF_REG_0
, 0),
6202 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
6203 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
6204 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
6205 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
6206 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
6207 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
6208 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
6209 BPF_MOV64_IMM(BPF_REG_2
, 0),
6210 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
6211 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
6212 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 63),
6213 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
6214 BPF_MOV64_IMM(BPF_REG_3
, 0),
6215 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6216 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6219 .errstr
= "invalid indirect read from stack off -64+32 size 64",
6221 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6224 "helper access to variable memory: 8 bytes no leak (init memory)",
6226 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
6227 BPF_MOV64_IMM(BPF_REG_0
, 0),
6228 BPF_MOV64_IMM(BPF_REG_0
, 0),
6229 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
6230 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
6231 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
6232 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
6233 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
6234 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
6235 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
6236 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
6237 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
6238 BPF_MOV64_IMM(BPF_REG_2
, 0),
6239 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 32),
6240 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 32),
6241 BPF_MOV64_IMM(BPF_REG_3
, 0),
6242 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
6243 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6247 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
6250 "invalid and of negative number",
6252 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6253 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6254 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6255 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6256 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6257 BPF_FUNC_map_lookup_elem
),
6258 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6259 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6260 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, -4),
6261 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
6262 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6263 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
6264 offsetof(struct test_val
, foo
)),
6267 .fixup_map2
= { 3 },
6268 .errstr
= "R0 max value is outside of the array range",
6270 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
6273 "invalid range check",
6275 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6276 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6277 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6278 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6279 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6280 BPF_FUNC_map_lookup_elem
),
6281 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 12),
6282 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
6283 BPF_MOV64_IMM(BPF_REG_9
, 1),
6284 BPF_ALU32_IMM(BPF_MOD
, BPF_REG_1
, 2),
6285 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 1),
6286 BPF_ALU32_REG(BPF_AND
, BPF_REG_9
, BPF_REG_1
),
6287 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_9
, 1),
6288 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_9
, 1),
6289 BPF_MOV32_IMM(BPF_REG_3
, 1),
6290 BPF_ALU32_REG(BPF_SUB
, BPF_REG_3
, BPF_REG_9
),
6291 BPF_ALU32_IMM(BPF_MUL
, BPF_REG_3
, 0x10000000),
6292 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
6293 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_3
, 0),
6294 BPF_MOV64_REG(BPF_REG_0
, 0),
6297 .fixup_map2
= { 3 },
6298 .errstr
= "R0 max value is outside of the array range",
6300 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
6303 "map in map access",
6305 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6306 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6307 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6308 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6309 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6310 BPF_FUNC_map_lookup_elem
),
6311 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
6312 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6313 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6314 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6315 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6316 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6317 BPF_FUNC_map_lookup_elem
),
6318 BPF_MOV64_IMM(BPF_REG_0
, 0),
6321 .fixup_map_in_map
= { 3 },
6325 "invalid inner map pointer",
6327 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6328 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6329 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6330 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6331 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6332 BPF_FUNC_map_lookup_elem
),
6333 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6334 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6335 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6336 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6337 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6338 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
6339 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6340 BPF_FUNC_map_lookup_elem
),
6341 BPF_MOV64_IMM(BPF_REG_0
, 0),
6344 .fixup_map_in_map
= { 3 },
6345 .errstr
= "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6349 "forgot null checking on the inner map pointer",
6351 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6352 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6353 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6354 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6355 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6356 BPF_FUNC_map_lookup_elem
),
6357 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
6358 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6359 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
6360 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
6361 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6362 BPF_FUNC_map_lookup_elem
),
6363 BPF_MOV64_IMM(BPF_REG_0
, 0),
6366 .fixup_map_in_map
= { 3 },
6367 .errstr
= "R1 type=map_value_or_null expected=map_ptr",
6371 "ld_abs: check calling conv, r1",
6373 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6374 BPF_MOV64_IMM(BPF_REG_1
, 0),
6375 BPF_LD_ABS(BPF_W
, -0x200000),
6376 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
6379 .errstr
= "R1 !read_ok",
6383 "ld_abs: check calling conv, r2",
6385 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6386 BPF_MOV64_IMM(BPF_REG_2
, 0),
6387 BPF_LD_ABS(BPF_W
, -0x200000),
6388 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
6391 .errstr
= "R2 !read_ok",
6395 "ld_abs: check calling conv, r3",
6397 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6398 BPF_MOV64_IMM(BPF_REG_3
, 0),
6399 BPF_LD_ABS(BPF_W
, -0x200000),
6400 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
6403 .errstr
= "R3 !read_ok",
6407 "ld_abs: check calling conv, r4",
6409 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6410 BPF_MOV64_IMM(BPF_REG_4
, 0),
6411 BPF_LD_ABS(BPF_W
, -0x200000),
6412 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
6415 .errstr
= "R4 !read_ok",
6419 "ld_abs: check calling conv, r5",
6421 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6422 BPF_MOV64_IMM(BPF_REG_5
, 0),
6423 BPF_LD_ABS(BPF_W
, -0x200000),
6424 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
6427 .errstr
= "R5 !read_ok",
6431 "ld_abs: check calling conv, r7",
6433 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6434 BPF_MOV64_IMM(BPF_REG_7
, 0),
6435 BPF_LD_ABS(BPF_W
, -0x200000),
6436 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6442 "ld_abs: tests on r6 and skb data reload helper",
6444 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6445 BPF_LD_ABS(BPF_B
, 0),
6446 BPF_LD_ABS(BPF_H
, 0),
6447 BPF_LD_ABS(BPF_W
, 0),
6448 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_6
),
6449 BPF_MOV64_IMM(BPF_REG_6
, 0),
6450 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
6451 BPF_MOV64_IMM(BPF_REG_2
, 1),
6452 BPF_MOV64_IMM(BPF_REG_3
, 2),
6453 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6454 BPF_FUNC_skb_vlan_push
),
6455 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_7
),
6456 BPF_LD_ABS(BPF_B
, 0),
6457 BPF_LD_ABS(BPF_H
, 0),
6458 BPF_LD_ABS(BPF_W
, 0),
6459 BPF_MOV64_IMM(BPF_REG_0
, 42),
6462 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6466 "ld_ind: check calling conv, r1",
6468 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6469 BPF_MOV64_IMM(BPF_REG_1
, 1),
6470 BPF_LD_IND(BPF_W
, BPF_REG_1
, -0x200000),
6471 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
6474 .errstr
= "R1 !read_ok",
6478 "ld_ind: check calling conv, r2",
6480 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6481 BPF_MOV64_IMM(BPF_REG_2
, 1),
6482 BPF_LD_IND(BPF_W
, BPF_REG_2
, -0x200000),
6483 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
6486 .errstr
= "R2 !read_ok",
6490 "ld_ind: check calling conv, r3",
6492 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6493 BPF_MOV64_IMM(BPF_REG_3
, 1),
6494 BPF_LD_IND(BPF_W
, BPF_REG_3
, -0x200000),
6495 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
6498 .errstr
= "R3 !read_ok",
6502 "ld_ind: check calling conv, r4",
6504 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6505 BPF_MOV64_IMM(BPF_REG_4
, 1),
6506 BPF_LD_IND(BPF_W
, BPF_REG_4
, -0x200000),
6507 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
6510 .errstr
= "R4 !read_ok",
6514 "ld_ind: check calling conv, r5",
6516 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6517 BPF_MOV64_IMM(BPF_REG_5
, 1),
6518 BPF_LD_IND(BPF_W
, BPF_REG_5
, -0x200000),
6519 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
6522 .errstr
= "R5 !read_ok",
6526 "ld_ind: check calling conv, r7",
6528 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
6529 BPF_MOV64_IMM(BPF_REG_7
, 1),
6530 BPF_LD_IND(BPF_W
, BPF_REG_7
, -0x200000),
6531 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6537 "check bpf_perf_event_data->sample_period byte load permitted",
6539 BPF_MOV64_IMM(BPF_REG_0
, 0),
6540 #if __BYTE_ORDER == __LITTLE_ENDIAN
6541 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6542 offsetof(struct bpf_perf_event_data
, sample_period
)),
6544 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6545 offsetof(struct bpf_perf_event_data
, sample_period
) + 7),
6550 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6553 "check bpf_perf_event_data->sample_period half load permitted",
6555 BPF_MOV64_IMM(BPF_REG_0
, 0),
6556 #if __BYTE_ORDER == __LITTLE_ENDIAN
6557 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6558 offsetof(struct bpf_perf_event_data
, sample_period
)),
6560 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6561 offsetof(struct bpf_perf_event_data
, sample_period
) + 6),
6566 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6569 "check bpf_perf_event_data->sample_period word load permitted",
6571 BPF_MOV64_IMM(BPF_REG_0
, 0),
6572 #if __BYTE_ORDER == __LITTLE_ENDIAN
6573 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6574 offsetof(struct bpf_perf_event_data
, sample_period
)),
6576 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6577 offsetof(struct bpf_perf_event_data
, sample_period
) + 4),
6582 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6585 "check bpf_perf_event_data->sample_period dword load permitted",
6587 BPF_MOV64_IMM(BPF_REG_0
, 0),
6588 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
6589 offsetof(struct bpf_perf_event_data
, sample_period
)),
6593 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6596 "check skb->data half load not permitted",
6598 BPF_MOV64_IMM(BPF_REG_0
, 0),
6599 #if __BYTE_ORDER == __LITTLE_ENDIAN
6600 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6601 offsetof(struct __sk_buff
, data
)),
6603 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6604 offsetof(struct __sk_buff
, data
) + 2),
6609 .errstr
= "invalid bpf_context access",
6612 "check skb->tc_classid half load not permitted for lwt prog",
6614 BPF_MOV64_IMM(BPF_REG_0
, 0),
6615 #if __BYTE_ORDER == __LITTLE_ENDIAN
6616 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6617 offsetof(struct __sk_buff
, tc_classid
)),
6619 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6620 offsetof(struct __sk_buff
, tc_classid
) + 2),
6625 .errstr
= "invalid bpf_context access",
6626 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6629 "bounds checks mixing signed and unsigned, positive bounds",
6631 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6632 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6633 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6634 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6635 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6636 BPF_FUNC_map_lookup_elem
),
6637 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6638 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6639 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6640 BPF_MOV64_IMM(BPF_REG_2
, 2),
6641 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 3),
6642 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 4, 2),
6643 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6644 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6645 BPF_MOV64_IMM(BPF_REG_0
, 0),
6648 .fixup_map1
= { 3 },
6649 .errstr
= "unbounded min value",
6650 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6654 "bounds checks mixing signed and unsigned",
6656 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6657 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6658 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6659 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6660 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6661 BPF_FUNC_map_lookup_elem
),
6662 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6663 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6664 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6665 BPF_MOV64_IMM(BPF_REG_2
, -1),
6666 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6667 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6668 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6669 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6670 BPF_MOV64_IMM(BPF_REG_0
, 0),
6673 .fixup_map1
= { 3 },
6674 .errstr
= "unbounded min value",
6675 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6679 "bounds checks mixing signed and unsigned, variant 2",
6681 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6682 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6683 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6684 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6685 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6686 BPF_FUNC_map_lookup_elem
),
6687 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6688 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6689 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6690 BPF_MOV64_IMM(BPF_REG_2
, -1),
6691 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6692 BPF_MOV64_IMM(BPF_REG_8
, 0),
6693 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_1
),
6694 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6695 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6696 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6697 BPF_MOV64_IMM(BPF_REG_0
, 0),
6700 .fixup_map1
= { 3 },
6701 .errstr
= "unbounded min value",
6702 .errstr_unpriv
= "R8 has unknown scalar with mixed signed bounds",
6706 "bounds checks mixing signed and unsigned, variant 3",
6708 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6709 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6710 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6711 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6712 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6713 BPF_FUNC_map_lookup_elem
),
6714 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6715 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6716 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6717 BPF_MOV64_IMM(BPF_REG_2
, -1),
6718 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 4),
6719 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
6720 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6721 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6722 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6723 BPF_MOV64_IMM(BPF_REG_0
, 0),
6726 .fixup_map1
= { 3 },
6727 .errstr
= "unbounded min value",
6728 .errstr_unpriv
= "R8 has unknown scalar with mixed signed bounds",
6732 "bounds checks mixing signed and unsigned, variant 4",
6734 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6735 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6736 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6737 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6738 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6739 BPF_FUNC_map_lookup_elem
),
6740 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6741 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6742 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6743 BPF_MOV64_IMM(BPF_REG_2
, 1),
6744 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
6745 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6746 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6747 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6748 BPF_MOV64_IMM(BPF_REG_0
, 0),
6751 .fixup_map1
= { 3 },
6755 "bounds checks mixing signed and unsigned, variant 5",
6757 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6758 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6759 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6760 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6761 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6762 BPF_FUNC_map_lookup_elem
),
6763 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6764 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6765 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6766 BPF_MOV64_IMM(BPF_REG_2
, -1),
6767 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6768 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 4),
6769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 4),
6770 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
6771 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6772 BPF_MOV64_IMM(BPF_REG_0
, 0),
6775 .fixup_map1
= { 3 },
6776 .errstr
= "unbounded min value",
6777 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6781 "bounds checks mixing signed and unsigned, variant 6",
6783 BPF_MOV64_IMM(BPF_REG_2
, 0),
6784 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_10
),
6785 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, -512),
6786 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6787 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -16),
6788 BPF_MOV64_IMM(BPF_REG_6
, -1),
6789 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_6
, 5),
6790 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_4
, 1, 4),
6791 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
6792 BPF_MOV64_IMM(BPF_REG_5
, 0),
6793 BPF_ST_MEM(BPF_H
, BPF_REG_10
, -512, 0),
6794 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6795 BPF_FUNC_skb_load_bytes
),
6796 BPF_MOV64_IMM(BPF_REG_0
, 0),
6799 .errstr
= "R4 min value is negative, either use unsigned",
6803 "bounds checks mixing signed and unsigned, variant 7",
6805 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6806 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6807 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6808 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6809 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6810 BPF_FUNC_map_lookup_elem
),
6811 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6812 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6813 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6814 BPF_MOV64_IMM(BPF_REG_2
, 1024 * 1024 * 1024),
6815 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6816 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6817 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6818 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6819 BPF_MOV64_IMM(BPF_REG_0
, 0),
6822 .fixup_map1
= { 3 },
6826 "bounds checks mixing signed and unsigned, variant 8",
6828 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6829 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6830 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6831 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6832 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6833 BPF_FUNC_map_lookup_elem
),
6834 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6835 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6836 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6837 BPF_MOV64_IMM(BPF_REG_2
, -1),
6838 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6839 BPF_MOV64_IMM(BPF_REG_0
, 0),
6841 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6842 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6843 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6844 BPF_MOV64_IMM(BPF_REG_0
, 0),
6847 .fixup_map1
= { 3 },
6848 .errstr
= "unbounded min value",
6849 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6853 "bounds checks mixing signed and unsigned, variant 9",
6855 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6856 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6857 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6858 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6859 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6860 BPF_FUNC_map_lookup_elem
),
6861 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
6862 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6863 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6864 BPF_LD_IMM64(BPF_REG_2
, -9223372036854775808ULL),
6865 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6866 BPF_MOV64_IMM(BPF_REG_0
, 0),
6868 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6869 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6870 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6871 BPF_MOV64_IMM(BPF_REG_0
, 0),
6874 .fixup_map1
= { 3 },
6878 "bounds checks mixing signed and unsigned, variant 10",
6880 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6881 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6882 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6883 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6884 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6885 BPF_FUNC_map_lookup_elem
),
6886 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6887 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6888 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6889 BPF_MOV64_IMM(BPF_REG_2
, 0),
6890 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6891 BPF_MOV64_IMM(BPF_REG_0
, 0),
6893 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6894 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6895 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6896 BPF_MOV64_IMM(BPF_REG_0
, 0),
6899 .fixup_map1
= { 3 },
6900 .errstr
= "unbounded min value",
6901 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6905 "bounds checks mixing signed and unsigned, variant 11",
6907 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6908 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6909 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6910 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6911 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6912 BPF_FUNC_map_lookup_elem
),
6913 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6914 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6915 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6916 BPF_MOV64_IMM(BPF_REG_2
, -1),
6917 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6919 BPF_MOV64_IMM(BPF_REG_0
, 0),
6921 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6922 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6923 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6924 BPF_MOV64_IMM(BPF_REG_0
, 0),
6927 .fixup_map1
= { 3 },
6928 .errstr
= "unbounded min value",
6929 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6933 "bounds checks mixing signed and unsigned, variant 12",
6935 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6936 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6937 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6938 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6939 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6940 BPF_FUNC_map_lookup_elem
),
6941 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6942 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6943 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6944 BPF_MOV64_IMM(BPF_REG_2
, -6),
6945 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6946 BPF_MOV64_IMM(BPF_REG_0
, 0),
6948 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6949 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6950 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6951 BPF_MOV64_IMM(BPF_REG_0
, 0),
6954 .fixup_map1
= { 3 },
6955 .errstr
= "unbounded min value",
6956 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
6960 "bounds checks mixing signed and unsigned, variant 13",
6962 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6963 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6964 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6965 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6966 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6967 BPF_FUNC_map_lookup_elem
),
6968 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6969 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6970 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6971 BPF_MOV64_IMM(BPF_REG_2
, 2),
6972 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6973 BPF_MOV64_IMM(BPF_REG_7
, 1),
6974 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 0, 2),
6975 BPF_MOV64_IMM(BPF_REG_0
, 0),
6977 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_1
),
6978 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 4, 2),
6979 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_7
),
6980 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6981 BPF_MOV64_IMM(BPF_REG_0
, 0),
6984 .fixup_map1
= { 3 },
6985 .errstr
= "unbounded min value",
6986 .errstr_unpriv
= "R7 has unknown scalar with mixed signed bounds",
6990 "bounds checks mixing signed and unsigned, variant 14",
6992 BPF_LDX_MEM(BPF_W
, BPF_REG_9
, BPF_REG_1
,
6993 offsetof(struct __sk_buff
, mark
)),
6994 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6995 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6996 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6997 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6998 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6999 BPF_FUNC_map_lookup_elem
),
7000 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7001 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7002 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7003 BPF_MOV64_IMM(BPF_REG_2
, -1),
7004 BPF_MOV64_IMM(BPF_REG_8
, 2),
7005 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_9
, 42, 6),
7006 BPF_JMP_REG(BPF_JSGT
, BPF_REG_8
, BPF_REG_1
, 3),
7007 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
7008 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7009 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7010 BPF_MOV64_IMM(BPF_REG_0
, 0),
7012 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, -3),
7013 BPF_JMP_IMM(BPF_JA
, 0, 0, -7),
7015 .fixup_map1
= { 4 },
7016 .errstr
= "R0 invalid mem access 'inv'",
7017 .errstr_unpriv
= "R0 invalid mem access 'inv'",
7021 "bounds checks mixing signed and unsigned, variant 15",
7023 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7024 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7025 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7026 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7027 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7028 BPF_FUNC_map_lookup_elem
),
7029 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7030 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
7031 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
7032 BPF_MOV64_IMM(BPF_REG_2
, -6),
7033 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
7034 BPF_MOV64_IMM(BPF_REG_0
, 0),
7036 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7037 BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, 1, 2),
7038 BPF_MOV64_IMM(BPF_REG_0
, 0),
7040 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
7041 BPF_MOV64_IMM(BPF_REG_0
, 0),
7044 .fixup_map1
= { 3 },
7045 .errstr
= "unbounded min value",
7046 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
7048 .result_unpriv
= REJECT
,
7051 "subtraction bounds (map value) variant 1",
7053 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7054 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7055 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7056 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7057 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7058 BPF_FUNC_map_lookup_elem
),
7059 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7060 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7061 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 7),
7062 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
7063 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 5),
7064 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
7065 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 56),
7066 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7067 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7069 BPF_MOV64_IMM(BPF_REG_0
, 0),
7072 .fixup_map1
= { 3 },
7073 .errstr
= "R0 max value is outside of the array range",
7077 "subtraction bounds (map value) variant 2",
7079 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7080 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7081 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7082 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7083 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7084 BPF_FUNC_map_lookup_elem
),
7085 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7086 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7087 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 6),
7088 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
7089 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 4),
7090 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
7091 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7092 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7094 BPF_MOV64_IMM(BPF_REG_0
, 0),
7097 .fixup_map1
= { 3 },
7098 .errstr
= "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7099 .errstr_unpriv
= "R1 has unknown scalar with mixed signed bounds",
7103 "check subtraction on pointers for unpriv",
7105 BPF_MOV64_IMM(BPF_REG_0
, 0),
7106 BPF_LD_MAP_FD(BPF_REG_ARG1
, 0),
7107 BPF_MOV64_REG(BPF_REG_ARG2
, BPF_REG_FP
),
7108 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_ARG2
, -8),
7109 BPF_ST_MEM(BPF_DW
, BPF_REG_ARG2
, 0, 9),
7110 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7111 BPF_FUNC_map_lookup_elem
),
7112 BPF_MOV64_REG(BPF_REG_9
, BPF_REG_FP
),
7113 BPF_ALU64_REG(BPF_SUB
, BPF_REG_9
, BPF_REG_0
),
7114 BPF_LD_MAP_FD(BPF_REG_ARG1
, 0),
7115 BPF_MOV64_REG(BPF_REG_ARG2
, BPF_REG_FP
),
7116 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_ARG2
, -8),
7117 BPF_ST_MEM(BPF_DW
, BPF_REG_ARG2
, 0, 0),
7118 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7119 BPF_FUNC_map_lookup_elem
),
7120 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7122 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_9
, 0),
7123 BPF_MOV64_IMM(BPF_REG_0
, 0),
7126 .fixup_map1
= { 1, 9 },
7128 .result_unpriv
= REJECT
,
7129 .errstr_unpriv
= "R9 pointer -= pointer prohibited",
7132 "bounds check based on zero-extended MOV",
7134 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7135 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7136 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7137 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7138 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7139 BPF_FUNC_map_lookup_elem
),
7140 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7141 /* r2 = 0x0000'0000'ffff'ffff */
7142 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff),
7144 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
7146 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7147 /* access at offset 0 */
7148 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7150 BPF_MOV64_IMM(BPF_REG_0
, 0),
7153 .fixup_map1
= { 3 },
7157 "bounds check based on sign-extended MOV. test1",
7159 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7160 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7161 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7162 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7163 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7164 BPF_FUNC_map_lookup_elem
),
7165 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7166 /* r2 = 0xffff'ffff'ffff'ffff */
7167 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
7168 /* r2 = 0xffff'ffff */
7169 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
7170 /* r0 = <oob pointer> */
7171 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7172 /* access to OOB pointer */
7173 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7175 BPF_MOV64_IMM(BPF_REG_0
, 0),
7178 .fixup_map1
= { 3 },
7179 .errstr
= "map_value pointer and 4294967295",
7183 "bounds check based on sign-extended MOV. test2",
7185 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7186 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7187 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7188 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7189 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7190 BPF_FUNC_map_lookup_elem
),
7191 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7192 /* r2 = 0xffff'ffff'ffff'ffff */
7193 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
7194 /* r2 = 0xfff'ffff */
7195 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 36),
7196 /* r0 = <oob pointer> */
7197 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
7198 /* access to OOB pointer */
7199 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7201 BPF_MOV64_IMM(BPF_REG_0
, 0),
7204 .fixup_map1
= { 3 },
7205 .errstr
= "R0 min value is outside of the array range",
7209 "bounds check based on reg_off + var_off + insn_off. test1",
7211 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
7212 offsetof(struct __sk_buff
, mark
)),
7213 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7214 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7215 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7216 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7217 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7218 BPF_FUNC_map_lookup_elem
),
7219 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7220 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
7221 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 29) - 1),
7222 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
7223 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
7224 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
7225 BPF_MOV64_IMM(BPF_REG_0
, 0),
7228 .fixup_map1
= { 4 },
7229 .errstr
= "value_size=8 off=1073741825",
7231 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
7234 "bounds check based on reg_off + var_off + insn_off. test2",
7236 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
7237 offsetof(struct __sk_buff
, mark
)),
7238 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7239 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7240 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7241 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7242 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7243 BPF_FUNC_map_lookup_elem
),
7244 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
7245 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
7246 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 30) - 1),
7247 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
7248 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
7249 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
7250 BPF_MOV64_IMM(BPF_REG_0
, 0),
7253 .fixup_map1
= { 4 },
7254 .errstr
= "value 1073741823",
7256 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
7259 "bounds check after truncation of non-boundary-crossing range",
7261 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7262 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7263 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7264 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7265 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7266 BPF_FUNC_map_lookup_elem
),
7267 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7268 /* r1 = [0x00, 0xff] */
7269 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7270 BPF_MOV64_IMM(BPF_REG_2
, 1),
7271 /* r2 = 0x10'0000'0000 */
7272 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 36),
7273 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7274 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
7275 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7276 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7277 /* r1 = [0x00, 0xff] */
7278 BPF_ALU32_IMM(BPF_SUB
, BPF_REG_1
, 0x7fffffff),
7280 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7282 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7283 /* access at offset 0 */
7284 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7286 BPF_MOV64_IMM(BPF_REG_0
, 0),
7289 .fixup_map1
= { 3 },
7293 "bounds check after truncation of boundary-crossing range (1)",
7295 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7297 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7298 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7299 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7300 BPF_FUNC_map_lookup_elem
),
7301 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7302 /* r1 = [0x00, 0xff] */
7303 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7304 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7305 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7306 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7307 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7308 * [0x0000'0000, 0x0000'007f]
7310 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 0),
7311 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7312 /* r1 = [0x00, 0xff] or
7313 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7315 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7317 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7319 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7320 /* no-op or OOB pointer computation */
7321 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7322 /* potentially OOB access */
7323 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7325 BPF_MOV64_IMM(BPF_REG_0
, 0),
7328 .fixup_map1
= { 3 },
7329 /* not actually fully unbounded, but the bound is very high */
7330 .errstr
= "R0 unbounded memory access",
7334 "bounds check after truncation of boundary-crossing range (2)",
7336 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7337 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7338 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7339 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7340 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7341 BPF_FUNC_map_lookup_elem
),
7342 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
7343 /* r1 = [0x00, 0xff] */
7344 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7345 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7346 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7347 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
7348 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7349 * [0x0000'0000, 0x0000'007f]
7350 * difference to previous test: truncation via MOV32
7353 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_1
),
7354 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7355 /* r1 = [0x00, 0xff] or
7356 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7358 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
7360 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7362 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7363 /* no-op or OOB pointer computation */
7364 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7365 /* potentially OOB access */
7366 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7368 BPF_MOV64_IMM(BPF_REG_0
, 0),
7371 .fixup_map1
= { 3 },
7372 /* not actually fully unbounded, but the bound is very high */
7373 .errstr
= "R0 unbounded memory access",
7377 "bounds check after wrapping 32-bit addition",
7379 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7380 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7381 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7382 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7383 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7384 BPF_FUNC_map_lookup_elem
),
7385 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
7386 /* r1 = 0x7fff'ffff */
7387 BPF_MOV64_IMM(BPF_REG_1
, 0x7fffffff),
7388 /* r1 = 0xffff'fffe */
7389 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7391 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 2),
7393 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7394 /* access at offset 0 */
7395 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7397 BPF_MOV64_IMM(BPF_REG_0
, 0),
7400 .fixup_map1
= { 3 },
7404 "bounds check after shift with oversized count operand",
7406 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7407 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7408 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7409 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7410 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7411 BPF_FUNC_map_lookup_elem
),
7412 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7413 BPF_MOV64_IMM(BPF_REG_2
, 32),
7414 BPF_MOV64_IMM(BPF_REG_1
, 1),
7415 /* r1 = (u32)1 << (u32)32 = ? */
7416 BPF_ALU32_REG(BPF_LSH
, BPF_REG_1
, BPF_REG_2
),
7417 /* r1 = [0x0000, 0xffff] */
7418 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, 0xffff),
7419 /* computes unknown pointer, potentially OOB */
7420 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7421 /* potentially OOB access */
7422 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7424 BPF_MOV64_IMM(BPF_REG_0
, 0),
7427 .fixup_map1
= { 3 },
7428 .errstr
= "R0 max value is outside of the array range",
7432 "bounds check after right shift of maybe-negative number",
7434 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7435 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7436 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7437 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7438 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7439 BPF_FUNC_map_lookup_elem
),
7440 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7441 /* r1 = [0x00, 0xff] */
7442 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7443 /* r1 = [-0x01, 0xfe] */
7444 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 1),
7445 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7446 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7447 /* r1 = 0 or 0xffff'ffff'ffff */
7448 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
7449 /* computes unknown pointer, potentially OOB */
7450 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7451 /* potentially OOB access */
7452 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7454 BPF_MOV64_IMM(BPF_REG_0
, 0),
7457 .fixup_map1
= { 3 },
7458 .errstr
= "R0 unbounded memory access",
7462 "bounds check after 32-bit right shift with 64-bit input",
7464 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7465 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7466 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7467 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7468 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7469 BPF_FUNC_map_lookup_elem
),
7470 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
7472 BPF_MOV64_IMM(BPF_REG_1
, 2),
7474 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 31),
7475 /* r1 = 0 (NOT 2!) */
7476 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_1
, 31),
7477 /* r1 = 0xffff'fffe (NOT 0!) */
7478 BPF_ALU32_IMM(BPF_SUB
, BPF_REG_1
, 2),
7479 /* computes OOB pointer */
7480 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7482 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7484 BPF_MOV64_IMM(BPF_REG_0
, 0),
7487 .fixup_map1
= { 3 },
7488 .errstr
= "R0 invalid mem access",
7492 "bounds check map access with off+size signed 32bit overflow. test1",
7494 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7495 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7496 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7497 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7498 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7499 BPF_FUNC_map_lookup_elem
),
7500 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7502 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7ffffffe),
7503 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7507 .fixup_map1
= { 3 },
7508 .errstr
= "map_value pointer and 2147483646",
7512 "bounds check map access with off+size signed 32bit overflow. test2",
7514 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7515 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7516 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7517 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7518 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7519 BPF_FUNC_map_lookup_elem
),
7520 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7522 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7523 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7524 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
7525 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7529 .fixup_map1
= { 3 },
7530 .errstr
= "pointer offset 1073741822",
7531 .errstr_unpriv
= "R0 pointer arithmetic of map value goes out of range",
7535 "bounds check map access with off+size signed 32bit overflow. test3",
7537 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7538 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7539 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7540 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7541 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7542 BPF_FUNC_map_lookup_elem
),
7543 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7545 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
7546 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
7547 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
7551 .fixup_map1
= { 3 },
7552 .errstr
= "pointer offset -1073741822",
7553 .errstr_unpriv
= "R0 pointer arithmetic of map value goes out of range",
7557 "bounds check map access with off+size signed 32bit overflow. test4",
7559 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7560 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7561 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7562 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7563 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7564 BPF_FUNC_map_lookup_elem
),
7565 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
7567 BPF_MOV64_IMM(BPF_REG_1
, 1000000),
7568 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 1000000),
7569 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7570 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
7574 .fixup_map1
= { 3 },
7575 .errstr
= "map_value pointer and 1000000000000",
7579 "pointer/scalar confusion in state equality check (way 1)",
7581 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7582 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7583 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7584 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7585 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7586 BPF_FUNC_map_lookup_elem
),
7587 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
7588 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7590 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
7594 .fixup_map1
= { 3 },
7596 .result_unpriv
= REJECT
,
7597 .errstr_unpriv
= "R0 leaks addr as return value"
7600 "pointer/scalar confusion in state equality check (way 2)",
7602 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7603 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7604 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7605 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7606 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7607 BPF_FUNC_map_lookup_elem
),
7608 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
7609 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
7611 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7614 .fixup_map1
= { 3 },
7616 .result_unpriv
= REJECT
,
7617 .errstr_unpriv
= "R0 leaks addr as return value"
7620 "variable-offset ctx access",
7622 /* Get an unknown value */
7623 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7624 /* Make it small and 4-byte aligned */
7625 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7626 /* add it to skb. We now have either &skb->len or
7627 * &skb->pkt_type, but we don't know which
7629 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
7630 /* dereference it */
7631 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
7634 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
7636 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7639 "variable-offset stack access",
7641 /* Fill the top 8 bytes of the stack */
7642 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7643 /* Get an unknown value */
7644 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7645 /* Make it small and 4-byte aligned */
7646 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7647 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7648 /* add it to fp. We now have either fp-4 or fp-8, but
7649 * we don't know which
7651 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7652 /* dereference it */
7653 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 0),
7656 .errstr
= "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7658 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7661 "indirect variable-offset stack access",
7663 /* Fill the top 8 bytes of the stack */
7664 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7665 /* Get an unknown value */
7666 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7667 /* Make it small and 4-byte aligned */
7668 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7669 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7670 /* add it to fp. We now have either fp-4 or fp-8, but
7671 * we don't know which
7673 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7674 /* dereference it indirectly */
7675 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7676 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7677 BPF_FUNC_map_lookup_elem
),
7678 BPF_MOV64_IMM(BPF_REG_0
, 0),
7681 .fixup_map1
= { 5 },
7682 .errstr
= "variable stack read R2",
7684 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7687 "direct stack access with 32-bit wraparound. test1",
7689 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7690 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7691 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7692 BPF_MOV32_IMM(BPF_REG_0
, 0),
7693 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7696 .errstr
= "fp pointer and 2147483647",
7700 "direct stack access with 32-bit wraparound. test2",
7702 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7703 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7704 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7705 BPF_MOV32_IMM(BPF_REG_0
, 0),
7706 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7709 .errstr
= "fp pointer and 1073741823",
7713 "direct stack access with 32-bit wraparound. test3",
7715 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7716 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7717 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7718 BPF_MOV32_IMM(BPF_REG_0
, 0),
7719 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7722 .errstr
= "fp pointer offset 1073741822",
7723 .errstr_unpriv
= "R1 stack pointer arithmetic goes out of range",
7727 "liveness pruning and write screening",
7729 /* Get an unknown value */
7730 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7731 /* branch conditions teach us nothing about R2 */
7732 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7733 BPF_MOV64_IMM(BPF_REG_0
, 0),
7734 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7735 BPF_MOV64_IMM(BPF_REG_0
, 0),
7738 .errstr
= "R0 !read_ok",
7740 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7743 "varlen_map_value_access pruning",
7745 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7746 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7747 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7748 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7749 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7750 BPF_FUNC_map_lookup_elem
),
7751 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7752 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
7753 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
7754 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
7755 BPF_MOV32_IMM(BPF_REG_1
, 0),
7756 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
7757 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7758 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
7759 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
7760 offsetof(struct test_val
, foo
)),
7763 .fixup_map2
= { 3 },
7764 .errstr_unpriv
= "R0 leaks addr",
7765 .errstr
= "R0 unbounded memory access",
7766 .result_unpriv
= REJECT
,
7768 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7771 "invalid 64-bit BPF_END",
7773 BPF_MOV32_IMM(BPF_REG_0
, 0),
7775 .code
= BPF_ALU64
| BPF_END
| BPF_TO_LE
,
7776 .dst_reg
= BPF_REG_0
,
7783 .errstr
= "BPF_END uses reserved fields",
7787 "meta access, test1",
7789 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7790 offsetof(struct xdp_md
, data_meta
)),
7791 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7792 offsetof(struct xdp_md
, data
)),
7793 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7794 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7795 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7796 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7797 BPF_MOV64_IMM(BPF_REG_0
, 0),
7801 .prog_type
= BPF_PROG_TYPE_XDP
,
7804 "meta access, test2",
7806 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7807 offsetof(struct xdp_md
, data_meta
)),
7808 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7809 offsetof(struct xdp_md
, data
)),
7810 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7811 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 8),
7812 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7813 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7814 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7815 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
7816 BPF_MOV64_IMM(BPF_REG_0
, 0),
7820 .errstr
= "invalid access to packet, off=-8",
7821 .prog_type
= BPF_PROG_TYPE_XDP
,
7824 "meta access, test3",
7826 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7827 offsetof(struct xdp_md
, data_meta
)),
7828 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7829 offsetof(struct xdp_md
, data_end
)),
7830 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
7831 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7832 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7833 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7834 BPF_MOV64_IMM(BPF_REG_0
, 0),
7838 .errstr
= "invalid access to packet",
7839 .prog_type
= BPF_PROG_TYPE_XDP
,
7842 "meta access, test4",
7844 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7845 offsetof(struct xdp_md
, data_meta
)),
7846 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7847 offsetof(struct xdp_md
, data_end
)),
7848 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
7849 offsetof(struct xdp_md
, data
)),
7850 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
7851 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7852 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
7853 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7854 BPF_MOV64_IMM(BPF_REG_0
, 0),
7858 .errstr
= "invalid access to packet",
7859 .prog_type
= BPF_PROG_TYPE_XDP
,
7862 "meta access, test5",
7864 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7865 offsetof(struct xdp_md
, data_meta
)),
7866 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
7867 offsetof(struct xdp_md
, data
)),
7868 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7869 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7870 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_4
, 3),
7871 BPF_MOV64_IMM(BPF_REG_2
, -8),
7872 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7873 BPF_FUNC_xdp_adjust_meta
),
7874 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 0),
7875 BPF_MOV64_IMM(BPF_REG_0
, 0),
7879 .errstr
= "R3 !read_ok",
7880 .prog_type
= BPF_PROG_TYPE_XDP
,
7883 "meta access, test6",
7885 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7886 offsetof(struct xdp_md
, data_meta
)),
7887 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7888 offsetof(struct xdp_md
, data
)),
7889 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7890 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7891 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7892 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7893 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_0
, 1),
7894 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7895 BPF_MOV64_IMM(BPF_REG_0
, 0),
7899 .errstr
= "invalid access to packet",
7900 .prog_type
= BPF_PROG_TYPE_XDP
,
7903 "meta access, test7",
7905 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7906 offsetof(struct xdp_md
, data_meta
)),
7907 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7908 offsetof(struct xdp_md
, data
)),
7909 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
7910 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
7911 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7912 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
7913 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7914 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7915 BPF_MOV64_IMM(BPF_REG_0
, 0),
7919 .prog_type
= BPF_PROG_TYPE_XDP
,
7922 "meta access, test8",
7924 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7925 offsetof(struct xdp_md
, data_meta
)),
7926 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7927 offsetof(struct xdp_md
, data
)),
7928 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7929 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0xFFFF),
7930 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7931 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7932 BPF_MOV64_IMM(BPF_REG_0
, 0),
7936 .prog_type
= BPF_PROG_TYPE_XDP
,
7939 "meta access, test9",
7941 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7942 offsetof(struct xdp_md
, data_meta
)),
7943 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7944 offsetof(struct xdp_md
, data
)),
7945 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
7946 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0xFFFF),
7947 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
7948 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
7949 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
7950 BPF_MOV64_IMM(BPF_REG_0
, 0),
7954 .errstr
= "invalid access to packet",
7955 .prog_type
= BPF_PROG_TYPE_XDP
,
7958 "meta access, test10",
7960 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7961 offsetof(struct xdp_md
, data_meta
)),
7962 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7963 offsetof(struct xdp_md
, data
)),
7964 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
7965 offsetof(struct xdp_md
, data_end
)),
7966 BPF_MOV64_IMM(BPF_REG_5
, 42),
7967 BPF_MOV64_IMM(BPF_REG_6
, 24),
7968 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_5
, -8),
7969 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
7970 BPF_LDX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_10
, -8),
7971 BPF_JMP_IMM(BPF_JGT
, BPF_REG_5
, 100, 6),
7972 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_5
),
7973 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
7974 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
7975 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
7976 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_5
, 1),
7977 BPF_LDX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
7978 BPF_MOV64_IMM(BPF_REG_0
, 0),
7982 .errstr
= "invalid access to packet",
7983 .prog_type
= BPF_PROG_TYPE_XDP
,
7986 "meta access, test11",
7988 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7989 offsetof(struct xdp_md
, data_meta
)),
7990 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7991 offsetof(struct xdp_md
, data
)),
7992 BPF_MOV64_IMM(BPF_REG_5
, 42),
7993 BPF_MOV64_IMM(BPF_REG_6
, 24),
7994 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_5
, -8),
7995 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
7996 BPF_LDX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_10
, -8),
7997 BPF_JMP_IMM(BPF_JGT
, BPF_REG_5
, 100, 6),
7998 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_5
),
7999 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_2
),
8000 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
8001 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
8002 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_3
, 1),
8003 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_5
, 0),
8004 BPF_MOV64_IMM(BPF_REG_0
, 0),
8008 .prog_type
= BPF_PROG_TYPE_XDP
,
8011 "meta access, test12",
8013 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8014 offsetof(struct xdp_md
, data_meta
)),
8015 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8016 offsetof(struct xdp_md
, data
)),
8017 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
8018 offsetof(struct xdp_md
, data_end
)),
8019 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
8020 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 16),
8021 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 5),
8022 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 0),
8023 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_2
),
8024 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 16),
8025 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 1),
8026 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
8027 BPF_MOV64_IMM(BPF_REG_0
, 0),
8031 .prog_type
= BPF_PROG_TYPE_XDP
,
8034 "arithmetic ops make PTR_TO_CTX unusable",
8036 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
8037 offsetof(struct __sk_buff
, data
) -
8038 offsetof(struct __sk_buff
, mark
)),
8039 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
8040 offsetof(struct __sk_buff
, mark
)),
8043 .errstr
= "dereference of modified ctx ptr",
8045 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
8048 "pkt_end - pkt_start is allowed",
8050 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
8051 offsetof(struct __sk_buff
, data_end
)),
8052 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8053 offsetof(struct __sk_buff
, data
)),
8054 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_2
),
8058 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
8061 "XDP pkt read, pkt_end mangling, bad access 1",
8063 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8064 offsetof(struct xdp_md
, data
)),
8065 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8066 offsetof(struct xdp_md
, data_end
)),
8067 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8068 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8069 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 8),
8070 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8071 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8072 BPF_MOV64_IMM(BPF_REG_0
, 0),
8075 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
8077 .prog_type
= BPF_PROG_TYPE_XDP
,
8080 "XDP pkt read, pkt_end mangling, bad access 2",
8082 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8083 offsetof(struct xdp_md
, data
)),
8084 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8085 offsetof(struct xdp_md
, data_end
)),
8086 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8087 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8088 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_3
, 8),
8089 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8090 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8091 BPF_MOV64_IMM(BPF_REG_0
, 0),
8094 .errstr
= "R3 pointer arithmetic on PTR_TO_PACKET_END",
8096 .prog_type
= BPF_PROG_TYPE_XDP
,
8099 "XDP pkt read, pkt_data' > pkt_end, good access",
8101 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8102 offsetof(struct xdp_md
, data
)),
8103 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8104 offsetof(struct xdp_md
, data_end
)),
8105 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8106 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8107 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8108 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8109 BPF_MOV64_IMM(BPF_REG_0
, 0),
8113 .prog_type
= BPF_PROG_TYPE_XDP
,
8116 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8118 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8119 offsetof(struct xdp_md
, data
)),
8120 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8121 offsetof(struct xdp_md
, data_end
)),
8122 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8123 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8124 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8125 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8126 BPF_MOV64_IMM(BPF_REG_0
, 0),
8129 .errstr
= "R1 offset is outside of the packet",
8131 .prog_type
= BPF_PROG_TYPE_XDP
,
8132 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8135 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8137 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8138 offsetof(struct xdp_md
, data
)),
8139 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8140 offsetof(struct xdp_md
, data_end
)),
8141 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8142 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8143 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 0),
8144 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8145 BPF_MOV64_IMM(BPF_REG_0
, 0),
8148 .errstr
= "R1 offset is outside of the packet",
8150 .prog_type
= BPF_PROG_TYPE_XDP
,
8153 "XDP pkt read, pkt_end > pkt_data', good access",
8155 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8156 offsetof(struct xdp_md
, data
)),
8157 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8158 offsetof(struct xdp_md
, data_end
)),
8159 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8160 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8161 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8162 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8163 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8164 BPF_MOV64_IMM(BPF_REG_0
, 0),
8168 .prog_type
= BPF_PROG_TYPE_XDP
,
8169 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8172 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8174 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8175 offsetof(struct xdp_md
, data
)),
8176 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8177 offsetof(struct xdp_md
, data_end
)),
8178 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8179 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8180 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8181 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8182 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8183 BPF_MOV64_IMM(BPF_REG_0
, 0),
8186 .errstr
= "R1 offset is outside of the packet",
8188 .prog_type
= BPF_PROG_TYPE_XDP
,
8191 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8193 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8194 offsetof(struct xdp_md
, data
)),
8195 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8196 offsetof(struct xdp_md
, data_end
)),
8197 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8198 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8199 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8200 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8201 BPF_MOV64_IMM(BPF_REG_0
, 0),
8204 .errstr
= "R1 offset is outside of the packet",
8206 .prog_type
= BPF_PROG_TYPE_XDP
,
8209 "XDP pkt read, pkt_data' < pkt_end, good access",
8211 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8212 offsetof(struct xdp_md
, data
)),
8213 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8214 offsetof(struct xdp_md
, data_end
)),
8215 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8216 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8217 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8218 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8219 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8220 BPF_MOV64_IMM(BPF_REG_0
, 0),
8224 .prog_type
= BPF_PROG_TYPE_XDP
,
8225 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8228 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8230 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8231 offsetof(struct xdp_md
, data
)),
8232 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8233 offsetof(struct xdp_md
, data_end
)),
8234 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8235 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8236 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8237 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8238 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8239 BPF_MOV64_IMM(BPF_REG_0
, 0),
8242 .errstr
= "R1 offset is outside of the packet",
8244 .prog_type
= BPF_PROG_TYPE_XDP
,
8247 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8249 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8250 offsetof(struct xdp_md
, data
)),
8251 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8252 offsetof(struct xdp_md
, data_end
)),
8253 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8254 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8255 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8256 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8257 BPF_MOV64_IMM(BPF_REG_0
, 0),
8260 .errstr
= "R1 offset is outside of the packet",
8262 .prog_type
= BPF_PROG_TYPE_XDP
,
8265 "XDP pkt read, pkt_end < pkt_data', good access",
8267 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8268 offsetof(struct xdp_md
, data
)),
8269 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8270 offsetof(struct xdp_md
, data_end
)),
8271 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8273 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8274 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8275 BPF_MOV64_IMM(BPF_REG_0
, 0),
8279 .prog_type
= BPF_PROG_TYPE_XDP
,
8282 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8284 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8285 offsetof(struct xdp_md
, data
)),
8286 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8287 offsetof(struct xdp_md
, data_end
)),
8288 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8289 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8290 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8291 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8292 BPF_MOV64_IMM(BPF_REG_0
, 0),
8295 .errstr
= "R1 offset is outside of the packet",
8297 .prog_type
= BPF_PROG_TYPE_XDP
,
8298 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8301 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8303 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8304 offsetof(struct xdp_md
, data
)),
8305 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8306 offsetof(struct xdp_md
, data_end
)),
8307 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8308 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8309 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 0),
8310 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8311 BPF_MOV64_IMM(BPF_REG_0
, 0),
8314 .errstr
= "R1 offset is outside of the packet",
8316 .prog_type
= BPF_PROG_TYPE_XDP
,
8319 "XDP pkt read, pkt_data' >= pkt_end, good access",
8321 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8322 offsetof(struct xdp_md
, data
)),
8323 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8324 offsetof(struct xdp_md
, data_end
)),
8325 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8326 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8327 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8328 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8329 BPF_MOV64_IMM(BPF_REG_0
, 0),
8333 .prog_type
= BPF_PROG_TYPE_XDP
,
8334 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8337 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8339 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8340 offsetof(struct xdp_md
, data
)),
8341 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8342 offsetof(struct xdp_md
, data_end
)),
8343 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8344 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8345 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8346 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8347 BPF_MOV64_IMM(BPF_REG_0
, 0),
8350 .errstr
= "R1 offset is outside of the packet",
8352 .prog_type
= BPF_PROG_TYPE_XDP
,
8355 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8357 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8358 offsetof(struct xdp_md
, data
)),
8359 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8360 offsetof(struct xdp_md
, data_end
)),
8361 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8362 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8363 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 0),
8364 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8365 BPF_MOV64_IMM(BPF_REG_0
, 0),
8368 .errstr
= "R1 offset is outside of the packet",
8370 .prog_type
= BPF_PROG_TYPE_XDP
,
8371 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8374 "XDP pkt read, pkt_end >= pkt_data', good access",
8376 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8377 offsetof(struct xdp_md
, data
)),
8378 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8379 offsetof(struct xdp_md
, data_end
)),
8380 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8381 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8382 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8383 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8384 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8385 BPF_MOV64_IMM(BPF_REG_0
, 0),
8389 .prog_type
= BPF_PROG_TYPE_XDP
,
8392 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8394 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8395 offsetof(struct xdp_md
, data
)),
8396 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8397 offsetof(struct xdp_md
, data_end
)),
8398 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8399 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8400 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8401 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8402 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8403 BPF_MOV64_IMM(BPF_REG_0
, 0),
8406 .errstr
= "R1 offset is outside of the packet",
8408 .prog_type
= BPF_PROG_TYPE_XDP
,
8409 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8412 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8414 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8415 offsetof(struct xdp_md
, data
)),
8416 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8417 offsetof(struct xdp_md
, data_end
)),
8418 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8419 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8420 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8421 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8422 BPF_MOV64_IMM(BPF_REG_0
, 0),
8425 .errstr
= "R1 offset is outside of the packet",
8427 .prog_type
= BPF_PROG_TYPE_XDP
,
8430 "XDP pkt read, pkt_data' <= pkt_end, good access",
8432 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8433 offsetof(struct xdp_md
, data
)),
8434 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8435 offsetof(struct xdp_md
, data_end
)),
8436 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8438 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8439 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8440 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8441 BPF_MOV64_IMM(BPF_REG_0
, 0),
8445 .prog_type
= BPF_PROG_TYPE_XDP
,
8448 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8450 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8451 offsetof(struct xdp_md
, data
)),
8452 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8453 offsetof(struct xdp_md
, data_end
)),
8454 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8455 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8456 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8457 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8458 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8459 BPF_MOV64_IMM(BPF_REG_0
, 0),
8462 .errstr
= "R1 offset is outside of the packet",
8464 .prog_type
= BPF_PROG_TYPE_XDP
,
8465 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8468 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8470 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8471 offsetof(struct xdp_md
, data
)),
8472 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8473 offsetof(struct xdp_md
, data_end
)),
8474 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8475 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8476 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8477 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8478 BPF_MOV64_IMM(BPF_REG_0
, 0),
8481 .errstr
= "R1 offset is outside of the packet",
8483 .prog_type
= BPF_PROG_TYPE_XDP
,
8486 "XDP pkt read, pkt_end <= pkt_data', good access",
8488 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8489 offsetof(struct xdp_md
, data
)),
8490 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8491 offsetof(struct xdp_md
, data_end
)),
8492 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8493 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8494 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8495 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8496 BPF_MOV64_IMM(BPF_REG_0
, 0),
8500 .prog_type
= BPF_PROG_TYPE_XDP
,
8501 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8504 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8506 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8507 offsetof(struct xdp_md
, data
)),
8508 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8509 offsetof(struct xdp_md
, data_end
)),
8510 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8511 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8512 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8513 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8514 BPF_MOV64_IMM(BPF_REG_0
, 0),
8517 .errstr
= "R1 offset is outside of the packet",
8519 .prog_type
= BPF_PROG_TYPE_XDP
,
8522 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8524 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8525 offsetof(struct xdp_md
, data
)),
8526 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8527 offsetof(struct xdp_md
, data_end
)),
8528 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8529 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8530 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 0),
8531 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8532 BPF_MOV64_IMM(BPF_REG_0
, 0),
8535 .errstr
= "R1 offset is outside of the packet",
8537 .prog_type
= BPF_PROG_TYPE_XDP
,
8538 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8541 "XDP pkt read, pkt_meta' > pkt_data, good access",
8543 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8544 offsetof(struct xdp_md
, data_meta
)),
8545 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8546 offsetof(struct xdp_md
, data
)),
8547 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8548 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8549 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8550 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8551 BPF_MOV64_IMM(BPF_REG_0
, 0),
8555 .prog_type
= BPF_PROG_TYPE_XDP
,
8558 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8560 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8561 offsetof(struct xdp_md
, data_meta
)),
8562 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8563 offsetof(struct xdp_md
, data
)),
8564 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8565 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8566 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
8567 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8568 BPF_MOV64_IMM(BPF_REG_0
, 0),
8571 .errstr
= "R1 offset is outside of the packet",
8573 .prog_type
= BPF_PROG_TYPE_XDP
,
8574 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8577 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8579 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8580 offsetof(struct xdp_md
, data_meta
)),
8581 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8582 offsetof(struct xdp_md
, data
)),
8583 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8584 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8585 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 0),
8586 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8587 BPF_MOV64_IMM(BPF_REG_0
, 0),
8590 .errstr
= "R1 offset is outside of the packet",
8592 .prog_type
= BPF_PROG_TYPE_XDP
,
8595 "XDP pkt read, pkt_data > pkt_meta', good access",
8597 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8598 offsetof(struct xdp_md
, data_meta
)),
8599 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8600 offsetof(struct xdp_md
, data
)),
8601 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8602 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8603 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8604 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8605 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8606 BPF_MOV64_IMM(BPF_REG_0
, 0),
8610 .prog_type
= BPF_PROG_TYPE_XDP
,
8611 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8614 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8616 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8617 offsetof(struct xdp_md
, data_meta
)),
8618 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8619 offsetof(struct xdp_md
, data
)),
8620 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8621 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8622 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8623 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8624 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8625 BPF_MOV64_IMM(BPF_REG_0
, 0),
8628 .errstr
= "R1 offset is outside of the packet",
8630 .prog_type
= BPF_PROG_TYPE_XDP
,
8633 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8635 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8636 offsetof(struct xdp_md
, data_meta
)),
8637 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8638 offsetof(struct xdp_md
, data
)),
8639 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8640 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8641 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
8642 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8643 BPF_MOV64_IMM(BPF_REG_0
, 0),
8646 .errstr
= "R1 offset is outside of the packet",
8648 .prog_type
= BPF_PROG_TYPE_XDP
,
8651 "XDP pkt read, pkt_meta' < pkt_data, good access",
8653 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8654 offsetof(struct xdp_md
, data_meta
)),
8655 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8656 offsetof(struct xdp_md
, data
)),
8657 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8658 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8659 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8660 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8661 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8662 BPF_MOV64_IMM(BPF_REG_0
, 0),
8666 .prog_type
= BPF_PROG_TYPE_XDP
,
8667 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8670 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8672 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8673 offsetof(struct xdp_md
, data_meta
)),
8674 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8675 offsetof(struct xdp_md
, data
)),
8676 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8677 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8678 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8679 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8680 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8681 BPF_MOV64_IMM(BPF_REG_0
, 0),
8684 .errstr
= "R1 offset is outside of the packet",
8686 .prog_type
= BPF_PROG_TYPE_XDP
,
8689 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8691 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8692 offsetof(struct xdp_md
, data_meta
)),
8693 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8694 offsetof(struct xdp_md
, data
)),
8695 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8696 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8697 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
8698 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8699 BPF_MOV64_IMM(BPF_REG_0
, 0),
8702 .errstr
= "R1 offset is outside of the packet",
8704 .prog_type
= BPF_PROG_TYPE_XDP
,
8707 "XDP pkt read, pkt_data < pkt_meta', good access",
8709 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8710 offsetof(struct xdp_md
, data_meta
)),
8711 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8712 offsetof(struct xdp_md
, data
)),
8713 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8714 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8715 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8716 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8717 BPF_MOV64_IMM(BPF_REG_0
, 0),
8721 .prog_type
= BPF_PROG_TYPE_XDP
,
8724 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8726 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8727 offsetof(struct xdp_md
, data_meta
)),
8728 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8729 offsetof(struct xdp_md
, data
)),
8730 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8731 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8732 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
8733 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8734 BPF_MOV64_IMM(BPF_REG_0
, 0),
8737 .errstr
= "R1 offset is outside of the packet",
8739 .prog_type
= BPF_PROG_TYPE_XDP
,
8740 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8743 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8745 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8746 offsetof(struct xdp_md
, data_meta
)),
8747 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8748 offsetof(struct xdp_md
, data
)),
8749 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8750 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8751 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 0),
8752 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8753 BPF_MOV64_IMM(BPF_REG_0
, 0),
8756 .errstr
= "R1 offset is outside of the packet",
8758 .prog_type
= BPF_PROG_TYPE_XDP
,
8761 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8763 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8764 offsetof(struct xdp_md
, data_meta
)),
8765 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8766 offsetof(struct xdp_md
, data
)),
8767 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8768 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8769 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8770 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8771 BPF_MOV64_IMM(BPF_REG_0
, 0),
8775 .prog_type
= BPF_PROG_TYPE_XDP
,
8776 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8779 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8781 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8782 offsetof(struct xdp_md
, data_meta
)),
8783 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8784 offsetof(struct xdp_md
, data
)),
8785 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8786 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8787 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
8788 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8789 BPF_MOV64_IMM(BPF_REG_0
, 0),
8792 .errstr
= "R1 offset is outside of the packet",
8794 .prog_type
= BPF_PROG_TYPE_XDP
,
8797 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8799 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8800 offsetof(struct xdp_md
, data_meta
)),
8801 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8802 offsetof(struct xdp_md
, data
)),
8803 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8804 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8805 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 0),
8806 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8807 BPF_MOV64_IMM(BPF_REG_0
, 0),
8810 .errstr
= "R1 offset is outside of the packet",
8812 .prog_type
= BPF_PROG_TYPE_XDP
,
8813 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8816 "XDP pkt read, pkt_data >= pkt_meta', good access",
8818 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8819 offsetof(struct xdp_md
, data_meta
)),
8820 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8821 offsetof(struct xdp_md
, data
)),
8822 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8823 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8824 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8825 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8826 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8827 BPF_MOV64_IMM(BPF_REG_0
, 0),
8831 .prog_type
= BPF_PROG_TYPE_XDP
,
8834 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8836 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8837 offsetof(struct xdp_md
, data_meta
)),
8838 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8839 offsetof(struct xdp_md
, data
)),
8840 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8841 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8842 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8843 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8844 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8845 BPF_MOV64_IMM(BPF_REG_0
, 0),
8848 .errstr
= "R1 offset is outside of the packet",
8850 .prog_type
= BPF_PROG_TYPE_XDP
,
8851 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8854 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8856 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8857 offsetof(struct xdp_md
, data_meta
)),
8858 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8859 offsetof(struct xdp_md
, data
)),
8860 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8861 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8862 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
8863 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8864 BPF_MOV64_IMM(BPF_REG_0
, 0),
8867 .errstr
= "R1 offset is outside of the packet",
8869 .prog_type
= BPF_PROG_TYPE_XDP
,
8872 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8874 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8875 offsetof(struct xdp_md
, data_meta
)),
8876 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8877 offsetof(struct xdp_md
, data
)),
8878 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8879 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8880 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8881 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8882 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8883 BPF_MOV64_IMM(BPF_REG_0
, 0),
8887 .prog_type
= BPF_PROG_TYPE_XDP
,
8890 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8892 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8893 offsetof(struct xdp_md
, data_meta
)),
8894 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8895 offsetof(struct xdp_md
, data
)),
8896 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8898 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8899 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
8900 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
8901 BPF_MOV64_IMM(BPF_REG_0
, 0),
8904 .errstr
= "R1 offset is outside of the packet",
8906 .prog_type
= BPF_PROG_TYPE_XDP
,
8907 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8910 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8912 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8913 offsetof(struct xdp_md
, data_meta
)),
8914 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8915 offsetof(struct xdp_md
, data
)),
8916 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8917 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8918 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
8919 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8920 BPF_MOV64_IMM(BPF_REG_0
, 0),
8923 .errstr
= "R1 offset is outside of the packet",
8925 .prog_type
= BPF_PROG_TYPE_XDP
,
8928 "XDP pkt read, pkt_data <= pkt_meta', good access",
8930 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8931 offsetof(struct xdp_md
, data_meta
)),
8932 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8933 offsetof(struct xdp_md
, data
)),
8934 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8935 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8936 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8937 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8938 BPF_MOV64_IMM(BPF_REG_0
, 0),
8942 .prog_type
= BPF_PROG_TYPE_XDP
,
8943 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8946 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8948 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8949 offsetof(struct xdp_md
, data_meta
)),
8950 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8951 offsetof(struct xdp_md
, data
)),
8952 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8953 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8954 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
8955 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
8956 BPF_MOV64_IMM(BPF_REG_0
, 0),
8959 .errstr
= "R1 offset is outside of the packet",
8961 .prog_type
= BPF_PROG_TYPE_XDP
,
8964 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8966 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
8967 offsetof(struct xdp_md
, data_meta
)),
8968 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
8969 offsetof(struct xdp_md
, data
)),
8970 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
8971 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
8972 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 0),
8973 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
8974 BPF_MOV64_IMM(BPF_REG_0
, 0),
8977 .errstr
= "R1 offset is outside of the packet",
8979 .prog_type
= BPF_PROG_TYPE_XDP
,
8980 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
8983 "check deducing bounds from const, 1",
8985 BPF_MOV64_IMM(BPF_REG_0
, 1),
8986 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 1, 0),
8987 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
8991 .errstr
= "R0 tried to subtract pointer from scalar",
8994 "check deducing bounds from const, 2",
8996 BPF_MOV64_IMM(BPF_REG_0
, 1),
8997 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 1, 1),
8999 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 1, 1),
9001 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9007 "check deducing bounds from const, 3",
9009 BPF_MOV64_IMM(BPF_REG_0
, 0),
9010 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 0),
9011 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9015 .errstr
= "R0 tried to subtract pointer from scalar",
9018 "check deducing bounds from const, 4",
9020 BPF_MOV64_IMM(BPF_REG_0
, 0),
9021 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 1),
9023 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9025 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9031 "check deducing bounds from const, 5",
9033 BPF_MOV64_IMM(BPF_REG_0
, 0),
9034 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9035 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9039 .errstr
= "R0 tried to subtract pointer from scalar",
9042 "check deducing bounds from const, 6",
9044 BPF_MOV64_IMM(BPF_REG_0
, 0),
9045 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9047 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9051 .errstr
= "R0 tried to subtract pointer from scalar",
9054 "check deducing bounds from const, 7",
9056 BPF_MOV64_IMM(BPF_REG_0
, ~0),
9057 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 0),
9058 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_0
),
9059 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9060 offsetof(struct __sk_buff
, mark
)),
9064 .errstr
= "dereference of modified ctx ptr",
9067 "check deducing bounds from const, 8",
9069 BPF_MOV64_IMM(BPF_REG_0
, ~0),
9070 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 1),
9071 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_0
),
9072 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
9073 offsetof(struct __sk_buff
, mark
)),
9077 .errstr
= "dereference of modified ctx ptr",
9080 "check deducing bounds from const, 9",
9082 BPF_MOV64_IMM(BPF_REG_0
, 0),
9083 BPF_JMP_IMM(BPF_JSGE
, BPF_REG_0
, 0, 0),
9084 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9088 .errstr
= "R0 tried to subtract pointer from scalar",
9091 "check deducing bounds from const, 10",
9093 BPF_MOV64_IMM(BPF_REG_0
, 0),
9094 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_0
, 0, 0),
9095 /* Marks reg as unknown. */
9096 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_0
, 0),
9097 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
9101 .errstr
= "math between ctx pointer and register with unbounded min value is not allowed",
9104 "bpf_exit with invalid return code. test1",
9106 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9109 .errstr
= "R0 has value (0x0; 0xffffffff)",
9111 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9114 "bpf_exit with invalid return code. test2",
9116 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9117 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 1),
9121 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9124 "bpf_exit with invalid return code. test3",
9126 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9127 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 3),
9130 .errstr
= "R0 has value (0x0; 0x3)",
9132 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9135 "bpf_exit with invalid return code. test4",
9137 BPF_MOV64_IMM(BPF_REG_0
, 1),
9141 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9144 "bpf_exit with invalid return code. test5",
9146 BPF_MOV64_IMM(BPF_REG_0
, 2),
9149 .errstr
= "R0 has value (0x2; 0x0)",
9151 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9154 "bpf_exit with invalid return code. test6",
9156 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9159 .errstr
= "R0 is not a known value (ctx)",
9161 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9164 "bpf_exit with invalid return code. test7",
9166 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
9167 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 4),
9168 BPF_ALU64_REG(BPF_MUL
, BPF_REG_0
, BPF_REG_2
),
9171 .errstr
= "R0 has unknown scalar value",
9173 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
9176 "xadd/w check unaligned stack",
9178 BPF_MOV64_IMM(BPF_REG_0
, 1),
9179 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
9180 BPF_STX_XADD(BPF_W
, BPF_REG_10
, BPF_REG_0
, -7),
9181 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
9185 .errstr
= "misaligned stack access off",
9186 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9189 "xadd/w check unaligned map",
9191 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
9192 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
9193 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
9194 BPF_LD_MAP_FD(BPF_REG_1
, 0),
9195 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9196 BPF_FUNC_map_lookup_elem
),
9197 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
9199 BPF_MOV64_IMM(BPF_REG_1
, 1),
9200 BPF_STX_XADD(BPF_W
, BPF_REG_0
, BPF_REG_1
, 3),
9201 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, 3),
9204 .fixup_map1
= { 3 },
9206 .errstr
= "misaligned value access off",
9207 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9210 "xadd/w check unaligned pkt",
9212 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
9213 offsetof(struct xdp_md
, data
)),
9214 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
9215 offsetof(struct xdp_md
, data_end
)),
9216 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
9217 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
9218 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 2),
9219 BPF_MOV64_IMM(BPF_REG_0
, 99),
9220 BPF_JMP_IMM(BPF_JA
, 0, 0, 6),
9221 BPF_MOV64_IMM(BPF_REG_0
, 1),
9222 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 0, 0),
9223 BPF_ST_MEM(BPF_W
, BPF_REG_2
, 3, 0),
9224 BPF_STX_XADD(BPF_W
, BPF_REG_2
, BPF_REG_0
, 1),
9225 BPF_STX_XADD(BPF_W
, BPF_REG_2
, BPF_REG_0
, 2),
9226 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 1),
9230 .errstr
= "BPF_XADD stores into R2 packet",
9231 .prog_type
= BPF_PROG_TYPE_XDP
,
9234 "pass unmodified ctx pointer to helper",
9236 BPF_MOV64_IMM(BPF_REG_2
, 0),
9237 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9238 BPF_FUNC_csum_update
),
9239 BPF_MOV64_IMM(BPF_REG_0
, 0),
9242 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9246 "pass modified ctx pointer to helper, 1",
9248 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -612),
9249 BPF_MOV64_IMM(BPF_REG_2
, 0),
9250 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9251 BPF_FUNC_csum_update
),
9252 BPF_MOV64_IMM(BPF_REG_0
, 0),
9255 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9257 .errstr
= "dereference of modified ctx ptr",
9260 "pass modified ctx pointer to helper, 2",
9262 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -612),
9263 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9264 BPF_FUNC_get_socket_cookie
),
9265 BPF_MOV64_IMM(BPF_REG_0
, 0),
9268 .result_unpriv
= REJECT
,
9270 .errstr_unpriv
= "dereference of modified ctx ptr",
9271 .errstr
= "dereference of modified ctx ptr",
9274 "pass modified ctx pointer to helper, 3",
9276 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
, 0),
9277 BPF_ALU64_IMM(BPF_AND
, BPF_REG_3
, 4),
9278 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
9279 BPF_MOV64_IMM(BPF_REG_2
, 0),
9280 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
9281 BPF_FUNC_csum_update
),
9282 BPF_MOV64_IMM(BPF_REG_0
, 0),
9285 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
9287 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
9290 "masking, test out of bounds 1",
9292 BPF_MOV32_IMM(BPF_REG_1
, 5),
9293 BPF_MOV32_IMM(BPF_REG_2
, 5 - 1),
9294 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9295 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9296 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9297 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9298 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9299 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9305 "masking, test out of bounds 2",
9307 BPF_MOV32_IMM(BPF_REG_1
, 1),
9308 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9309 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9310 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9311 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9312 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9313 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9314 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9320 "masking, test out of bounds 3",
9322 BPF_MOV32_IMM(BPF_REG_1
, 0xffffffff),
9323 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9324 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9325 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9326 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9327 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9328 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9329 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9335 "masking, test out of bounds 4",
9337 BPF_MOV32_IMM(BPF_REG_1
, 0xffffffff),
9338 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9339 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9340 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9341 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9342 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9343 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9344 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9350 "masking, test out of bounds 5",
9352 BPF_MOV32_IMM(BPF_REG_1
, -1),
9353 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9354 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9355 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9356 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9357 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9358 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9359 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9365 "masking, test out of bounds 6",
9367 BPF_MOV32_IMM(BPF_REG_1
, -1),
9368 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9369 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9370 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9371 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9372 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9373 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9374 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9380 "masking, test out of bounds 7",
9382 BPF_MOV64_IMM(BPF_REG_1
, 5),
9383 BPF_MOV32_IMM(BPF_REG_2
, 5 - 1),
9384 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9385 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9386 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9387 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9388 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9389 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9395 "masking, test out of bounds 8",
9397 BPF_MOV64_IMM(BPF_REG_1
, 1),
9398 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9399 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9400 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9401 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9402 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9403 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9404 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9410 "masking, test out of bounds 9",
9412 BPF_MOV64_IMM(BPF_REG_1
, 0xffffffff),
9413 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9414 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9415 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9416 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9417 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9418 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9419 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9425 "masking, test out of bounds 10",
9427 BPF_MOV64_IMM(BPF_REG_1
, 0xffffffff),
9428 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9429 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9430 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9431 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9432 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9433 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9434 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9440 "masking, test out of bounds 11",
9442 BPF_MOV64_IMM(BPF_REG_1
, -1),
9443 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9444 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9445 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9446 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9447 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9448 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9449 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9455 "masking, test out of bounds 12",
9457 BPF_MOV64_IMM(BPF_REG_1
, -1),
9458 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9459 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9460 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9461 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9462 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9463 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9464 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9470 "masking, test in bounds 1",
9472 BPF_MOV32_IMM(BPF_REG_1
, 4),
9473 BPF_MOV32_IMM(BPF_REG_2
, 5 - 1),
9474 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9475 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9476 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9477 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9478 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9479 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9485 "masking, test in bounds 2",
9487 BPF_MOV32_IMM(BPF_REG_1
, 0),
9488 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9489 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9490 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9491 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9492 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9493 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9494 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9500 "masking, test in bounds 3",
9502 BPF_MOV32_IMM(BPF_REG_1
, 0xfffffffe),
9503 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff - 1),
9504 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9505 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9506 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9507 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9508 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9509 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9515 "masking, test in bounds 4",
9517 BPF_MOV32_IMM(BPF_REG_1
, 0xabcde),
9518 BPF_MOV32_IMM(BPF_REG_2
, 0xabcdef - 1),
9519 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9520 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9521 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9522 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9523 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9524 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9530 "masking, test in bounds 5",
9532 BPF_MOV32_IMM(BPF_REG_1
, 0),
9533 BPF_MOV32_IMM(BPF_REG_2
, 1 - 1),
9534 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9535 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9536 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9537 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9538 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9539 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9545 "masking, test in bounds 6",
9547 BPF_MOV32_IMM(BPF_REG_1
, 46),
9548 BPF_MOV32_IMM(BPF_REG_2
, 47 - 1),
9549 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_1
),
9550 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_1
),
9551 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9552 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9553 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
9554 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
9560 "masking, test in bounds 7",
9562 BPF_MOV64_IMM(BPF_REG_3
, -46),
9563 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_3
, -1),
9564 BPF_MOV32_IMM(BPF_REG_2
, 47 - 1),
9565 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_3
),
9566 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_3
),
9567 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9568 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9569 BPF_ALU64_REG(BPF_AND
, BPF_REG_3
, BPF_REG_2
),
9570 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
9576 "masking, test in bounds 8",
9578 BPF_MOV64_IMM(BPF_REG_3
, -47),
9579 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_3
, -1),
9580 BPF_MOV32_IMM(BPF_REG_2
, 47 - 1),
9581 BPF_ALU64_REG(BPF_SUB
, BPF_REG_2
, BPF_REG_3
),
9582 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_3
),
9583 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_2
, 0),
9584 BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_2
, 63),
9585 BPF_ALU64_REG(BPF_AND
, BPF_REG_3
, BPF_REG_2
),
9586 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
9593 static int probe_filter_length(const struct bpf_insn
*fp
)
9597 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
9598 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
9603 static int create_map(uint32_t size_value
, uint32_t max_elem
)
9607 fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
9608 size_value
, max_elem
, BPF_F_NO_PREALLOC
);
9610 printf("Failed to create hash map '%s'!\n", strerror(errno
));
9615 static int create_prog_array(void)
9619 fd
= bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY
, sizeof(int),
9622 printf("Failed to create prog array '%s'!\n", strerror(errno
));
9627 static int create_map_in_map(void)
9629 int inner_map_fd
, outer_map_fd
;
9631 inner_map_fd
= bpf_create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
9633 if (inner_map_fd
< 0) {
9634 printf("Failed to create array '%s'!\n", strerror(errno
));
9635 return inner_map_fd
;
9638 outer_map_fd
= bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
, NULL
,
9639 sizeof(int), inner_map_fd
, 1, 0);
9640 if (outer_map_fd
< 0)
9641 printf("Failed to create array of maps '%s'!\n",
9644 close(inner_map_fd
);
9646 return outer_map_fd
;
/* Shared buffer for the kernel verifier's log output; filled by
 * bpf_verify_program() and searched/printed in do_test_single().
 */
static char bpf_vlog[32768];
9651 static void do_test_fixup(struct bpf_test
*test
, struct bpf_insn
*prog
,
9654 int *fixup_map1
= test
->fixup_map1
;
9655 int *fixup_map2
= test
->fixup_map2
;
9656 int *fixup_prog
= test
->fixup_prog
;
9657 int *fixup_map_in_map
= test
->fixup_map_in_map
;
9659 /* Allocating HTs with 1 elem is fine here, since we only test
9660 * for verifier and not do a runtime lookup, so the only thing
9661 * that really matters is value size in this case.
9664 map_fds
[0] = create_map(sizeof(long long), 1);
9666 prog
[*fixup_map1
].imm
= map_fds
[0];
9668 } while (*fixup_map1
);
9672 map_fds
[1] = create_map(sizeof(struct test_val
), 1);
9674 prog
[*fixup_map2
].imm
= map_fds
[1];
9676 } while (*fixup_map2
);
9680 map_fds
[2] = create_prog_array();
9682 prog
[*fixup_prog
].imm
= map_fds
[2];
9684 } while (*fixup_prog
);
9687 if (*fixup_map_in_map
) {
9688 map_fds
[3] = create_map_in_map();
9690 prog
[*fixup_map_in_map
].imm
= map_fds
[3];
9692 } while (*fixup_map_in_map
);
9696 static void do_test_single(struct bpf_test
*test
, bool unpriv
,
9697 int *passes
, int *errors
)
9699 int fd_prog
, expected_ret
, reject_from_alignment
;
9700 struct bpf_insn
*prog
= test
->insns
;
9701 int prog_len
= probe_filter_length(prog
);
9702 int prog_type
= test
->prog_type
;
9703 int map_fds
[MAX_NR_MAPS
];
9704 const char *expected_err
;
9707 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
9710 do_test_fixup(test
, prog
, map_fds
);
9712 fd_prog
= bpf_verify_program(prog_type
? : BPF_PROG_TYPE_SOCKET_FILTER
,
9713 prog
, prog_len
, test
->flags
& F_LOAD_WITH_STRICT_ALIGNMENT
,
9714 "GPL", 0, bpf_vlog
, sizeof(bpf_vlog
), 1);
9716 expected_ret
= unpriv
&& test
->result_unpriv
!= UNDEF
?
9717 test
->result_unpriv
: test
->result
;
9718 expected_err
= unpriv
&& test
->errstr_unpriv
?
9719 test
->errstr_unpriv
: test
->errstr
;
9721 reject_from_alignment
= fd_prog
< 0 &&
9722 (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
) &&
9723 strstr(bpf_vlog
, "misaligned");
9724 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9725 if (reject_from_alignment
) {
9726 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
9731 if (expected_ret
== ACCEPT
) {
9732 if (fd_prog
< 0 && !reject_from_alignment
) {
9733 printf("FAIL\nFailed to load prog '%s'!\n",
9739 printf("FAIL\nUnexpected success to load!\n");
9742 if (!strstr(bpf_vlog
, expected_err
) && !reject_from_alignment
) {
9743 printf("FAIL\nUnexpected error message!\n");
9749 printf("OK%s\n", reject_from_alignment
?
9750 " (NOTE: reject due to unknown alignment)" : "");
9753 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
9759 printf("%s", bpf_vlog
);
9763 static bool is_admin(void)
9766 cap_flag_value_t sysadmin
= CAP_CLEAR
;
9767 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
9769 #ifdef CAP_IS_SUPPORTED
9770 if (!CAP_IS_SUPPORTED(CAP_SETFCAP
)) {
9771 perror("cap_get_flag");
9775 caps
= cap_get_proc();
9777 perror("cap_get_proc");
9780 if (cap_get_flag(caps
, cap_val
, CAP_EFFECTIVE
, &sysadmin
))
9781 perror("cap_get_flag");
9784 return (sysadmin
== CAP_SET
);
9787 static int set_admin(bool admin
)
9790 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
9793 caps
= cap_get_proc();
9795 perror("cap_get_proc");
9798 if (cap_set_flag(caps
, CAP_EFFECTIVE
, 1, &cap_val
,
9799 admin
? CAP_SET
: CAP_CLEAR
)) {
9800 perror("cap_set_flag");
9803 if (cap_set_proc(caps
)) {
9804 perror("cap_set_proc");
9814 static int do_test(bool unpriv
, unsigned int from
, unsigned int to
)
9816 int i
, passes
= 0, errors
= 0;
9818 for (i
= from
; i
< to
; i
++) {
9819 struct bpf_test
*test
= &tests
[i
];
9821 /* Program types that are not supported by non-root we
9824 if (!test
->prog_type
) {
9827 printf("#%d/u %s ", i
, test
->descr
);
9828 do_test_single(test
, true, &passes
, &errors
);
9834 printf("#%d/p %s ", i
, test
->descr
);
9835 do_test_single(test
, false, &passes
, &errors
);
9839 printf("Summary: %d PASSED, %d FAILED\n", passes
, errors
);
9840 return errors
? EXIT_FAILURE
: EXIT_SUCCESS
;
9843 int main(int argc
, char **argv
)
9845 struct rlimit rinf
= { RLIM_INFINITY
, RLIM_INFINITY
};
9846 struct rlimit rlim
= { 1 << 20, 1 << 20 };
9847 unsigned int from
= 0, to
= ARRAY_SIZE(tests
);
9848 bool unpriv
= !is_admin();
9851 unsigned int l
= atoi(argv
[argc
- 2]);
9852 unsigned int u
= atoi(argv
[argc
- 1]);
9854 if (l
< to
&& u
< to
) {
9858 } else if (argc
== 2) {
9859 unsigned int t
= atoi(argv
[argc
- 1]);
9867 setrlimit(RLIMIT_MEMLOCK
, unpriv
? &rlim
: &rinf
);
9868 return do_test(unpriv
, from
, to
);