1 /*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10
11 #include <asm/types.h>
12 #include <linux/types.h>
13 #include <stdint.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <errno.h>
18 #include <string.h>
19 #include <stddef.h>
20 #include <stdbool.h>
21 #include <sched.h>
22
23 #include <sys/capability.h>
24 #include <sys/resource.h>
25
26 #include <linux/unistd.h>
27 #include <linux/filter.h>
28 #include <linux/bpf_perf_event.h>
29 #include <linux/bpf.h>
30
31 #include <bpf/bpf.h>
32
33 #ifdef HAVE_GENHDR
34 # include "autoconf.h"
35 #else
36 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38 # endif
39 #endif
40
41 #include "../../../include/linux/filter.h"
42
43 #ifndef ARRAY_SIZE
44 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
45 #endif
46
47 #define MAX_INSNS 512
48 #define MAX_FIXUPS 8
49 #define MAX_NR_MAPS 4
50
51 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
52 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
53
54 struct bpf_test {
55 const char *descr;
56 struct bpf_insn insns[MAX_INSNS];
57 int fixup_map1[MAX_FIXUPS];
58 int fixup_map2[MAX_FIXUPS];
59 int fixup_prog[MAX_FIXUPS];
60 int fixup_map_in_map[MAX_FIXUPS];
61 const char *errstr;
62 const char *errstr_unpriv;
63 enum {
64 UNDEF,
65 ACCEPT,
66 REJECT
67 } result, result_unpriv;
68 enum bpf_prog_type prog_type;
69 uint8_t flags;
70 };
71
72 /* Note we want this to be 64 bit aligned so that the end of our array is
73 * actually the end of the structure.
74 */
75 #define MAX_ENTRIES 11
76
77 struct test_val {
78 unsigned int index;
79 int foo[MAX_ENTRIES];
80 };
81
82 static struct bpf_test tests[] = {
83 {
84 "add+sub+mul",
85 .insns = {
86 BPF_MOV64_IMM(BPF_REG_1, 1),
87 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
88 BPF_MOV64_IMM(BPF_REG_2, 3),
89 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
90 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
91 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
92 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
93 BPF_EXIT_INSN(),
94 },
95 .result = ACCEPT,
96 },
97 {
98 "unreachable",
99 .insns = {
100 BPF_EXIT_INSN(),
101 BPF_EXIT_INSN(),
102 },
103 .errstr = "unreachable",
104 .result = REJECT,
105 },
106 {
107 "unreachable2",
108 .insns = {
109 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
110 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
111 BPF_EXIT_INSN(),
112 },
113 .errstr = "unreachable",
114 .result = REJECT,
115 },
116 {
117 "out of range jump",
118 .insns = {
119 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
120 BPF_EXIT_INSN(),
121 },
122 .errstr = "jump out of range",
123 .result = REJECT,
124 },
125 {
126 "out of range jump2",
127 .insns = {
128 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
129 BPF_EXIT_INSN(),
130 },
131 .errstr = "jump out of range",
132 .result = REJECT,
133 },
134 {
135 "test1 ld_imm64",
136 .insns = {
137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
138 BPF_LD_IMM64(BPF_REG_0, 0),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 1),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_MOV64_IMM(BPF_REG_0, 2),
143 BPF_EXIT_INSN(),
144 },
145 .errstr = "invalid BPF_LD_IMM insn",
146 .errstr_unpriv = "R1 pointer comparison",
147 .result = REJECT,
148 },
149 {
150 "test2 ld_imm64",
151 .insns = {
152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
153 BPF_LD_IMM64(BPF_REG_0, 0),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 1),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_EXIT_INSN(),
158 },
159 .errstr = "invalid BPF_LD_IMM insn",
160 .errstr_unpriv = "R1 pointer comparison",
161 .result = REJECT,
162 },
163 {
164 "test3 ld_imm64",
165 .insns = {
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
167 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
168 BPF_LD_IMM64(BPF_REG_0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 1),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_EXIT_INSN(),
173 },
174 .errstr = "invalid bpf_ld_imm64 insn",
175 .result = REJECT,
176 },
177 {
178 "test4 ld_imm64",
179 .insns = {
180 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
181 BPF_EXIT_INSN(),
182 },
183 .errstr = "invalid bpf_ld_imm64 insn",
184 .result = REJECT,
185 },
186 {
187 "test5 ld_imm64",
188 .insns = {
189 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
190 },
191 .errstr = "invalid bpf_ld_imm64 insn",
192 .result = REJECT,
193 },
194 {
195 "test6 ld_imm64",
196 .insns = {
197 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
198 BPF_RAW_INSN(0, 0, 0, 0, 0),
199 BPF_EXIT_INSN(),
200 },
201 .result = ACCEPT,
202 },
203 {
204 "test7 ld_imm64",
205 .insns = {
206 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
207 BPF_RAW_INSN(0, 0, 0, 0, 1),
208 BPF_EXIT_INSN(),
209 },
210 .result = ACCEPT,
211 },
212 {
213 "test8 ld_imm64",
214 .insns = {
215 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
216 BPF_RAW_INSN(0, 0, 0, 0, 1),
217 BPF_EXIT_INSN(),
218 },
219 .errstr = "uses reserved fields",
220 .result = REJECT,
221 },
222 {
223 "test9 ld_imm64",
224 .insns = {
225 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
226 BPF_RAW_INSN(0, 0, 0, 1, 1),
227 BPF_EXIT_INSN(),
228 },
229 .errstr = "invalid bpf_ld_imm64 insn",
230 .result = REJECT,
231 },
232 {
233 "test10 ld_imm64",
234 .insns = {
235 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
236 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
237 BPF_EXIT_INSN(),
238 },
239 .errstr = "invalid bpf_ld_imm64 insn",
240 .result = REJECT,
241 },
242 {
243 "test11 ld_imm64",
244 .insns = {
245 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
246 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
247 BPF_EXIT_INSN(),
248 },
249 .errstr = "invalid bpf_ld_imm64 insn",
250 .result = REJECT,
251 },
252 {
253 "test12 ld_imm64",
254 .insns = {
255 BPF_MOV64_IMM(BPF_REG_1, 0),
256 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
257 BPF_RAW_INSN(0, 0, 0, 0, 1),
258 BPF_EXIT_INSN(),
259 },
260 .errstr = "not pointing to valid bpf_map",
261 .result = REJECT,
262 },
263 {
264 "test13 ld_imm64",
265 .insns = {
266 BPF_MOV64_IMM(BPF_REG_1, 0),
267 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
268 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
269 BPF_EXIT_INSN(),
270 },
271 .errstr = "invalid bpf_ld_imm64 insn",
272 .result = REJECT,
273 },
274 {
275 "no bpf_exit",
276 .insns = {
277 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
278 },
279 .errstr = "jump out of range",
280 .result = REJECT,
281 },
282 {
283 "loop (back-edge)",
284 .insns = {
285 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
286 BPF_EXIT_INSN(),
287 },
288 .errstr = "back-edge",
289 .result = REJECT,
290 },
291 {
292 "loop2 (back-edge)",
293 .insns = {
294 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
295 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
296 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
297 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
298 BPF_EXIT_INSN(),
299 },
300 .errstr = "back-edge",
301 .result = REJECT,
302 },
303 {
304 "conditional loop",
305 .insns = {
306 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
308 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
309 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
310 BPF_EXIT_INSN(),
311 },
312 .errstr = "back-edge",
313 .result = REJECT,
314 },
315 {
316 "read uninitialized register",
317 .insns = {
318 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
319 BPF_EXIT_INSN(),
320 },
321 .errstr = "R2 !read_ok",
322 .result = REJECT,
323 },
324 {
325 "read invalid register",
326 .insns = {
327 BPF_MOV64_REG(BPF_REG_0, -1),
328 BPF_EXIT_INSN(),
329 },
330 .errstr = "R15 is invalid",
331 .result = REJECT,
332 },
333 {
334 "program doesn't init R0 before exit",
335 .insns = {
336 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
337 BPF_EXIT_INSN(),
338 },
339 .errstr = "R0 !read_ok",
340 .result = REJECT,
341 },
342 {
343 "program doesn't init R0 before exit in all branches",
344 .insns = {
345 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
346 BPF_MOV64_IMM(BPF_REG_0, 1),
347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
348 BPF_EXIT_INSN(),
349 },
350 .errstr = "R0 !read_ok",
351 .errstr_unpriv = "R1 pointer comparison",
352 .result = REJECT,
353 },
354 {
355 "stack out of bounds",
356 .insns = {
357 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
358 BPF_EXIT_INSN(),
359 },
360 .errstr = "invalid stack",
361 .result = REJECT,
362 },
363 {
364 "invalid call insn1",
365 .insns = {
366 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
367 BPF_EXIT_INSN(),
368 },
369 .errstr = "BPF_CALL uses reserved",
370 .result = REJECT,
371 },
372 {
373 "invalid call insn2",
374 .insns = {
375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
376 BPF_EXIT_INSN(),
377 },
378 .errstr = "BPF_CALL uses reserved",
379 .result = REJECT,
380 },
381 {
382 "invalid function call",
383 .insns = {
384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
385 BPF_EXIT_INSN(),
386 },
387 .errstr = "invalid func unknown#1234567",
388 .result = REJECT,
389 },
390 {
391 "uninitialized stack1",
392 .insns = {
393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
395 BPF_LD_MAP_FD(BPF_REG_1, 0),
396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
397 BPF_FUNC_map_lookup_elem),
398 BPF_EXIT_INSN(),
399 },
400 .fixup_map1 = { 2 },
401 .errstr = "invalid indirect read from stack",
402 .result = REJECT,
403 },
404 {
405 "uninitialized stack2",
406 .insns = {
407 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
408 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
409 BPF_EXIT_INSN(),
410 },
411 .errstr = "invalid read from stack",
412 .result = REJECT,
413 },
414 {
415 "invalid fp arithmetic",
416 /* If this ever gets changed, make sure JITs can deal with it. */
417 .insns = {
418 BPF_MOV64_IMM(BPF_REG_0, 0),
419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
420 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
421 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
422 BPF_EXIT_INSN(),
423 },
424 .errstr_unpriv = "R1 pointer arithmetic",
425 .result_unpriv = REJECT,
426 .errstr = "R1 invalid mem access",
427 .result = REJECT,
428 },
429 {
430 "non-invalid fp arithmetic",
431 .insns = {
432 BPF_MOV64_IMM(BPF_REG_0, 0),
433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
434 BPF_EXIT_INSN(),
435 },
436 .result = ACCEPT,
437 },
438 {
439 "invalid argument register",
440 .insns = {
441 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
442 BPF_FUNC_get_cgroup_classid),
443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
444 BPF_FUNC_get_cgroup_classid),
445 BPF_EXIT_INSN(),
446 },
447 .errstr = "R1 !read_ok",
448 .result = REJECT,
449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
450 },
451 {
452 "non-invalid argument register",
453 .insns = {
454 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
456 BPF_FUNC_get_cgroup_classid),
457 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
459 BPF_FUNC_get_cgroup_classid),
460 BPF_EXIT_INSN(),
461 },
462 .result = ACCEPT,
463 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
464 },
465 {
466 "check valid spill/fill",
467 .insns = {
468 /* spill R1(ctx) into stack */
469 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
470 /* fill it back into R2 */
471 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
472 /* should be able to access R0 = *(R2 + 8) */
473 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
474 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
475 BPF_EXIT_INSN(),
476 },
477 .errstr_unpriv = "R0 leaks addr",
478 .result = ACCEPT,
479 .result_unpriv = REJECT,
480 },
481 {
482 "check valid spill/fill, skb mark",
483 .insns = {
484 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
485 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
486 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
488 offsetof(struct __sk_buff, mark)),
489 BPF_EXIT_INSN(),
490 },
491 .result = ACCEPT,
492 .result_unpriv = ACCEPT,
493 },
494 {
495 "check corrupted spill/fill",
496 .insns = {
497 /* spill R1(ctx) into stack */
498 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
499 /* mess with the spilled R1 pointer on the stack */
500 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
501 /* fill back into R0 should fail */
502 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
503 BPF_EXIT_INSN(),
504 },
505 .errstr_unpriv = "attempt to corrupt spilled",
506 .errstr = "corrupted spill",
507 .result = REJECT,
508 },
509 {
510 "invalid src register in STX",
511 .insns = {
512 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
513 BPF_EXIT_INSN(),
514 },
515 .errstr = "R15 is invalid",
516 .result = REJECT,
517 },
518 {
519 "invalid dst register in STX",
520 .insns = {
521 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
522 BPF_EXIT_INSN(),
523 },
524 .errstr = "R14 is invalid",
525 .result = REJECT,
526 },
527 {
528 "invalid dst register in ST",
529 .insns = {
530 BPF_ST_MEM(BPF_B, 14, -1, -1),
531 BPF_EXIT_INSN(),
532 },
533 .errstr = "R14 is invalid",
534 .result = REJECT,
535 },
536 {
537 "invalid src register in LDX",
538 .insns = {
539 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
540 BPF_EXIT_INSN(),
541 },
542 .errstr = "R12 is invalid",
543 .result = REJECT,
544 },
545 {
546 "invalid dst register in LDX",
547 .insns = {
548 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
549 BPF_EXIT_INSN(),
550 },
551 .errstr = "R11 is invalid",
552 .result = REJECT,
553 },
554 {
555 "junk insn",
556 .insns = {
557 BPF_RAW_INSN(0, 0, 0, 0, 0),
558 BPF_EXIT_INSN(),
559 },
560 .errstr = "invalid BPF_LD_IMM",
561 .result = REJECT,
562 },
563 {
564 "junk insn2",
565 .insns = {
566 BPF_RAW_INSN(1, 0, 0, 0, 0),
567 BPF_EXIT_INSN(),
568 },
569 .errstr = "BPF_LDX uses reserved fields",
570 .result = REJECT,
571 },
572 {
573 "junk insn3",
574 .insns = {
575 BPF_RAW_INSN(-1, 0, 0, 0, 0),
576 BPF_EXIT_INSN(),
577 },
578 .errstr = "invalid BPF_ALU opcode f0",
579 .result = REJECT,
580 },
581 {
582 "junk insn4",
583 .insns = {
584 BPF_RAW_INSN(-1, -1, -1, -1, -1),
585 BPF_EXIT_INSN(),
586 },
587 .errstr = "invalid BPF_ALU opcode f0",
588 .result = REJECT,
589 },
590 {
591 "junk insn5",
592 .insns = {
593 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
594 BPF_EXIT_INSN(),
595 },
596 .errstr = "BPF_ALU uses reserved fields",
597 .result = REJECT,
598 },
599 {
600 "misaligned read from stack",
601 .insns = {
602 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
603 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
604 BPF_EXIT_INSN(),
605 },
606 .errstr = "misaligned access",
607 .result = REJECT,
608 },
609 {
610 "invalid map_fd for function call",
611 .insns = {
612 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
613 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
615 BPF_LD_MAP_FD(BPF_REG_1, 0),
616 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
617 BPF_FUNC_map_delete_elem),
618 BPF_EXIT_INSN(),
619 },
620 .errstr = "fd 0 is not pointing to valid bpf_map",
621 .result = REJECT,
622 },
623 {
624 "don't check return value before access",
625 .insns = {
626 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
627 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
629 BPF_LD_MAP_FD(BPF_REG_1, 0),
630 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
631 BPF_FUNC_map_lookup_elem),
632 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
633 BPF_EXIT_INSN(),
634 },
635 .fixup_map1 = { 3 },
636 .errstr = "R0 invalid mem access 'map_value_or_null'",
637 .result = REJECT,
638 },
639 {
640 "access memory with incorrect alignment",
641 .insns = {
642 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
645 BPF_LD_MAP_FD(BPF_REG_1, 0),
646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
647 BPF_FUNC_map_lookup_elem),
648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
649 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
650 BPF_EXIT_INSN(),
651 },
652 .fixup_map1 = { 3 },
653 .errstr = "misaligned access",
654 .result = REJECT,
655 },
656 {
657 "sometimes access memory with incorrect alignment",
658 .insns = {
659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
662 BPF_LD_MAP_FD(BPF_REG_1, 0),
663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
664 BPF_FUNC_map_lookup_elem),
665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
666 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
667 BPF_EXIT_INSN(),
668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
669 BPF_EXIT_INSN(),
670 },
671 .fixup_map1 = { 3 },
672 .errstr = "R0 invalid mem access",
673 .errstr_unpriv = "R0 leaks addr",
674 .result = REJECT,
675 },
676 {
677 "jump test 1",
678 .insns = {
679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
680 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
682 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
684 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
686 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
688 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
690 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
692 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
693 BPF_MOV64_IMM(BPF_REG_0, 0),
694 BPF_EXIT_INSN(),
695 },
696 .errstr_unpriv = "R1 pointer comparison",
697 .result_unpriv = REJECT,
698 .result = ACCEPT,
699 },
700 {
701 "jump test 2",
702 .insns = {
703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
704 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
705 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
706 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
708 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
709 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
711 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
712 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
714 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
715 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
716 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
717 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
718 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
720 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
721 BPF_MOV64_IMM(BPF_REG_0, 0),
722 BPF_EXIT_INSN(),
723 },
724 .errstr_unpriv = "R1 pointer comparison",
725 .result_unpriv = REJECT,
726 .result = ACCEPT,
727 },
728 {
729 "jump test 3",
730 .insns = {
731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
733 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
735 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
737 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
739 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
741 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
743 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
745 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
749 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
751 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
752 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
753 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
755 BPF_LD_MAP_FD(BPF_REG_1, 0),
756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
757 BPF_FUNC_map_delete_elem),
758 BPF_EXIT_INSN(),
759 },
760 .fixup_map1 = { 24 },
761 .errstr_unpriv = "R1 pointer comparison",
762 .result_unpriv = REJECT,
763 .result = ACCEPT,
764 },
765 {
766 "jump test 4",
767 .insns = {
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
769 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
808 BPF_MOV64_IMM(BPF_REG_0, 0),
809 BPF_EXIT_INSN(),
810 },
811 .errstr_unpriv = "R1 pointer comparison",
812 .result_unpriv = REJECT,
813 .result = ACCEPT,
814 },
815 {
816 "jump test 5",
817 .insns = {
818 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
819 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
820 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
821 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
822 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
823 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
824 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
825 BPF_MOV64_IMM(BPF_REG_0, 0),
826 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
827 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
828 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
829 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
830 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
831 BPF_MOV64_IMM(BPF_REG_0, 0),
832 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
833 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
834 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
835 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
836 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
837 BPF_MOV64_IMM(BPF_REG_0, 0),
838 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
839 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
840 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
841 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
842 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
843 BPF_MOV64_IMM(BPF_REG_0, 0),
844 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
845 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
846 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
847 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
848 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
849 BPF_MOV64_IMM(BPF_REG_0, 0),
850 BPF_EXIT_INSN(),
851 },
852 .errstr_unpriv = "R1 pointer comparison",
853 .result_unpriv = REJECT,
854 .result = ACCEPT,
855 },
856 {
857 "access skb fields ok",
858 .insns = {
859 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
860 offsetof(struct __sk_buff, len)),
861 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
862 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
863 offsetof(struct __sk_buff, mark)),
864 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
865 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
866 offsetof(struct __sk_buff, pkt_type)),
867 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
868 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
869 offsetof(struct __sk_buff, queue_mapping)),
870 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
871 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
872 offsetof(struct __sk_buff, protocol)),
873 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
874 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
875 offsetof(struct __sk_buff, vlan_present)),
876 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
877 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
878 offsetof(struct __sk_buff, vlan_tci)),
879 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
880 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
881 offsetof(struct __sk_buff, napi_id)),
882 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
883 BPF_EXIT_INSN(),
884 },
885 .result = ACCEPT,
886 },
887 {
888 "access skb fields bad1",
889 .insns = {
890 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
891 BPF_EXIT_INSN(),
892 },
893 .errstr = "invalid bpf_context access",
894 .result = REJECT,
895 },
896 {
897 "access skb fields bad2",
898 .insns = {
899 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
900 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
903 BPF_LD_MAP_FD(BPF_REG_1, 0),
904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
905 BPF_FUNC_map_lookup_elem),
906 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
907 BPF_EXIT_INSN(),
908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, pkt_type)),
911 BPF_EXIT_INSN(),
912 },
913 .fixup_map1 = { 4 },
914 .errstr = "different pointers",
915 .errstr_unpriv = "R1 pointer comparison",
916 .result = REJECT,
917 },
918 {
919 "access skb fields bad3",
920 .insns = {
921 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
922 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
923 offsetof(struct __sk_buff, pkt_type)),
924 BPF_EXIT_INSN(),
925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
926 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
928 BPF_LD_MAP_FD(BPF_REG_1, 0),
929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
930 BPF_FUNC_map_lookup_elem),
931 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
932 BPF_EXIT_INSN(),
933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
934 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
935 },
936 .fixup_map1 = { 6 },
937 .errstr = "different pointers",
938 .errstr_unpriv = "R1 pointer comparison",
939 .result = REJECT,
940 },
941 {
942 "access skb fields bad4",
943 .insns = {
944 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
945 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
946 offsetof(struct __sk_buff, len)),
947 BPF_MOV64_IMM(BPF_REG_0, 0),
948 BPF_EXIT_INSN(),
949 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
950 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
952 BPF_LD_MAP_FD(BPF_REG_1, 0),
953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
954 BPF_FUNC_map_lookup_elem),
955 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
956 BPF_EXIT_INSN(),
957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
958 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
959 },
960 .fixup_map1 = { 7 },
961 .errstr = "different pointers",
962 .errstr_unpriv = "R1 pointer comparison",
963 .result = REJECT,
964 },
965 {
966 "check skb->mark is not writeable by sockets",
967 .insns = {
968 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
969 offsetof(struct __sk_buff, mark)),
970 BPF_EXIT_INSN(),
971 },
972 .errstr = "invalid bpf_context access",
973 .errstr_unpriv = "R1 leaks addr",
974 .result = REJECT,
975 },
976 {
977 "check skb->tc_index is not writeable by sockets",
978 .insns = {
979 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
980 offsetof(struct __sk_buff, tc_index)),
981 BPF_EXIT_INSN(),
982 },
983 .errstr = "invalid bpf_context access",
984 .errstr_unpriv = "R1 leaks addr",
985 .result = REJECT,
986 },
987 {
988 "check cb access: byte",
989 .insns = {
990 BPF_MOV64_IMM(BPF_REG_0, 0),
991 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
992 offsetof(struct __sk_buff, cb[0])),
993 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
994 offsetof(struct __sk_buff, cb[0]) + 1),
995 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
996 offsetof(struct __sk_buff, cb[0]) + 2),
997 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
998 offsetof(struct __sk_buff, cb[0]) + 3),
999 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1000 offsetof(struct __sk_buff, cb[1])),
1001 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1002 offsetof(struct __sk_buff, cb[1]) + 1),
1003 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1004 offsetof(struct __sk_buff, cb[1]) + 2),
1005 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1006 offsetof(struct __sk_buff, cb[1]) + 3),
1007 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1008 offsetof(struct __sk_buff, cb[2])),
1009 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1010 offsetof(struct __sk_buff, cb[2]) + 1),
1011 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1012 offsetof(struct __sk_buff, cb[2]) + 2),
1013 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1014 offsetof(struct __sk_buff, cb[2]) + 3),
1015 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1016 offsetof(struct __sk_buff, cb[3])),
1017 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1018 offsetof(struct __sk_buff, cb[3]) + 1),
1019 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1020 offsetof(struct __sk_buff, cb[3]) + 2),
1021 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1022 offsetof(struct __sk_buff, cb[3]) + 3),
1023 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1024 offsetof(struct __sk_buff, cb[4])),
1025 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1026 offsetof(struct __sk_buff, cb[4]) + 1),
1027 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1028 offsetof(struct __sk_buff, cb[4]) + 2),
1029 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1030 offsetof(struct __sk_buff, cb[4]) + 3),
1031 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1032 offsetof(struct __sk_buff, cb[0])),
1033 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1034 offsetof(struct __sk_buff, cb[0]) + 1),
1035 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1036 offsetof(struct __sk_buff, cb[0]) + 2),
1037 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1038 offsetof(struct __sk_buff, cb[0]) + 3),
1039 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1040 offsetof(struct __sk_buff, cb[1])),
1041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, cb[1]) + 1),
1043 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1044 offsetof(struct __sk_buff, cb[1]) + 2),
1045 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1046 offsetof(struct __sk_buff, cb[1]) + 3),
1047 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1048 offsetof(struct __sk_buff, cb[2])),
1049 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1050 offsetof(struct __sk_buff, cb[2]) + 1),
1051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, cb[2]) + 2),
1053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1054 offsetof(struct __sk_buff, cb[2]) + 3),
1055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1056 offsetof(struct __sk_buff, cb[3])),
1057 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1058 offsetof(struct __sk_buff, cb[3]) + 1),
1059 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1060 offsetof(struct __sk_buff, cb[3]) + 2),
1061 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, cb[3]) + 3),
1063 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1064 offsetof(struct __sk_buff, cb[4])),
1065 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1066 offsetof(struct __sk_buff, cb[4]) + 1),
1067 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1068 offsetof(struct __sk_buff, cb[4]) + 2),
1069 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1070 offsetof(struct __sk_buff, cb[4]) + 3),
1071 BPF_EXIT_INSN(),
1072 },
1073 .result = ACCEPT,
1074 },
1075 {
1076 "__sk_buff->hash, offset 0, byte store not permitted",
1077 .insns = {
1078 BPF_MOV64_IMM(BPF_REG_0, 0),
1079 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1080 offsetof(struct __sk_buff, hash)),
1081 BPF_EXIT_INSN(),
1082 },
1083 .errstr = "invalid bpf_context access",
1084 .result = REJECT,
1085 },
1086 {
1087 "__sk_buff->tc_index, offset 3, byte store not permitted",
1088 .insns = {
1089 BPF_MOV64_IMM(BPF_REG_0, 0),
1090 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1091 offsetof(struct __sk_buff, tc_index) + 3),
1092 BPF_EXIT_INSN(),
1093 },
1094 .errstr = "invalid bpf_context access",
1095 .result = REJECT,
1096 },
1097 {
1098 "check skb->hash byte load permitted",
1099 .insns = {
1100 BPF_MOV64_IMM(BPF_REG_0, 0),
1101 #ifdef __LITTLE_ENDIAN
1102 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1103 offsetof(struct __sk_buff, hash)),
1104 #else
1105 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1106 offsetof(struct __sk_buff, hash) + 3),
1107 #endif
1108 BPF_EXIT_INSN(),
1109 },
1110 .result = ACCEPT,
1111 },
1112 {
1113 "check skb->hash byte load not permitted 1",
1114 .insns = {
1115 BPF_MOV64_IMM(BPF_REG_0, 0),
1116 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1117 offsetof(struct __sk_buff, hash) + 1),
1118 BPF_EXIT_INSN(),
1119 },
1120 .errstr = "invalid bpf_context access",
1121 .result = REJECT,
1122 },
1123 {
1124 "check skb->hash byte load not permitted 2",
1125 .insns = {
1126 BPF_MOV64_IMM(BPF_REG_0, 0),
1127 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1128 offsetof(struct __sk_buff, hash) + 2),
1129 BPF_EXIT_INSN(),
1130 },
1131 .errstr = "invalid bpf_context access",
1132 .result = REJECT,
1133 },
1134 {
1135 "check skb->hash byte load not permitted 3",
1136 .insns = {
1137 BPF_MOV64_IMM(BPF_REG_0, 0),
1138 #ifdef __LITTLE_ENDIAN
1139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1140 offsetof(struct __sk_buff, hash) + 3),
1141 #else
1142 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1143 offsetof(struct __sk_buff, hash)),
1144 #endif
1145 BPF_EXIT_INSN(),
1146 },
1147 .errstr = "invalid bpf_context access",
1148 .result = REJECT,
1149 },
1150 {
1151 "check cb access: byte, wrong type",
1152 .insns = {
1153 BPF_MOV64_IMM(BPF_REG_0, 0),
1154 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1155 offsetof(struct __sk_buff, cb[0])),
1156 BPF_EXIT_INSN(),
1157 },
1158 .errstr = "invalid bpf_context access",
1159 .result = REJECT,
1160 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1161 },
1162 {
1163 "check cb access: half",
1164 .insns = {
1165 BPF_MOV64_IMM(BPF_REG_0, 0),
1166 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1167 offsetof(struct __sk_buff, cb[0])),
1168 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1169 offsetof(struct __sk_buff, cb[0]) + 2),
1170 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1171 offsetof(struct __sk_buff, cb[1])),
1172 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1173 offsetof(struct __sk_buff, cb[1]) + 2),
1174 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1175 offsetof(struct __sk_buff, cb[2])),
1176 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1177 offsetof(struct __sk_buff, cb[2]) + 2),
1178 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1179 offsetof(struct __sk_buff, cb[3])),
1180 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1181 offsetof(struct __sk_buff, cb[3]) + 2),
1182 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1183 offsetof(struct __sk_buff, cb[4])),
1184 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1185 offsetof(struct __sk_buff, cb[4]) + 2),
1186 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1187 offsetof(struct __sk_buff, cb[0])),
1188 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1189 offsetof(struct __sk_buff, cb[0]) + 2),
1190 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1191 offsetof(struct __sk_buff, cb[1])),
1192 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1193 offsetof(struct __sk_buff, cb[1]) + 2),
1194 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1195 offsetof(struct __sk_buff, cb[2])),
1196 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1197 offsetof(struct __sk_buff, cb[2]) + 2),
1198 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1199 offsetof(struct __sk_buff, cb[3])),
1200 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1201 offsetof(struct __sk_buff, cb[3]) + 2),
1202 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1203 offsetof(struct __sk_buff, cb[4])),
1204 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1205 offsetof(struct __sk_buff, cb[4]) + 2),
1206 BPF_EXIT_INSN(),
1207 },
1208 .result = ACCEPT,
1209 },
1210 {
1211 "check cb access: half, unaligned",
1212 .insns = {
1213 BPF_MOV64_IMM(BPF_REG_0, 0),
1214 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1215 offsetof(struct __sk_buff, cb[0]) + 1),
1216 BPF_EXIT_INSN(),
1217 },
1218 .errstr = "misaligned access",
1219 .result = REJECT,
1220 },
1221 {
1222 "check __sk_buff->hash, offset 0, half store not permitted",
1223 .insns = {
1224 BPF_MOV64_IMM(BPF_REG_0, 0),
1225 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1226 offsetof(struct __sk_buff, hash)),
1227 BPF_EXIT_INSN(),
1228 },
1229 .errstr = "invalid bpf_context access",
1230 .result = REJECT,
1231 },
1232 {
1233 "check __sk_buff->tc_index, offset 2, half store not permitted",
1234 .insns = {
1235 BPF_MOV64_IMM(BPF_REG_0, 0),
1236 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1237 offsetof(struct __sk_buff, tc_index) + 2),
1238 BPF_EXIT_INSN(),
1239 },
1240 .errstr = "invalid bpf_context access",
1241 .result = REJECT,
1242 },
1243 {
1244 "check skb->hash half load permitted",
1245 .insns = {
1246 BPF_MOV64_IMM(BPF_REG_0, 0),
1247 #ifdef __LITTLE_ENDIAN
1248 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1249 offsetof(struct __sk_buff, hash)),
1250 #else
1251 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1252 offsetof(struct __sk_buff, hash) + 2),
1253 #endif
1254 BPF_EXIT_INSN(),
1255 },
1256 .result = ACCEPT,
1257 },
1258 {
1259 "check skb->hash half load not permitted",
1260 .insns = {
1261 BPF_MOV64_IMM(BPF_REG_0, 0),
1262 #ifdef __LITTLE_ENDIAN
1263 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1264 offsetof(struct __sk_buff, hash) + 2),
1265 #else
1266 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1267 offsetof(struct __sk_buff, hash)),
1268 #endif
1269 BPF_EXIT_INSN(),
1270 },
1271 .errstr = "invalid bpf_context access",
1272 .result = REJECT,
1273 },
1274 {
1275 "check cb access: half, wrong type",
1276 .insns = {
1277 BPF_MOV64_IMM(BPF_REG_0, 0),
1278 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1279 offsetof(struct __sk_buff, cb[0])),
1280 BPF_EXIT_INSN(),
1281 },
1282 .errstr = "invalid bpf_context access",
1283 .result = REJECT,
1284 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1285 },
1286 {
1287 "check cb access: word",
1288 .insns = {
1289 BPF_MOV64_IMM(BPF_REG_0, 0),
1290 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1291 offsetof(struct __sk_buff, cb[0])),
1292 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1293 offsetof(struct __sk_buff, cb[1])),
1294 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1295 offsetof(struct __sk_buff, cb[2])),
1296 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1297 offsetof(struct __sk_buff, cb[3])),
1298 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1299 offsetof(struct __sk_buff, cb[4])),
1300 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1301 offsetof(struct __sk_buff, cb[0])),
1302 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1303 offsetof(struct __sk_buff, cb[1])),
1304 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1305 offsetof(struct __sk_buff, cb[2])),
1306 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1307 offsetof(struct __sk_buff, cb[3])),
1308 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1309 offsetof(struct __sk_buff, cb[4])),
1310 BPF_EXIT_INSN(),
1311 },
1312 .result = ACCEPT,
1313 },
1314 {
1315 "check cb access: word, unaligned 1",
1316 .insns = {
1317 BPF_MOV64_IMM(BPF_REG_0, 0),
1318 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1319 offsetof(struct __sk_buff, cb[0]) + 2),
1320 BPF_EXIT_INSN(),
1321 },
1322 .errstr = "misaligned access",
1323 .result = REJECT,
1324 },
1325 {
1326 "check cb access: word, unaligned 2",
1327 .insns = {
1328 BPF_MOV64_IMM(BPF_REG_0, 0),
1329 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1330 offsetof(struct __sk_buff, cb[4]) + 1),
1331 BPF_EXIT_INSN(),
1332 },
1333 .errstr = "misaligned access",
1334 .result = REJECT,
1335 },
1336 {
1337 "check cb access: word, unaligned 3",
1338 .insns = {
1339 BPF_MOV64_IMM(BPF_REG_0, 0),
1340 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1341 offsetof(struct __sk_buff, cb[4]) + 2),
1342 BPF_EXIT_INSN(),
1343 },
1344 .errstr = "misaligned access",
1345 .result = REJECT,
1346 },
1347 {
1348 "check cb access: word, unaligned 4",
1349 .insns = {
1350 BPF_MOV64_IMM(BPF_REG_0, 0),
1351 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1352 offsetof(struct __sk_buff, cb[4]) + 3),
1353 BPF_EXIT_INSN(),
1354 },
1355 .errstr = "misaligned access",
1356 .result = REJECT,
1357 },
1358 {
1359 "check cb access: double",
1360 .insns = {
1361 BPF_MOV64_IMM(BPF_REG_0, 0),
1362 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1363 offsetof(struct __sk_buff, cb[0])),
1364 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1365 offsetof(struct __sk_buff, cb[2])),
1366 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1367 offsetof(struct __sk_buff, cb[0])),
1368 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, cb[2])),
1370 BPF_EXIT_INSN(),
1371 },
1372 .result = ACCEPT,
1373 },
1374 {
1375 "check cb access: double, unaligned 1",
1376 .insns = {
1377 BPF_MOV64_IMM(BPF_REG_0, 0),
1378 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1379 offsetof(struct __sk_buff, cb[1])),
1380 BPF_EXIT_INSN(),
1381 },
1382 .errstr = "misaligned access",
1383 .result = REJECT,
1384 },
1385 {
1386 "check cb access: double, unaligned 2",
1387 .insns = {
1388 BPF_MOV64_IMM(BPF_REG_0, 0),
1389 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1390 offsetof(struct __sk_buff, cb[3])),
1391 BPF_EXIT_INSN(),
1392 },
1393 .errstr = "misaligned access",
1394 .result = REJECT,
1395 },
1396 {
1397 "check cb access: double, oob 1",
1398 .insns = {
1399 BPF_MOV64_IMM(BPF_REG_0, 0),
1400 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1401 offsetof(struct __sk_buff, cb[4])),
1402 BPF_EXIT_INSN(),
1403 },
1404 .errstr = "invalid bpf_context access",
1405 .result = REJECT,
1406 },
1407 {
1408 "check cb access: double, oob 2",
1409 .insns = {
1410 BPF_MOV64_IMM(BPF_REG_0, 0),
1411 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1412 offsetof(struct __sk_buff, cb[4])),
1413 BPF_EXIT_INSN(),
1414 },
1415 .errstr = "invalid bpf_context access",
1416 .result = REJECT,
1417 },
1418 {
1419 "check __sk_buff->ifindex dw store not permitted",
1420 .insns = {
1421 BPF_MOV64_IMM(BPF_REG_0, 0),
1422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1423 offsetof(struct __sk_buff, ifindex)),
1424 BPF_EXIT_INSN(),
1425 },
1426 .errstr = "invalid bpf_context access",
1427 .result = REJECT,
1428 },
1429 {
1430 "check __sk_buff->ifindex dw load not permitted",
1431 .insns = {
1432 BPF_MOV64_IMM(BPF_REG_0, 0),
1433 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1434 offsetof(struct __sk_buff, ifindex)),
1435 BPF_EXIT_INSN(),
1436 },
1437 .errstr = "invalid bpf_context access",
1438 .result = REJECT,
1439 },
1440 {
1441 "check cb access: double, wrong type",
1442 .insns = {
1443 BPF_MOV64_IMM(BPF_REG_0, 0),
1444 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1445 offsetof(struct __sk_buff, cb[0])),
1446 BPF_EXIT_INSN(),
1447 },
1448 .errstr = "invalid bpf_context access",
1449 .result = REJECT,
1450 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1451 },
1452 {
1453 "check out of range skb->cb access",
1454 .insns = {
1455 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1456 offsetof(struct __sk_buff, cb[0]) + 256),
1457 BPF_EXIT_INSN(),
1458 },
1459 .errstr = "invalid bpf_context access",
1460 .errstr_unpriv = "",
1461 .result = REJECT,
1462 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1463 },
1464 {
1465 "write skb fields from socket prog",
1466 .insns = {
1467 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1468 offsetof(struct __sk_buff, cb[4])),
1469 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1470 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1471 offsetof(struct __sk_buff, mark)),
1472 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1473 offsetof(struct __sk_buff, tc_index)),
1474 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1475 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1476 offsetof(struct __sk_buff, cb[0])),
1477 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1478 offsetof(struct __sk_buff, cb[2])),
1479 BPF_EXIT_INSN(),
1480 },
1481 .result = ACCEPT,
1482 .errstr_unpriv = "R1 leaks addr",
1483 .result_unpriv = REJECT,
1484 },
1485 {
1486 "write skb fields from tc_cls_act prog",
1487 .insns = {
1488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1489 offsetof(struct __sk_buff, cb[0])),
1490 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1491 offsetof(struct __sk_buff, mark)),
1492 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1493 offsetof(struct __sk_buff, tc_index)),
1494 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1495 offsetof(struct __sk_buff, tc_index)),
1496 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1497 offsetof(struct __sk_buff, cb[3])),
1498 BPF_EXIT_INSN(),
1499 },
1500 .errstr_unpriv = "",
1501 .result_unpriv = REJECT,
1502 .result = ACCEPT,
1503 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1504 },
1505 {
1506 "PTR_TO_STACK store/load",
1507 .insns = {
1508 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1510 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1511 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1512 BPF_EXIT_INSN(),
1513 },
1514 .result = ACCEPT,
1515 },
1516 {
1517 "PTR_TO_STACK store/load - bad alignment on off",
1518 .insns = {
1519 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1521 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1523 BPF_EXIT_INSN(),
1524 },
1525 .result = REJECT,
1526 .errstr = "misaligned access off -6 size 8",
1527 },
1528 {
1529 "PTR_TO_STACK store/load - bad alignment on reg",
1530 .insns = {
1531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1533 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1534 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1535 BPF_EXIT_INSN(),
1536 },
1537 .result = REJECT,
1538 .errstr = "misaligned access off -2 size 8",
1539 },
1540 {
1541 "PTR_TO_STACK store/load - out of bounds low",
1542 .insns = {
1543 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1545 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1546 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1547 BPF_EXIT_INSN(),
1548 },
1549 .result = REJECT,
1550 .errstr = "invalid stack off=-79992 size=8",
1551 },
1552 {
1553 "PTR_TO_STACK store/load - out of bounds high",
1554 .insns = {
1555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1557 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1558 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1559 BPF_EXIT_INSN(),
1560 },
1561 .result = REJECT,
1562 .errstr = "invalid stack off=0 size=8",
1563 },
1564 {
1565 "unpriv: return pointer",
1566 .insns = {
1567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1568 BPF_EXIT_INSN(),
1569 },
1570 .result = ACCEPT,
1571 .result_unpriv = REJECT,
1572 .errstr_unpriv = "R0 leaks addr",
1573 },
1574 {
1575 "unpriv: add const to pointer",
1576 .insns = {
1577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1578 BPF_MOV64_IMM(BPF_REG_0, 0),
1579 BPF_EXIT_INSN(),
1580 },
1581 .result = ACCEPT,
1582 .result_unpriv = REJECT,
1583 .errstr_unpriv = "R1 pointer arithmetic",
1584 },
1585 {
1586 "unpriv: add pointer to pointer",
1587 .insns = {
1588 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1589 BPF_MOV64_IMM(BPF_REG_0, 0),
1590 BPF_EXIT_INSN(),
1591 },
1592 .result = ACCEPT,
1593 .result_unpriv = REJECT,
1594 .errstr_unpriv = "R1 pointer arithmetic",
1595 },
1596 {
1597 "unpriv: neg pointer",
1598 .insns = {
1599 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1600 BPF_MOV64_IMM(BPF_REG_0, 0),
1601 BPF_EXIT_INSN(),
1602 },
1603 .result = ACCEPT,
1604 .result_unpriv = REJECT,
1605 .errstr_unpriv = "R1 pointer arithmetic",
1606 },
1607 {
1608 "unpriv: cmp pointer with const",
1609 .insns = {
1610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1611 BPF_MOV64_IMM(BPF_REG_0, 0),
1612 BPF_EXIT_INSN(),
1613 },
1614 .result = ACCEPT,
1615 .result_unpriv = REJECT,
1616 .errstr_unpriv = "R1 pointer comparison",
1617 },
1618 {
1619 "unpriv: cmp pointer with pointer",
1620 .insns = {
1621 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1622 BPF_MOV64_IMM(BPF_REG_0, 0),
1623 BPF_EXIT_INSN(),
1624 },
1625 .result = ACCEPT,
1626 .result_unpriv = REJECT,
1627 .errstr_unpriv = "R10 pointer comparison",
1628 },
1629 {
1630 "unpriv: check that printk is disallowed",
1631 .insns = {
1632 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1633 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1635 BPF_MOV64_IMM(BPF_REG_2, 8),
1636 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1637 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1638 BPF_FUNC_trace_printk),
1639 BPF_MOV64_IMM(BPF_REG_0, 0),
1640 BPF_EXIT_INSN(),
1641 },
1642 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1643 .result_unpriv = REJECT,
1644 .result = ACCEPT,
1645 },
1646 {
1647 "unpriv: pass pointer to helper function",
1648 .insns = {
1649 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1650 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1652 BPF_LD_MAP_FD(BPF_REG_1, 0),
1653 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1654 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1656 BPF_FUNC_map_update_elem),
1657 BPF_MOV64_IMM(BPF_REG_0, 0),
1658 BPF_EXIT_INSN(),
1659 },
1660 .fixup_map1 = { 3 },
1661 .errstr_unpriv = "R4 leaks addr",
1662 .result_unpriv = REJECT,
1663 .result = ACCEPT,
1664 },
1665 {
1666 "unpriv: indirectly pass pointer on stack to helper function",
1667 .insns = {
1668 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1669 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1671 BPF_LD_MAP_FD(BPF_REG_1, 0),
1672 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1673 BPF_FUNC_map_lookup_elem),
1674 BPF_MOV64_IMM(BPF_REG_0, 0),
1675 BPF_EXIT_INSN(),
1676 },
1677 .fixup_map1 = { 3 },
1678 .errstr = "invalid indirect read from stack off -8+0 size 8",
1679 .result = REJECT,
1680 },
1681 {
1682 "unpriv: mangle pointer on stack 1",
1683 .insns = {
1684 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1685 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1686 BPF_MOV64_IMM(BPF_REG_0, 0),
1687 BPF_EXIT_INSN(),
1688 },
1689 .errstr_unpriv = "attempt to corrupt spilled",
1690 .result_unpriv = REJECT,
1691 .result = ACCEPT,
1692 },
1693 {
1694 "unpriv: mangle pointer on stack 2",
1695 .insns = {
1696 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1697 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1698 BPF_MOV64_IMM(BPF_REG_0, 0),
1699 BPF_EXIT_INSN(),
1700 },
1701 .errstr_unpriv = "attempt to corrupt spilled",
1702 .result_unpriv = REJECT,
1703 .result = ACCEPT,
1704 },
1705 {
1706 "unpriv: read pointer from stack in small chunks",
1707 .insns = {
1708 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1709 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1710 BPF_MOV64_IMM(BPF_REG_0, 0),
1711 BPF_EXIT_INSN(),
1712 },
1713 .errstr = "invalid size",
1714 .result = REJECT,
1715 },
1716 {
1717 "unpriv: write pointer into ctx",
1718 .insns = {
1719 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1720 BPF_MOV64_IMM(BPF_REG_0, 0),
1721 BPF_EXIT_INSN(),
1722 },
1723 .errstr_unpriv = "R1 leaks addr",
1724 .result_unpriv = REJECT,
1725 .errstr = "invalid bpf_context access",
1726 .result = REJECT,
1727 },
1728 {
1729 "unpriv: spill/fill of ctx",
1730 .insns = {
1731 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1733 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1734 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1735 BPF_MOV64_IMM(BPF_REG_0, 0),
1736 BPF_EXIT_INSN(),
1737 },
1738 .result = ACCEPT,
1739 },
1740 {
1741 "unpriv: spill/fill of ctx 2",
1742 .insns = {
1743 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1745 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1746 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1748 BPF_FUNC_get_hash_recalc),
1749 BPF_EXIT_INSN(),
1750 },
1751 .result = ACCEPT,
1752 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1753 },
1754 {
1755 "unpriv: spill/fill of ctx 3",
1756 .insns = {
1757 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1759 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1761 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1763 BPF_FUNC_get_hash_recalc),
1764 BPF_EXIT_INSN(),
1765 },
1766 .result = REJECT,
1767 .errstr = "R1 type=fp expected=ctx",
1768 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1769 },
1770 {
1771 "unpriv: spill/fill of ctx 4",
1772 .insns = {
1773 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1775 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1776 BPF_MOV64_IMM(BPF_REG_0, 1),
1777 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1778 BPF_REG_0, -8, 0),
1779 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1781 BPF_FUNC_get_hash_recalc),
1782 BPF_EXIT_INSN(),
1783 },
1784 .result = REJECT,
1785 .errstr = "R1 type=inv expected=ctx",
1786 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1787 },
1788 {
1789 "unpriv: spill/fill of different pointers stx",
1790 .insns = {
1791 BPF_MOV64_IMM(BPF_REG_3, 42),
1792 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1795 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1796 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1797 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1798 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1799 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1800 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1801 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1802 offsetof(struct __sk_buff, mark)),
1803 BPF_MOV64_IMM(BPF_REG_0, 0),
1804 BPF_EXIT_INSN(),
1805 },
1806 .result = REJECT,
1807 .errstr = "same insn cannot be used with different pointers",
1808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1809 },
1810 {
1811 "unpriv: spill/fill of different pointers ldx",
1812 .insns = {
1813 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1815 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1816 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1818 -(__s32)offsetof(struct bpf_perf_event_data,
1819 sample_period) - 8),
1820 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1821 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1822 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1823 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1824 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1825 offsetof(struct bpf_perf_event_data,
1826 sample_period)),
1827 BPF_MOV64_IMM(BPF_REG_0, 0),
1828 BPF_EXIT_INSN(),
1829 },
1830 .result = REJECT,
1831 .errstr = "same insn cannot be used with different pointers",
1832 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1833 },
1834 {
1835 "unpriv: write pointer into map elem value",
1836 .insns = {
1837 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1838 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1839 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1840 BPF_LD_MAP_FD(BPF_REG_1, 0),
1841 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1842 BPF_FUNC_map_lookup_elem),
1843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1844 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1845 BPF_EXIT_INSN(),
1846 },
1847 .fixup_map1 = { 3 },
1848 .errstr_unpriv = "R0 leaks addr",
1849 .result_unpriv = REJECT,
1850 .result = ACCEPT,
1851 },
1852 {
1853 "unpriv: partial copy of pointer",
1854 .insns = {
1855 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1856 BPF_MOV64_IMM(BPF_REG_0, 0),
1857 BPF_EXIT_INSN(),
1858 },
1859 .errstr_unpriv = "R10 partial copy",
1860 .result_unpriv = REJECT,
1861 .result = ACCEPT,
1862 },
1863 {
1864 "unpriv: pass pointer to tail_call",
1865 .insns = {
1866 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1867 BPF_LD_MAP_FD(BPF_REG_2, 0),
1868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1869 BPF_FUNC_tail_call),
1870 BPF_MOV64_IMM(BPF_REG_0, 0),
1871 BPF_EXIT_INSN(),
1872 },
1873 .fixup_prog = { 1 },
1874 .errstr_unpriv = "R3 leaks addr into helper",
1875 .result_unpriv = REJECT,
1876 .result = ACCEPT,
1877 },
1878 {
1879 "unpriv: cmp map pointer with zero",
1880 .insns = {
1881 BPF_MOV64_IMM(BPF_REG_1, 0),
1882 BPF_LD_MAP_FD(BPF_REG_1, 0),
1883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1884 BPF_MOV64_IMM(BPF_REG_0, 0),
1885 BPF_EXIT_INSN(),
1886 },
1887 .fixup_map1 = { 1 },
1888 .errstr_unpriv = "R1 pointer comparison",
1889 .result_unpriv = REJECT,
1890 .result = ACCEPT,
1891 },
1892 {
1893 "unpriv: write into frame pointer",
1894 .insns = {
1895 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1896 BPF_MOV64_IMM(BPF_REG_0, 0),
1897 BPF_EXIT_INSN(),
1898 },
1899 .errstr = "frame pointer is read only",
1900 .result = REJECT,
1901 },
1902 {
1903 "unpriv: spill/fill frame pointer",
1904 .insns = {
1905 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1907 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1908 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1909 BPF_MOV64_IMM(BPF_REG_0, 0),
1910 BPF_EXIT_INSN(),
1911 },
1912 .errstr = "frame pointer is read only",
1913 .result = REJECT,
1914 },
1915 {
1916 "unpriv: cmp of frame pointer",
1917 .insns = {
1918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1919 BPF_MOV64_IMM(BPF_REG_0, 0),
1920 BPF_EXIT_INSN(),
1921 },
1922 .errstr_unpriv = "R10 pointer comparison",
1923 .result_unpriv = REJECT,
1924 .result = ACCEPT,
1925 },
1926 {
1927 "unpriv: adding of fp",
1928 .insns = {
1929 BPF_MOV64_IMM(BPF_REG_0, 0),
1930 BPF_MOV64_IMM(BPF_REG_1, 0),
1931 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1932 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
1933 BPF_EXIT_INSN(),
1934 },
1935 .errstr_unpriv = "pointer arithmetic prohibited",
1936 .result_unpriv = REJECT,
1937 .errstr = "R1 invalid mem access",
1938 .result = REJECT,
1939 },
1940 {
1941 "unpriv: cmp of stack pointer",
1942 .insns = {
1943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1946 BPF_MOV64_IMM(BPF_REG_0, 0),
1947 BPF_EXIT_INSN(),
1948 },
1949 .errstr_unpriv = "R2 pointer comparison",
1950 .result_unpriv = REJECT,
1951 .result = ACCEPT,
1952 },
1953 {
1954 "stack pointer arithmetic",
1955 .insns = {
1956 BPF_MOV64_IMM(BPF_REG_1, 4),
1957 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1958 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
1959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1962 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
1963 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1966 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1967 BPF_MOV64_IMM(BPF_REG_0, 0),
1968 BPF_EXIT_INSN(),
1969 },
1970 .result = ACCEPT,
1971 },
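/*
 * raw_stack tests: pass a pointer into the program's own stack as the
 * destination buffer of bpf_skb_load_bytes() and check how the verifier
 * validates that argument - length sign/zero checks, stack bounds,
 * spilled registers overlapping the buffer, and use of an uninitialized
 * buffer.
 */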
1972 {
1973 "raw_stack: no skb_load_bytes",
1974 .insns = {
1975 BPF_MOV64_IMM(BPF_REG_2, 4),
1976 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1978 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1979 BPF_MOV64_IMM(BPF_REG_4, 8),
1980 /* Call to skb_load_bytes() omitted. */
1981 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1982 BPF_EXIT_INSN(),
1983 },
1984 .result = REJECT,
1985 .errstr = "invalid read from stack off -8+0 size 8",
1986 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1987 },
1988 {
1989 "raw_stack: skb_load_bytes, negative len",
1990 .insns = {
1991 BPF_MOV64_IMM(BPF_REG_2, 4),
1992 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1994 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1995 BPF_MOV64_IMM(BPF_REG_4, -8),
1996 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1997 BPF_FUNC_skb_load_bytes),
1998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1999 BPF_EXIT_INSN(),
2000 },
2001 .result = REJECT,
2002 .errstr = "invalid stack type R3",
2003 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2004 },
2005 {
2006 "raw_stack: skb_load_bytes, negative len 2",
2007 .insns = {
2008 BPF_MOV64_IMM(BPF_REG_2, 4),
2009 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2011 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2012 BPF_MOV64_IMM(BPF_REG_4, ~0),
2013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2014 BPF_FUNC_skb_load_bytes),
2015 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2016 BPF_EXIT_INSN(),
2017 },
2018 .result = REJECT,
2019 .errstr = "invalid stack type R3",
2020 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2021 },
2022 {
2023 "raw_stack: skb_load_bytes, zero len",
2024 .insns = {
2025 BPF_MOV64_IMM(BPF_REG_2, 4),
2026 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2028 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2029 BPF_MOV64_IMM(BPF_REG_4, 0),
2030 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2031 BPF_FUNC_skb_load_bytes),
2032 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2033 BPF_EXIT_INSN(),
2034 },
2035 .result = REJECT,
2036 .errstr = "invalid stack type R3",
2037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2038 },
2039 {
2040 "raw_stack: skb_load_bytes, no init",
2041 .insns = {
2042 BPF_MOV64_IMM(BPF_REG_2, 4),
2043 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2045 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2046 BPF_MOV64_IMM(BPF_REG_4, 8),
2047 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2048 BPF_FUNC_skb_load_bytes),
2049 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2050 BPF_EXIT_INSN(),
2051 },
2052 .result = ACCEPT,
2053 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2054 },
2055 {
2056 "raw_stack: skb_load_bytes, init",
2057 .insns = {
2058 BPF_MOV64_IMM(BPF_REG_2, 4),
2059 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2061 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2062 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2063 BPF_MOV64_IMM(BPF_REG_4, 8),
2064 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2065 BPF_FUNC_skb_load_bytes),
2066 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2067 BPF_EXIT_INSN(),
2068 },
2069 .result = ACCEPT,
2070 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2071 },
2072 {
2073 "raw_stack: skb_load_bytes, spilled regs around bounds",
2074 .insns = {
2075 BPF_MOV64_IMM(BPF_REG_2, 4),
2076 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2078 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2079 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2080 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2081 BPF_MOV64_IMM(BPF_REG_4, 8),
2082 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2083 BPF_FUNC_skb_load_bytes),
2084 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2085 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2086 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2087 offsetof(struct __sk_buff, mark)),
2088 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2089 offsetof(struct __sk_buff, priority)),
2090 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2091 BPF_EXIT_INSN(),
2092 },
2093 .result = ACCEPT,
2094 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2095 },
2096 {
2097 "raw_stack: skb_load_bytes, spilled regs corruption",
2098 .insns = {
2099 BPF_MOV64_IMM(BPF_REG_2, 4),
2100 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2102 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2103 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2104 BPF_MOV64_IMM(BPF_REG_4, 8),
2105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2106 BPF_FUNC_skb_load_bytes),
2107 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2108 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2109 offsetof(struct __sk_buff, mark)),
2110 BPF_EXIT_INSN(),
2111 },
2112 .result = REJECT,
2113 .errstr = "R0 invalid mem access 'inv'",
2114 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2115 },
2116 {
2117 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2118 .insns = {
2119 BPF_MOV64_IMM(BPF_REG_2, 4),
2120 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2122 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2123 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2124 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2125 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2126 BPF_MOV64_IMM(BPF_REG_4, 8),
2127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2128 BPF_FUNC_skb_load_bytes),
2129 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2130 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2131 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2132 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2133 offsetof(struct __sk_buff, mark)),
2134 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2135 offsetof(struct __sk_buff, priority)),
2136 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2138 offsetof(struct __sk_buff, pkt_type)),
2139 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2140 BPF_EXIT_INSN(),
2141 },
2142 .result = REJECT,
2143 .errstr = "R3 invalid mem access 'inv'",
2144 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2145 },
2146 {
2147 "raw_stack: skb_load_bytes, spilled regs + data",
2148 .insns = {
2149 BPF_MOV64_IMM(BPF_REG_2, 4),
2150 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2152 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2153 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2154 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2155 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2156 BPF_MOV64_IMM(BPF_REG_4, 8),
2157 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2158 BPF_FUNC_skb_load_bytes),
2159 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2160 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2161 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2162 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2163 offsetof(struct __sk_buff, mark)),
2164 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2165 offsetof(struct __sk_buff, priority)),
2166 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2167 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2168 BPF_EXIT_INSN(),
2169 },
2170 .result = ACCEPT,
2171 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2172 },
2173 {
2174 "raw_stack: skb_load_bytes, invalid access 1",
2175 .insns = {
2176 BPF_MOV64_IMM(BPF_REG_2, 4),
2177 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2179 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2180 BPF_MOV64_IMM(BPF_REG_4, 8),
2181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2182 BPF_FUNC_skb_load_bytes),
2183 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2184 BPF_EXIT_INSN(),
2185 },
2186 .result = REJECT,
2187 .errstr = "invalid stack type R3 off=-513 access_size=8",
2188 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2189 },
2190 {
2191 "raw_stack: skb_load_bytes, invalid access 2",
2192 .insns = {
2193 BPF_MOV64_IMM(BPF_REG_2, 4),
2194 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2196 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2197 BPF_MOV64_IMM(BPF_REG_4, 8),
2198 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2199 BPF_FUNC_skb_load_bytes),
2200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2201 BPF_EXIT_INSN(),
2202 },
2203 .result = REJECT,
2204 .errstr = "invalid stack type R3 off=-1 access_size=8",
2205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2206 },
2207 {
2208 "raw_stack: skb_load_bytes, invalid access 3",
2209 .insns = {
2210 BPF_MOV64_IMM(BPF_REG_2, 4),
2211 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2213 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2214 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2215 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2216 BPF_FUNC_skb_load_bytes),
2217 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2218 BPF_EXIT_INSN(),
2219 },
2220 .result = REJECT,
2221 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2222 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2223 },
2224 {
2225 "raw_stack: skb_load_bytes, invalid access 4",
2226 .insns = {
2227 BPF_MOV64_IMM(BPF_REG_2, 4),
2228 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2230 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2231 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2232 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2233 BPF_FUNC_skb_load_bytes),
2234 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2235 BPF_EXIT_INSN(),
2236 },
2237 .result = REJECT,
2238 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2239 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2240 },
2241 {
2242 "raw_stack: skb_load_bytes, invalid access 5",
2243 .insns = {
2244 BPF_MOV64_IMM(BPF_REG_2, 4),
2245 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2247 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2248 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2249 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2250 BPF_FUNC_skb_load_bytes),
2251 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2252 BPF_EXIT_INSN(),
2253 },
2254 .result = REJECT,
2255 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2257 },
2258 {
2259 "raw_stack: skb_load_bytes, invalid access 6",
2260 .insns = {
2261 BPF_MOV64_IMM(BPF_REG_2, 4),
2262 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2264 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2265 BPF_MOV64_IMM(BPF_REG_4, 0),
2266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2267 BPF_FUNC_skb_load_bytes),
2268 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2269 BPF_EXIT_INSN(),
2270 },
2271 .result = REJECT,
2272 .errstr = "invalid stack type R3 off=-512 access_size=0",
2273 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2274 },
2275 {
2276 "raw_stack: skb_load_bytes, large access",
2277 .insns = {
2278 BPF_MOV64_IMM(BPF_REG_2, 4),
2279 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2281 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2282 BPF_MOV64_IMM(BPF_REG_4, 512),
2283 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2284 BPF_FUNC_skb_load_bytes),
2285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2286 BPF_EXIT_INSN(),
2287 },
2288 .result = ACCEPT,
2289 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2290 },
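/*
 * Direct packet access tests: load skb->data and skb->data_end from the
 * context, compare them to establish a safe range, and then read or
 * write packet bytes.  The REJECT cases access the packet without (or
 * beyond) the established range check, or do arithmetic on data_end.
 */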
2291 {
2292 "direct packet access: test1",
2293 .insns = {
2294 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2295 offsetof(struct __sk_buff, data)),
2296 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2297 offsetof(struct __sk_buff, data_end)),
2298 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2300 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2302 BPF_MOV64_IMM(BPF_REG_0, 0),
2303 BPF_EXIT_INSN(),
2304 },
2305 .result = ACCEPT,
2306 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2307 },
2308 {
2309 "direct packet access: test2",
2310 .insns = {
2311 BPF_MOV64_IMM(BPF_REG_0, 1),
2312 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2313 offsetof(struct __sk_buff, data_end)),
2314 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2315 offsetof(struct __sk_buff, data)),
2316 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2318 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2319 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2320 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2321 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2323 offsetof(struct __sk_buff, data)),
2324 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2325 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2326 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2327 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2328 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2331 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2332 offsetof(struct __sk_buff, data_end)),
2333 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2334 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2335 BPF_MOV64_IMM(BPF_REG_0, 0),
2336 BPF_EXIT_INSN(),
2337 },
2338 .result = ACCEPT,
2339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2340 },
2341 {
2342 "direct packet access: test3",
2343 .insns = {
2344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2345 offsetof(struct __sk_buff, data)),
2346 BPF_MOV64_IMM(BPF_REG_0, 0),
2347 BPF_EXIT_INSN(),
2348 },
2349 .errstr = "invalid bpf_context access off=76",
2350 .result = REJECT,
2351 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2352 },
2353 {
2354 "direct packet access: test4 (write)",
2355 .insns = {
2356 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2357 offsetof(struct __sk_buff, data)),
2358 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2359 offsetof(struct __sk_buff, data_end)),
2360 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2362 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2363 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2364 BPF_MOV64_IMM(BPF_REG_0, 0),
2365 BPF_EXIT_INSN(),
2366 },
2367 .result = ACCEPT,
2368 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2369 },
2370 {
2371 "direct packet access: test5 (pkt_end >= reg, good access)",
2372 .insns = {
2373 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2374 offsetof(struct __sk_buff, data)),
2375 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2376 offsetof(struct __sk_buff, data_end)),
2377 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2379 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2380 BPF_MOV64_IMM(BPF_REG_0, 1),
2381 BPF_EXIT_INSN(),
2382 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2383 BPF_MOV64_IMM(BPF_REG_0, 0),
2384 BPF_EXIT_INSN(),
2385 },
2386 .result = ACCEPT,
2387 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2388 },
2389 {
2390 "direct packet access: test6 (pkt_end >= reg, bad access)",
2391 .insns = {
2392 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2393 offsetof(struct __sk_buff, data)),
2394 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2395 offsetof(struct __sk_buff, data_end)),
2396 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2398 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2399 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2400 BPF_MOV64_IMM(BPF_REG_0, 1),
2401 BPF_EXIT_INSN(),
2402 BPF_MOV64_IMM(BPF_REG_0, 0),
2403 BPF_EXIT_INSN(),
2404 },
2405 .errstr = "invalid access to packet",
2406 .result = REJECT,
2407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2408 },
2409 {
2410 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2411 .insns = {
2412 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2413 offsetof(struct __sk_buff, data)),
2414 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2415 offsetof(struct __sk_buff, data_end)),
2416 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2418 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2419 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2420 BPF_MOV64_IMM(BPF_REG_0, 1),
2421 BPF_EXIT_INSN(),
2422 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2423 BPF_MOV64_IMM(BPF_REG_0, 0),
2424 BPF_EXIT_INSN(),
2425 },
2426 .errstr = "invalid access to packet",
2427 .result = REJECT,
2428 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2429 },
2430 {
2431 "direct packet access: test8 (double test, variant 1)",
2432 .insns = {
2433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2434 offsetof(struct __sk_buff, data)),
2435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2436 offsetof(struct __sk_buff, data_end)),
2437 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2439 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2440 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2441 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2442 BPF_MOV64_IMM(BPF_REG_0, 1),
2443 BPF_EXIT_INSN(),
2444 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2445 BPF_MOV64_IMM(BPF_REG_0, 0),
2446 BPF_EXIT_INSN(),
2447 },
2448 .result = ACCEPT,
2449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2450 },
2451 {
2452 "direct packet access: test9 (double test, variant 2)",
2453 .insns = {
2454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2455 offsetof(struct __sk_buff, data)),
2456 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2457 offsetof(struct __sk_buff, data_end)),
2458 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2460 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2461 BPF_MOV64_IMM(BPF_REG_0, 1),
2462 BPF_EXIT_INSN(),
2463 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2464 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2465 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2466 BPF_MOV64_IMM(BPF_REG_0, 0),
2467 BPF_EXIT_INSN(),
2468 },
2469 .result = ACCEPT,
2470 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2471 },
2472 {
2473 "direct packet access: test10 (write invalid)",
2474 .insns = {
2475 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2476 offsetof(struct __sk_buff, data)),
2477 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2478 offsetof(struct __sk_buff, data_end)),
2479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2481 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2482 BPF_MOV64_IMM(BPF_REG_0, 0),
2483 BPF_EXIT_INSN(),
2484 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2485 BPF_MOV64_IMM(BPF_REG_0, 0),
2486 BPF_EXIT_INSN(),
2487 },
2488 .errstr = "invalid access to packet",
2489 .result = REJECT,
2490 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2491 },
2492 {
2493 "direct packet access: test11 (shift, good access)",
2494 .insns = {
2495 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2496 offsetof(struct __sk_buff, data)),
2497 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2498 offsetof(struct __sk_buff, data_end)),
2499 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2501 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2502 BPF_MOV64_IMM(BPF_REG_3, 144),
2503 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2505 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2506 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2507 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2508 BPF_MOV64_IMM(BPF_REG_0, 1),
2509 BPF_EXIT_INSN(),
2510 BPF_MOV64_IMM(BPF_REG_0, 0),
2511 BPF_EXIT_INSN(),
2512 },
2513 .result = ACCEPT,
2514 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2515 },
2516 {
2517 "direct packet access: test12 (and, good access)",
2518 .insns = {
2519 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2520 offsetof(struct __sk_buff, data)),
2521 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2522 offsetof(struct __sk_buff, data_end)),
2523 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2525 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2526 BPF_MOV64_IMM(BPF_REG_3, 144),
2527 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2529 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2530 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2531 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2532 BPF_MOV64_IMM(BPF_REG_0, 1),
2533 BPF_EXIT_INSN(),
2534 BPF_MOV64_IMM(BPF_REG_0, 0),
2535 BPF_EXIT_INSN(),
2536 },
2537 .result = ACCEPT,
2538 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2539 },
2540 {
2541 "direct packet access: test13 (branches, good access)",
2542 .insns = {
2543 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2544 offsetof(struct __sk_buff, data)),
2545 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2546 offsetof(struct __sk_buff, data_end)),
2547 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2549 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2550 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2551 offsetof(struct __sk_buff, mark)),
2552 BPF_MOV64_IMM(BPF_REG_4, 1),
2553 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2554 BPF_MOV64_IMM(BPF_REG_3, 14),
2555 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2556 BPF_MOV64_IMM(BPF_REG_3, 24),
2557 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2559 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2560 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2561 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2562 BPF_MOV64_IMM(BPF_REG_0, 1),
2563 BPF_EXIT_INSN(),
2564 BPF_MOV64_IMM(BPF_REG_0, 0),
2565 BPF_EXIT_INSN(),
2566 },
2567 .result = ACCEPT,
2568 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2569 },
2570 {
2571 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2572 .insns = {
2573 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2574 offsetof(struct __sk_buff, data)),
2575 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2576 offsetof(struct __sk_buff, data_end)),
2577 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2579 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2580 BPF_MOV64_IMM(BPF_REG_5, 12),
2581 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2582 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2583 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2584 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2585 BPF_MOV64_IMM(BPF_REG_0, 1),
2586 BPF_EXIT_INSN(),
2587 BPF_MOV64_IMM(BPF_REG_0, 0),
2588 BPF_EXIT_INSN(),
2589 },
2590 .result = ACCEPT,
2591 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2592 },
2593 {
2594 "direct packet access: test15 (spill with xadd)",
2595 .insns = {
2596 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2597 offsetof(struct __sk_buff, data)),
2598 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2599 offsetof(struct __sk_buff, data_end)),
2600 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2602 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2603 BPF_MOV64_IMM(BPF_REG_5, 4096),
2604 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2606 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2607 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2608 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2609 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2610 BPF_MOV64_IMM(BPF_REG_0, 0),
2611 BPF_EXIT_INSN(),
2612 },
2613 .errstr = "R2 invalid mem access 'inv'",
2614 .result = REJECT,
2615 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2616 },
2617 {
2618 "direct packet access: test16 (arith on data_end)",
2619 .insns = {
2620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2621 offsetof(struct __sk_buff, data)),
2622 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2623 offsetof(struct __sk_buff, data_end)),
2624 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2627 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2628 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2629 BPF_MOV64_IMM(BPF_REG_0, 0),
2630 BPF_EXIT_INSN(),
2631 },
2632 .errstr = "invalid access to packet",
2633 .result = REJECT,
2634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2635 },
2636 {
2637 "direct packet access: test17 (pruning, alignment)",
2638 .insns = {
2639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2640 offsetof(struct __sk_buff, data)),
2641 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2642 offsetof(struct __sk_buff, data_end)),
2643 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2644 offsetof(struct __sk_buff, mark)),
2645 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2647 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2648 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2649 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2650 BPF_MOV64_IMM(BPF_REG_0, 0),
2651 BPF_EXIT_INSN(),
2652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2653 BPF_JMP_A(-6),
2654 },
2655 .errstr = "misaligned packet access off 2+15+-4 size 4",
2656 .result = REJECT,
2657 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2658 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2659 },
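/*
 * Tests 18-24 add immediates or bounded scalars to a packet pointer, in
 * both operand orders, before the range check, and verify which
 * combinations the verifier can still track.
 */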
2660 {
2661 "direct packet access: test18 (imm += pkt_ptr, 1)",
2662 .insns = {
2663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2664 offsetof(struct __sk_buff, data)),
2665 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2666 offsetof(struct __sk_buff, data_end)),
2667 BPF_MOV64_IMM(BPF_REG_0, 8),
2668 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2669 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2670 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2671 BPF_MOV64_IMM(BPF_REG_0, 0),
2672 BPF_EXIT_INSN(),
2673 },
2674 .result = ACCEPT,
2675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2676 },
2677 {
2678 "direct packet access: test19 (imm += pkt_ptr, 2)",
2679 .insns = {
2680 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2681 offsetof(struct __sk_buff, data)),
2682 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2683 offsetof(struct __sk_buff, data_end)),
2684 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2686 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2687 BPF_MOV64_IMM(BPF_REG_4, 4),
2688 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2689 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2690 BPF_MOV64_IMM(BPF_REG_0, 0),
2691 BPF_EXIT_INSN(),
2692 },
2693 .result = ACCEPT,
2694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2695 },
2696 {
2697 "direct packet access: test20 (x += pkt_ptr, 1)",
2698 .insns = {
2699 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2700 offsetof(struct __sk_buff, data)),
2701 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2702 offsetof(struct __sk_buff, data_end)),
2703 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2704 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2705 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2706 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
2707 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2708 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2709 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
2711 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2712 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2713 BPF_MOV64_IMM(BPF_REG_0, 0),
2714 BPF_EXIT_INSN(),
2715 },
2716 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2717 .result = ACCEPT,
2718 },
2719 {
2720 "direct packet access: test21 (x += pkt_ptr, 2)",
2721 .insns = {
2722 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2723 offsetof(struct __sk_buff, data)),
2724 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2725 offsetof(struct __sk_buff, data_end)),
2726 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2728 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2729 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2730 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2731 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2732 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff),
2733 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2734 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
2736 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2737 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2738 BPF_MOV64_IMM(BPF_REG_0, 0),
2739 BPF_EXIT_INSN(),
2740 },
2741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2742 .result = ACCEPT,
2743 },
2744 {
2745 "direct packet access: test22 (x += pkt_ptr, 3)",
2746 .insns = {
2747 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2748 offsetof(struct __sk_buff, data)),
2749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2750 offsetof(struct __sk_buff, data_end)),
2751 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2753 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
2754 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
2755 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
2756 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
2757 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
2758 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2759 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2760 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2761 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48),
2762 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2763 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
2764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
2765 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2766 BPF_MOV64_IMM(BPF_REG_2, 1),
2767 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
2768 BPF_MOV64_IMM(BPF_REG_0, 0),
2769 BPF_EXIT_INSN(),
2770 },
2771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2772 .result = ACCEPT,
2773 },
2774 {
2775 "direct packet access: test23 (x += pkt_ptr, 4)",
2776 .insns = {
2777 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2778 offsetof(struct __sk_buff, data)),
2779 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2780 offsetof(struct __sk_buff, data_end)),
2781 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2782 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2783 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2784 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
2785 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2786 BPF_MOV64_IMM(BPF_REG_0, 31),
2787 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2788 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2789 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
2791 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2792 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2793 BPF_MOV64_IMM(BPF_REG_0, 0),
2794 BPF_EXIT_INSN(),
2795 },
2796 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 .result = REJECT,
2798 .errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet",
2799 },
2800 {
2801 "direct packet access: test24 (x += pkt_ptr, 5)",
2802 .insns = {
2803 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2804 offsetof(struct __sk_buff, data)),
2805 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2806 offsetof(struct __sk_buff, data_end)),
2807 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2808 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2809 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2810 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
2811 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2812 BPF_MOV64_IMM(BPF_REG_0, 64),
2813 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2814 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2815 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
2817 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2818 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2819 BPF_MOV64_IMM(BPF_REG_0, 0),
2820 BPF_EXIT_INSN(),
2821 },
2822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2823 .result = ACCEPT,
2824 },
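/*
 * "helper access to packet" (XDP): pass packet pointers derived from
 * xdp_md->data as map helper arguments and check that only properly
 * range-checked pointers are accepted.
 */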
2825 {
2826 "helper access to packet: test1, valid packet_ptr range",
2827 .insns = {
2828 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2829 offsetof(struct xdp_md, data)),
2830 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2831 offsetof(struct xdp_md, data_end)),
2832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2834 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2835 BPF_LD_MAP_FD(BPF_REG_1, 0),
2836 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2837 BPF_MOV64_IMM(BPF_REG_4, 0),
2838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2839 BPF_FUNC_map_update_elem),
2840 BPF_MOV64_IMM(BPF_REG_0, 0),
2841 BPF_EXIT_INSN(),
2842 },
2843 .fixup_map1 = { 5 },
2844 .result_unpriv = ACCEPT,
2845 .result = ACCEPT,
2846 .prog_type = BPF_PROG_TYPE_XDP,
2847 },
2848 {
2849 "helper access to packet: test2, unchecked packet_ptr",
2850 .insns = {
2851 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2852 offsetof(struct xdp_md, data)),
2853 BPF_LD_MAP_FD(BPF_REG_1, 0),
2854 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2855 BPF_FUNC_map_lookup_elem),
2856 BPF_MOV64_IMM(BPF_REG_0, 0),
2857 BPF_EXIT_INSN(),
2858 },
2859 .fixup_map1 = { 1 },
2860 .result = REJECT,
2861 .errstr = "invalid access to packet",
2862 .prog_type = BPF_PROG_TYPE_XDP,
2863 },
2864 {
2865 "helper access to packet: test3, variable add",
2866 .insns = {
2867 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2868 offsetof(struct xdp_md, data)),
2869 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2870 offsetof(struct xdp_md, data_end)),
2871 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2873 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2874 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2875 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2876 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2877 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2879 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2880 BPF_LD_MAP_FD(BPF_REG_1, 0),
2881 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2882 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2883 BPF_FUNC_map_lookup_elem),
2884 BPF_MOV64_IMM(BPF_REG_0, 0),
2885 BPF_EXIT_INSN(),
2886 },
2887 .fixup_map1 = { 11 },
2888 .result = ACCEPT,
2889 .prog_type = BPF_PROG_TYPE_XDP,
2890 },
2891 {
2892 "helper access to packet: test4, packet_ptr with bad range",
2893 .insns = {
2894 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2895 offsetof(struct xdp_md, data)),
2896 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2897 offsetof(struct xdp_md, data_end)),
2898 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2900 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2901 BPF_MOV64_IMM(BPF_REG_0, 0),
2902 BPF_EXIT_INSN(),
2903 BPF_LD_MAP_FD(BPF_REG_1, 0),
2904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2905 BPF_FUNC_map_lookup_elem),
2906 BPF_MOV64_IMM(BPF_REG_0, 0),
2907 BPF_EXIT_INSN(),
2908 },
2909 .fixup_map1 = { 7 },
2910 .result = REJECT,
2911 .errstr = "invalid access to packet",
2912 .prog_type = BPF_PROG_TYPE_XDP,
2913 },
2914 {
2915 "helper access to packet: test5, packet_ptr with too short range",
2916 .insns = {
2917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2918 offsetof(struct xdp_md, data)),
2919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2920 offsetof(struct xdp_md, data_end)),
2921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2922 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2924 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2925 BPF_LD_MAP_FD(BPF_REG_1, 0),
2926 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2927 BPF_FUNC_map_lookup_elem),
2928 BPF_MOV64_IMM(BPF_REG_0, 0),
2929 BPF_EXIT_INSN(),
2930 },
2931 .fixup_map1 = { 6 },
2932 .result = REJECT,
2933 .errstr = "invalid access to packet",
2934 .prog_type = BPF_PROG_TYPE_XDP,
2935 },
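/*
 * The same helper-argument checks repeated for SCHED_CLS programs using
 * skb->data / skb->data_end, plus helpers that must not be given packet
 * pointers and csum_diff() length corner cases.
 */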
2936 {
2937 "helper access to packet: test6, cls valid packet_ptr range",
2938 .insns = {
2939 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2940 offsetof(struct __sk_buff, data)),
2941 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2942 offsetof(struct __sk_buff, data_end)),
2943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2945 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2946 BPF_LD_MAP_FD(BPF_REG_1, 0),
2947 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2948 BPF_MOV64_IMM(BPF_REG_4, 0),
2949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2950 BPF_FUNC_map_update_elem),
2951 BPF_MOV64_IMM(BPF_REG_0, 0),
2952 BPF_EXIT_INSN(),
2953 },
2954 .fixup_map1 = { 5 },
2955 .result = ACCEPT,
2956 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2957 },
2958 {
2959 "helper access to packet: test7, cls unchecked packet_ptr",
2960 .insns = {
2961 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2962 offsetof(struct __sk_buff, data)),
2963 BPF_LD_MAP_FD(BPF_REG_1, 0),
2964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2965 BPF_FUNC_map_lookup_elem),
2966 BPF_MOV64_IMM(BPF_REG_0, 0),
2967 BPF_EXIT_INSN(),
2968 },
2969 .fixup_map1 = { 1 },
2970 .result = REJECT,
2971 .errstr = "invalid access to packet",
2972 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2973 },
2974 {
2975 "helper access to packet: test8, cls variable add",
2976 .insns = {
2977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2978 offsetof(struct __sk_buff, data)),
2979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2980 offsetof(struct __sk_buff, data_end)),
2981 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2983 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2984 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2985 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2986 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2987 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2989 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2990 BPF_LD_MAP_FD(BPF_REG_1, 0),
2991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2993 BPF_FUNC_map_lookup_elem),
2994 BPF_MOV64_IMM(BPF_REG_0, 0),
2995 BPF_EXIT_INSN(),
2996 },
2997 .fixup_map1 = { 11 },
2998 .result = ACCEPT,
2999 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3000 },
3001 {
3002 "helper access to packet: test9, cls packet_ptr with bad range",
3003 .insns = {
3004 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3005 offsetof(struct __sk_buff, data)),
3006 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3007 offsetof(struct __sk_buff, data_end)),
3008 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3010 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3011 BPF_MOV64_IMM(BPF_REG_0, 0),
3012 BPF_EXIT_INSN(),
3013 BPF_LD_MAP_FD(BPF_REG_1, 0),
3014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3015 BPF_FUNC_map_lookup_elem),
3016 BPF_MOV64_IMM(BPF_REG_0, 0),
3017 BPF_EXIT_INSN(),
3018 },
3019 .fixup_map1 = { 7 },
3020 .result = REJECT,
3021 .errstr = "invalid access to packet",
3022 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3023 },
3024 {
3025 "helper access to packet: test10, cls packet_ptr with too short range",
3026 .insns = {
3027 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3028 offsetof(struct __sk_buff, data)),
3029 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3030 offsetof(struct __sk_buff, data_end)),
3031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3032 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3034 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3035 BPF_LD_MAP_FD(BPF_REG_1, 0),
3036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3037 BPF_FUNC_map_lookup_elem),
3038 BPF_MOV64_IMM(BPF_REG_0, 0),
3039 BPF_EXIT_INSN(),
3040 },
3041 .fixup_map1 = { 6 },
3042 .result = REJECT,
3043 .errstr = "invalid access to packet",
3044 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3045 },
3046 {
3047 "helper access to packet: test11, cls unsuitable helper 1",
3048 .insns = {
3049 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3050 offsetof(struct __sk_buff, data)),
3051 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3052 offsetof(struct __sk_buff, data_end)),
3053 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3054 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3056 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3057 BPF_MOV64_IMM(BPF_REG_2, 0),
3058 BPF_MOV64_IMM(BPF_REG_4, 42),
3059 BPF_MOV64_IMM(BPF_REG_5, 0),
3060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3061 BPF_FUNC_skb_store_bytes),
3062 BPF_MOV64_IMM(BPF_REG_0, 0),
3063 BPF_EXIT_INSN(),
3064 },
3065 .result = REJECT,
3066 .errstr = "helper access to the packet",
3067 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3068 },
3069 {
3070 "helper access to packet: test12, cls unsuitable helper 2",
3071 .insns = {
3072 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3073 offsetof(struct __sk_buff, data)),
3074 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3075 offsetof(struct __sk_buff, data_end)),
3076 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3078 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3079 BPF_MOV64_IMM(BPF_REG_2, 0),
3080 BPF_MOV64_IMM(BPF_REG_4, 4),
3081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3082 BPF_FUNC_skb_load_bytes),
3083 BPF_MOV64_IMM(BPF_REG_0, 0),
3084 BPF_EXIT_INSN(),
3085 },
3086 .result = REJECT,
3087 .errstr = "helper access to the packet",
3088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3089 },
3090 {
3091 "helper access to packet: test13, cls helper ok",
3092 .insns = {
3093 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3094 offsetof(struct __sk_buff, data)),
3095 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3096 offsetof(struct __sk_buff, data_end)),
3097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3098 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3100 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3101 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3102 BPF_MOV64_IMM(BPF_REG_2, 4),
3103 BPF_MOV64_IMM(BPF_REG_3, 0),
3104 BPF_MOV64_IMM(BPF_REG_4, 0),
3105 BPF_MOV64_IMM(BPF_REG_5, 0),
3106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3107 BPF_FUNC_csum_diff),
3108 BPF_MOV64_IMM(BPF_REG_0, 0),
3109 BPF_EXIT_INSN(),
3110 },
3111 .result = ACCEPT,
3112 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3113 },
3114 {
3115 "helper access to packet: test14, cls helper fail sub",
3116 .insns = {
3117 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3118 offsetof(struct __sk_buff, data)),
3119 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3120 offsetof(struct __sk_buff, data_end)),
3121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3122 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3124 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3125 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3126 BPF_MOV64_IMM(BPF_REG_2, 4),
3127 BPF_MOV64_IMM(BPF_REG_3, 0),
3128 BPF_MOV64_IMM(BPF_REG_4, 0),
3129 BPF_MOV64_IMM(BPF_REG_5, 0),
3130 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3131 BPF_FUNC_csum_diff),
3132 BPF_MOV64_IMM(BPF_REG_0, 0),
3133 BPF_EXIT_INSN(),
3134 },
3135 .result = REJECT,
3136 .errstr = "type=inv expected=fp",
3137 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3138 },
3139 {
3140 "helper access to packet: test15, cls helper fail range 1",
3141 .insns = {
3142 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3143 offsetof(struct __sk_buff, data)),
3144 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3145 offsetof(struct __sk_buff, data_end)),
3146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3147 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3149 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3150 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3151 BPF_MOV64_IMM(BPF_REG_2, 8),
3152 BPF_MOV64_IMM(BPF_REG_3, 0),
3153 BPF_MOV64_IMM(BPF_REG_4, 0),
3154 BPF_MOV64_IMM(BPF_REG_5, 0),
3155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3156 BPF_FUNC_csum_diff),
3157 BPF_MOV64_IMM(BPF_REG_0, 0),
3158 BPF_EXIT_INSN(),
3159 },
3160 .result = REJECT,
3161 .errstr = "invalid access to packet",
3162 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3163 },
3164 {
3165 "helper access to packet: test16, cls helper fail range 2",
3166 .insns = {
3167 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3168 offsetof(struct __sk_buff, data)),
3169 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3170 offsetof(struct __sk_buff, data_end)),
3171 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3174 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3176 BPF_MOV64_IMM(BPF_REG_2, -9),
3177 BPF_MOV64_IMM(BPF_REG_3, 0),
3178 BPF_MOV64_IMM(BPF_REG_4, 0),
3179 BPF_MOV64_IMM(BPF_REG_5, 0),
3180 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3181 BPF_FUNC_csum_diff),
3182 BPF_MOV64_IMM(BPF_REG_0, 0),
3183 BPF_EXIT_INSN(),
3184 },
3185 .result = REJECT,
3186 .errstr = "invalid access to packet",
3187 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3188 },
3189 {
3190 "helper access to packet: test17, cls helper fail range 3",
3191 .insns = {
3192 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3193 offsetof(struct __sk_buff, data)),
3194 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3195 offsetof(struct __sk_buff, data_end)),
3196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3199 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3200 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3201 BPF_MOV64_IMM(BPF_REG_2, ~0),
3202 BPF_MOV64_IMM(BPF_REG_3, 0),
3203 BPF_MOV64_IMM(BPF_REG_4, 0),
3204 BPF_MOV64_IMM(BPF_REG_5, 0),
3205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3206 BPF_FUNC_csum_diff),
3207 BPF_MOV64_IMM(BPF_REG_0, 0),
3208 BPF_EXIT_INSN(),
3209 },
3210 .result = REJECT,
3211 .errstr = "invalid access to packet",
3212 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3213 },
3214 {
3215 "helper access to packet: test18, cls helper fail range zero",
3216 .insns = {
3217 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3218 offsetof(struct __sk_buff, data)),
3219 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3220 offsetof(struct __sk_buff, data_end)),
3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3222 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3224 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3226 BPF_MOV64_IMM(BPF_REG_2, 0),
3227 BPF_MOV64_IMM(BPF_REG_3, 0),
3228 BPF_MOV64_IMM(BPF_REG_4, 0),
3229 BPF_MOV64_IMM(BPF_REG_5, 0),
3230 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3231 BPF_FUNC_csum_diff),
3232 BPF_MOV64_IMM(BPF_REG_0, 0),
3233 BPF_EXIT_INSN(),
3234 },
3235 .result = REJECT,
3236 .errstr = "invalid access to packet",
3237 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3238 },
3239 {
3240 "helper access to packet: test19, pkt end as input",
3241 .insns = {
3242 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3243 offsetof(struct __sk_buff, data)),
3244 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3245 offsetof(struct __sk_buff, data_end)),
3246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3247 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3249 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3251 BPF_MOV64_IMM(BPF_REG_2, 4),
3252 BPF_MOV64_IMM(BPF_REG_3, 0),
3253 BPF_MOV64_IMM(BPF_REG_4, 0),
3254 BPF_MOV64_IMM(BPF_REG_5, 0),
3255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3256 BPF_FUNC_csum_diff),
3257 BPF_MOV64_IMM(BPF_REG_0, 0),
3258 BPF_EXIT_INSN(),
3259 },
3260 .result = REJECT,
3261 .errstr = "R1 type=pkt_end expected=fp",
3262 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3263 },
3264 {
3265 "helper access to packet: test20, wrong reg",
3266 .insns = {
3267 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3268 offsetof(struct __sk_buff, data)),
3269 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3270 offsetof(struct __sk_buff, data_end)),
3271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3274 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3275 BPF_MOV64_IMM(BPF_REG_2, 4),
3276 BPF_MOV64_IMM(BPF_REG_3, 0),
3277 BPF_MOV64_IMM(BPF_REG_4, 0),
3278 BPF_MOV64_IMM(BPF_REG_5, 0),
3279 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3280 BPF_FUNC_csum_diff),
3281 BPF_MOV64_IMM(BPF_REG_0, 0),
3282 BPF_EXIT_INSN(),
3283 },
3284 .result = REJECT,
3285 .errstr = "invalid access to packet",
3286 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3287 },
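/*
 * Map value (array) access tests: index the value returned by
 * bpf_map_lookup_elem() with constants, registers and loaded variables,
 * with and without proper floor/ceiling checks on the index.
 */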
3288 {
3289 "valid map access into an array with a constant",
3290 .insns = {
3291 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3294 BPF_LD_MAP_FD(BPF_REG_1, 0),
3295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3296 BPF_FUNC_map_lookup_elem),
3297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3298 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3299 offsetof(struct test_val, foo)),
3300 BPF_EXIT_INSN(),
3301 },
3302 .fixup_map2 = { 3 },
3303 .errstr_unpriv = "R0 leaks addr",
3304 .result_unpriv = REJECT,
3305 .result = ACCEPT,
3306 },
3307 {
3308 "valid map access into an array with a register",
3309 .insns = {
3310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3313 BPF_LD_MAP_FD(BPF_REG_1, 0),
3314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3315 BPF_FUNC_map_lookup_elem),
3316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3317 BPF_MOV64_IMM(BPF_REG_1, 4),
3318 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3319 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3320 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3321 offsetof(struct test_val, foo)),
3322 BPF_EXIT_INSN(),
3323 },
3324 .fixup_map2 = { 3 },
3325 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3326 .result_unpriv = REJECT,
3327 .result = ACCEPT,
3328 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3329 },
3330 {
3331 "valid map access into an array with a variable",
3332 .insns = {
3333 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3334 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3336 BPF_LD_MAP_FD(BPF_REG_1, 0),
3337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3338 BPF_FUNC_map_lookup_elem),
3339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3340 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3341 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3342 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3343 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3344 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3345 offsetof(struct test_val, foo)),
3346 BPF_EXIT_INSN(),
3347 },
3348 .fixup_map2 = { 3 },
3349 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3350 .result_unpriv = REJECT,
3351 .result = ACCEPT,
3352 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3353 },
3354 {
3355 "valid map access into an array with a signed variable",
3356 .insns = {
3357 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3358 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3360 BPF_LD_MAP_FD(BPF_REG_1, 0),
3361 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3362 BPF_FUNC_map_lookup_elem),
3363 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3364 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3365 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3366 BPF_MOV32_IMM(BPF_REG_1, 0),
3367 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3368 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3369 BPF_MOV32_IMM(BPF_REG_1, 0),
3370 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3371 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3372 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3373 offsetof(struct test_val, foo)),
3374 BPF_EXIT_INSN(),
3375 },
3376 .fixup_map2 = { 3 },
3377 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3378 .result_unpriv = REJECT,
3379 .result = ACCEPT,
3380 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3381 },
3382 {
3383 "invalid map access into an array with a constant",
3384 .insns = {
3385 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3388 BPF_LD_MAP_FD(BPF_REG_1, 0),
3389 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3390 BPF_FUNC_map_lookup_elem),
3391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3392 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3393 offsetof(struct test_val, foo)),
3394 BPF_EXIT_INSN(),
3395 },
3396 .fixup_map2 = { 3 },
3397 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3398 .result = REJECT,
3399 },
3400 {
3401 "invalid map access into an array with a register",
3402 .insns = {
3403 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3404 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3406 BPF_LD_MAP_FD(BPF_REG_1, 0),
3407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3408 BPF_FUNC_map_lookup_elem),
3409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3410 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3411 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3412 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3413 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3414 offsetof(struct test_val, foo)),
3415 BPF_EXIT_INSN(),
3416 },
3417 .fixup_map2 = { 3 },
3418 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3419 .errstr = "R0 min value is outside of the array range",
3420 .result_unpriv = REJECT,
3421 .result = REJECT,
3422 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3423 },
3424 {
3425 "invalid map access into an array with a variable",
3426 .insns = {
3427 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3430 BPF_LD_MAP_FD(BPF_REG_1, 0),
3431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3432 BPF_FUNC_map_lookup_elem),
3433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3434 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3435 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3437 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3438 offsetof(struct test_val, foo)),
3439 BPF_EXIT_INSN(),
3440 },
3441 .fixup_map2 = { 3 },
3442 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3443 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3444 .result_unpriv = REJECT,
3445 .result = REJECT,
3446 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3447 },
3448 {
3449 "invalid map access into an array with no floor check",
3450 .insns = {
3451 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3452 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3454 BPF_LD_MAP_FD(BPF_REG_1, 0),
3455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3456 BPF_FUNC_map_lookup_elem),
3457 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3458 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3459 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3460 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3461 BPF_MOV32_IMM(BPF_REG_1, 0),
3462 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3463 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3464 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3465 offsetof(struct test_val, foo)),
3466 BPF_EXIT_INSN(),
3467 },
3468 .fixup_map2 = { 3 },
3469 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3470 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3471 .result_unpriv = REJECT,
3472 .result = REJECT,
3473 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3474 },
3475 {
3476 "invalid map access into an array with a invalid max check",
3477 .insns = {
3478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3481 BPF_LD_MAP_FD(BPF_REG_1, 0),
3482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3483 BPF_FUNC_map_lookup_elem),
3484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3485 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3486 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3487 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3488 BPF_MOV32_IMM(BPF_REG_1, 0),
3489 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3490 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3491 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3492 offsetof(struct test_val, foo)),
3493 BPF_EXIT_INSN(),
3494 },
3495 .fixup_map2 = { 3 },
3496 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3497 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3498 .result_unpriv = REJECT,
3499 .result = REJECT,
3500 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3501 },
3502 {
3503 "invalid map access into an array with a invalid max check",
3504 .insns = {
3505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3508 BPF_LD_MAP_FD(BPF_REG_1, 0),
3509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3510 BPF_FUNC_map_lookup_elem),
3511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3512 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3513 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3516 BPF_LD_MAP_FD(BPF_REG_1, 0),
3517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3518 BPF_FUNC_map_lookup_elem),
3519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3520 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3521 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3522 offsetof(struct test_val, foo)),
3523 BPF_EXIT_INSN(),
3524 },
3525 .fixup_map2 = { 3, 11 },
3526 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3527 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3528 .result_unpriv = REJECT,
3529 .result = REJECT,
3530 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3531 },
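/*
 * The remaining tests check how the verifier tracks the
 * PTR_TO_MAP_VALUE_OR_NULL result of bpf_map_lookup_elem() across
 * register copies, ALU operations and repeated lookups, and that a NULL
 * check on one copy does not validate a stale or modified copy.
 */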
3532 {
3533 "multiple registers share map_lookup_elem result",
3534 .insns = {
3535 BPF_MOV64_IMM(BPF_REG_1, 10),
3536 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3539 BPF_LD_MAP_FD(BPF_REG_1, 0),
3540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3541 BPF_FUNC_map_lookup_elem),
3542 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3543 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3544 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3545 BPF_EXIT_INSN(),
3546 },
3547 .fixup_map1 = { 4 },
3548 .result = ACCEPT,
3549 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3550 },
3551 {
3552 "alu ops on ptr_to_map_value_or_null, 1",
3553 .insns = {
3554 BPF_MOV64_IMM(BPF_REG_1, 10),
3555 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3558 BPF_LD_MAP_FD(BPF_REG_1, 0),
3559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3560 BPF_FUNC_map_lookup_elem),
3561 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3565 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3566 BPF_EXIT_INSN(),
3567 },
3568 .fixup_map1 = { 4 },
3569 .errstr = "R4 invalid mem access",
3570 .result = REJECT,
3571 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3572 },
3573 {
3574 "alu ops on ptr_to_map_value_or_null, 2",
3575 .insns = {
3576 BPF_MOV64_IMM(BPF_REG_1, 10),
3577 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3578 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3580 BPF_LD_MAP_FD(BPF_REG_1, 0),
3581 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3582 BPF_FUNC_map_lookup_elem),
3583 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3584 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3586 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3587 BPF_EXIT_INSN(),
3588 },
3589 .fixup_map1 = { 4 },
3590 .errstr = "R4 invalid mem access",
3591 .result = REJECT,
3592 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3593 },
3594 {
3595 "alu ops on ptr_to_map_value_or_null, 3",
3596 .insns = {
3597 BPF_MOV64_IMM(BPF_REG_1, 10),
3598 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3601 BPF_LD_MAP_FD(BPF_REG_1, 0),
3602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3603 BPF_FUNC_map_lookup_elem),
3604 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3605 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3607 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3608 BPF_EXIT_INSN(),
3609 },
3610 .fixup_map1 = { 4 },
3611 .errstr = "R4 invalid mem access",
3612 .result = REJECT,
3613 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3614 },
3615 {
3616 "invalid memory access with multiple map_lookup_elem calls",
3617 .insns = {
3618 BPF_MOV64_IMM(BPF_REG_1, 10),
3619 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3620 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3622 BPF_LD_MAP_FD(BPF_REG_1, 0),
3623 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3624 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3625 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3626 BPF_FUNC_map_lookup_elem),
3627 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3628 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3629 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3630 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3631 BPF_FUNC_map_lookup_elem),
3632 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3633 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3634 BPF_EXIT_INSN(),
3635 },
3636 .fixup_map1 = { 4 },
3637 .result = REJECT,
3638 .errstr = "R4 !read_ok",
3639 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3640 },
3641 {
3642 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3643 .insns = {
3644 BPF_MOV64_IMM(BPF_REG_1, 10),
3645 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3648 BPF_LD_MAP_FD(BPF_REG_1, 0),
3649 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3650 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3651 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3652 BPF_FUNC_map_lookup_elem),
3653 BPF_MOV64_IMM(BPF_REG_2, 10),
3654 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3655 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3657 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3658 BPF_FUNC_map_lookup_elem),
3659 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3661 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3662 BPF_EXIT_INSN(),
3663 },
3664 .fixup_map1 = { 4 },
3665 .result = ACCEPT,
3666 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3667 },
3668 {
3669 "multiple registers share map_lookup_elem bad reg type",
3670 .insns = {
3671 BPF_MOV64_IMM(BPF_REG_1, 10),
3672 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3675 BPF_LD_MAP_FD(BPF_REG_1, 0),
3676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3677 BPF_FUNC_map_lookup_elem),
3678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3679 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3680 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3681 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3683 BPF_MOV64_IMM(BPF_REG_1, 1),
3684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3685 BPF_MOV64_IMM(BPF_REG_1, 2),
3686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3687 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3688 BPF_MOV64_IMM(BPF_REG_1, 3),
3689 BPF_EXIT_INSN(),
3690 },
3691 .fixup_map1 = { 4 },
3692 .result = REJECT,
3693 .errstr = "R3 invalid mem access 'inv'",
3694 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3695 },
3696 {
3697 "invalid map access from else condition",
3698 .insns = {
3699 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3702 BPF_LD_MAP_FD(BPF_REG_1, 0),
3703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3704 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3705 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3706 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3708 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3709 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3710 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3711 BPF_EXIT_INSN(),
3712 },
3713 .fixup_map2 = { 3 },
3714 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3715 .result = REJECT,
3716 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3717 .result_unpriv = REJECT,
3718 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3719 },
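/* ORing a constant into a register holding a known constant must keep the
 * constant tracking intact: the result is still usable as a helper size
 * argument, and a result that exceeds the accessible stack window must
 * still be caught by the stack bounds check.
 */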
3720 {
3721 "constant register |= constant should keep constant type",
3722 .insns = {
3723 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3725 BPF_MOV64_IMM(BPF_REG_2, 34),
3726 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3727 BPF_MOV64_IMM(BPF_REG_3, 0),
3728 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3729 BPF_EXIT_INSN(),
3730 },
3731 .result = ACCEPT,
3732 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3733 },
3734 {
3735 "constant register |= constant should not bypass stack boundary checks",
3736 .insns = {
3737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3739 BPF_MOV64_IMM(BPF_REG_2, 34),
3740 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3741 BPF_MOV64_IMM(BPF_REG_3, 0),
3742 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3743 BPF_EXIT_INSN(),
3744 },
3745 .errstr = "invalid stack type R1 off=-48 access_size=58",
3746 .result = REJECT,
3747 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3748 },
3749 {
3750 "constant register |= constant register should keep constant type",
3751 .insns = {
3752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3754 BPF_MOV64_IMM(BPF_REG_2, 34),
3755 BPF_MOV64_IMM(BPF_REG_4, 13),
3756 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3757 BPF_MOV64_IMM(BPF_REG_3, 0),
3758 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3759 BPF_EXIT_INSN(),
3760 },
3761 .result = ACCEPT,
3762 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3763 },
3764 {
3765 "constant register |= constant register should not bypass stack boundary checks",
3766 .insns = {
3767 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3769 BPF_MOV64_IMM(BPF_REG_2, 34),
3770 BPF_MOV64_IMM(BPF_REG_4, 24),
3771 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3772 BPF_MOV64_IMM(BPF_REG_3, 0),
3773 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3774 BPF_EXIT_INSN(),
3775 },
3776 .errstr = "invalid stack type R1 off=-48 access_size=58",
3777 .result = REJECT,
3778 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3779 },
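/* Direct packet access for the LWT program types: loads from the packet
 * are allowed for LWT_IN, LWT_OUT and LWT_XMIT, but stores are only
 * permitted for LWT_XMIT.
 */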
3780 {
3781 "invalid direct packet write for LWT_IN",
3782 .insns = {
3783 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3784 offsetof(struct __sk_buff, data)),
3785 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3786 offsetof(struct __sk_buff, data_end)),
3787 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3789 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3790 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3791 BPF_MOV64_IMM(BPF_REG_0, 0),
3792 BPF_EXIT_INSN(),
3793 },
3794 .errstr = "cannot write into packet",
3795 .result = REJECT,
3796 .prog_type = BPF_PROG_TYPE_LWT_IN,
3797 },
3798 {
3799 "invalid direct packet write for LWT_OUT",
3800 .insns = {
3801 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3802 offsetof(struct __sk_buff, data)),
3803 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3804 offsetof(struct __sk_buff, data_end)),
3805 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3807 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3808 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3809 BPF_MOV64_IMM(BPF_REG_0, 0),
3810 BPF_EXIT_INSN(),
3811 },
3812 .errstr = "cannot write into packet",
3813 .result = REJECT,
3814 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3815 },
3816 {
3817 "direct packet write for LWT_XMIT",
3818 .insns = {
3819 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3820 offsetof(struct __sk_buff, data)),
3821 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3822 offsetof(struct __sk_buff, data_end)),
3823 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3825 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3826 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3827 BPF_MOV64_IMM(BPF_REG_0, 0),
3828 BPF_EXIT_INSN(),
3829 },
3830 .result = ACCEPT,
3831 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3832 },
3833 {
3834 "direct packet read for LWT_IN",
3835 .insns = {
3836 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3837 offsetof(struct __sk_buff, data)),
3838 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3839 offsetof(struct __sk_buff, data_end)),
3840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3842 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3843 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3844 BPF_MOV64_IMM(BPF_REG_0, 0),
3845 BPF_EXIT_INSN(),
3846 },
3847 .result = ACCEPT,
3848 .prog_type = BPF_PROG_TYPE_LWT_IN,
3849 },
3850 {
3851 "direct packet read for LWT_OUT",
3852 .insns = {
3853 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3854 offsetof(struct __sk_buff, data)),
3855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3856 offsetof(struct __sk_buff, data_end)),
3857 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3859 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3860 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3861 BPF_MOV64_IMM(BPF_REG_0, 0),
3862 BPF_EXIT_INSN(),
3863 },
3864 .result = ACCEPT,
3865 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3866 },
3867 {
3868 "direct packet read for LWT_XMIT",
3869 .insns = {
3870 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3871 offsetof(struct __sk_buff, data)),
3872 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3873 offsetof(struct __sk_buff, data_end)),
3874 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3876 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3877 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3878 BPF_MOV64_IMM(BPF_REG_0, 0),
3879 BPF_EXIT_INSN(),
3880 },
3881 .result = ACCEPT,
3882 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3883 },
3884 {
3885 "overlapping checks for direct packet access",
3886 .insns = {
3887 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3888 offsetof(struct __sk_buff, data)),
3889 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3890 offsetof(struct __sk_buff, data_end)),
3891 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3893 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3894 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3896 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3897 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3898 BPF_MOV64_IMM(BPF_REG_0, 0),
3899 BPF_EXIT_INSN(),
3900 },
3901 .result = ACCEPT,
3902 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3903 },
3904 {
3905 "invalid access of tc_classid for LWT_IN",
3906 .insns = {
3907 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3908 offsetof(struct __sk_buff, tc_classid)),
3909 BPF_EXIT_INSN(),
3910 },
3911 .result = REJECT,
3912 .errstr = "invalid bpf_context access",
3913 },
3914 {
3915 "invalid access of tc_classid for LWT_OUT",
3916 .insns = {
3917 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3918 offsetof(struct __sk_buff, tc_classid)),
3919 BPF_EXIT_INSN(),
3920 },
3921 .result = REJECT,
3922 .errstr = "invalid bpf_context access",
3923 },
3924 {
3925 "invalid access of tc_classid for LWT_XMIT",
3926 .insns = {
3927 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3928 offsetof(struct __sk_buff, tc_classid)),
3929 BPF_EXIT_INSN(),
3930 },
3931 .result = REJECT,
3932 .errstr = "invalid bpf_context access",
3933 },
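/* Pointer leak checks: storing a map or stack pointer into skb->cb[] or
 * into a map value is rejected for unprivileged users ("leaks addr"),
 * while the same programs are accepted when loaded with privileges.
 */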
3934 {
3935 "leak pointer into ctx 1",
3936 .insns = {
3937 BPF_MOV64_IMM(BPF_REG_0, 0),
3938 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
3939 offsetof(struct __sk_buff, cb[0])),
3940 BPF_LD_MAP_FD(BPF_REG_2, 0),
3941 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
3942 offsetof(struct __sk_buff, cb[0])),
3943 BPF_EXIT_INSN(),
3944 },
3945 .fixup_map1 = { 2 },
3946 .errstr_unpriv = "R2 leaks addr into mem",
3947 .result_unpriv = REJECT,
3948 .result = ACCEPT,
3949 },
3950 {
3951 "leak pointer into ctx 2",
3952 .insns = {
3953 BPF_MOV64_IMM(BPF_REG_0, 0),
3954 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
3955 offsetof(struct __sk_buff, cb[0])),
3956 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
3957 offsetof(struct __sk_buff, cb[0])),
3958 BPF_EXIT_INSN(),
3959 },
3960 .errstr_unpriv = "R10 leaks addr into mem",
3961 .result_unpriv = REJECT,
3962 .result = ACCEPT,
3963 },
3964 {
3965 "leak pointer into ctx 3",
3966 .insns = {
3967 BPF_MOV64_IMM(BPF_REG_0, 0),
3968 BPF_LD_MAP_FD(BPF_REG_2, 0),
3969 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
3970 offsetof(struct __sk_buff, cb[0])),
3971 BPF_EXIT_INSN(),
3972 },
3973 .fixup_map1 = { 1 },
3974 .errstr_unpriv = "R2 leaks addr into ctx",
3975 .result_unpriv = REJECT,
3976 .result = ACCEPT,
3977 },
3978 {
3979 "leak pointer into map val",
3980 .insns = {
3981 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
3982 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3985 BPF_LD_MAP_FD(BPF_REG_1, 0),
3986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3987 BPF_FUNC_map_lookup_elem),
3988 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
3989 BPF_MOV64_IMM(BPF_REG_3, 0),
3990 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
3991 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3992 BPF_MOV64_IMM(BPF_REG_0, 0),
3993 BPF_EXIT_INSN(),
3994 },
3995 .fixup_map1 = { 4 },
3996 .errstr_unpriv = "R6 leaks addr into mem",
3997 .result_unpriv = REJECT,
3998 .result = ACCEPT,
3999 },
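/* Helper memory-argument checks against a whole map value: bpf_probe_read()
 * gets a pointer to the looked-up value plus a size, and the size must lie
 * within value_size and be neither zero nor negative.
 */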
4000 {
4001 "helper access to map: full range",
4002 .insns = {
4003 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4005 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4006 BPF_LD_MAP_FD(BPF_REG_1, 0),
4007 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4008 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4009 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4010 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4011 BPF_MOV64_IMM(BPF_REG_3, 0),
4012 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4013 BPF_EXIT_INSN(),
4014 },
4015 .fixup_map2 = { 3 },
4016 .result = ACCEPT,
4017 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4018 },
4019 {
4020 "helper access to map: partial range",
4021 .insns = {
4022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4024 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4025 BPF_LD_MAP_FD(BPF_REG_1, 0),
4026 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4027 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4028 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4029 BPF_MOV64_IMM(BPF_REG_2, 8),
4030 BPF_MOV64_IMM(BPF_REG_3, 0),
4031 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4032 BPF_EXIT_INSN(),
4033 },
4034 .fixup_map2 = { 3 },
4035 .result = ACCEPT,
4036 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4037 },
4038 {
4039 "helper access to map: empty range",
4040 .insns = {
4041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4043 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4044 BPF_LD_MAP_FD(BPF_REG_1, 0),
4045 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4048 BPF_MOV64_IMM(BPF_REG_2, 0),
4049 BPF_MOV64_IMM(BPF_REG_3, 0),
4050 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4051 BPF_EXIT_INSN(),
4052 },
4053 .fixup_map2 = { 3 },
4054 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4055 .result = REJECT,
4056 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4057 },
4058 {
4059 "helper access to map: out-of-bound range",
4060 .insns = {
4061 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4063 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4064 BPF_LD_MAP_FD(BPF_REG_1, 0),
4065 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4066 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4067 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4068 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4069 BPF_MOV64_IMM(BPF_REG_3, 0),
4070 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4071 BPF_EXIT_INSN(),
4072 },
4073 .fixup_map2 = { 3 },
4074 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4075 .result = REJECT,
4076 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4077 },
4078 {
4079 "helper access to map: negative range",
4080 .insns = {
4081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4083 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4084 BPF_LD_MAP_FD(BPF_REG_1, 0),
4085 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4087 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4088 BPF_MOV64_IMM(BPF_REG_2, -8),
4089 BPF_MOV64_IMM(BPF_REG_3, 0),
4090 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4091 BPF_EXIT_INSN(),
4092 },
4093 .fixup_map2 = { 3 },
4094 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
4095 .result = REJECT,
4096 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4097 },
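/* The same checks after the map value pointer has been advanced by a
 * constant immediate (offsetof(struct test_val, foo)); the permitted size
 * shrinks by that offset.
 */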
4098 {
4099 "helper access to adjusted map (via const imm): full range",
4100 .insns = {
4101 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4103 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4104 BPF_LD_MAP_FD(BPF_REG_1, 0),
4105 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4109 offsetof(struct test_val, foo)),
4110 BPF_MOV64_IMM(BPF_REG_2,
4111 sizeof(struct test_val) -
4112 offsetof(struct test_val, foo)),
4113 BPF_MOV64_IMM(BPF_REG_3, 0),
4114 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4115 BPF_EXIT_INSN(),
4116 },
4117 .fixup_map2 = { 3 },
4118 .result = ACCEPT,
4119 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4120 },
4121 {
4122 "helper access to adjusted map (via const imm): partial range",
4123 .insns = {
4124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4126 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4127 BPF_LD_MAP_FD(BPF_REG_1, 0),
4128 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4129 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4132 offsetof(struct test_val, foo)),
4133 BPF_MOV64_IMM(BPF_REG_2, 8),
4134 BPF_MOV64_IMM(BPF_REG_3, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4136 BPF_EXIT_INSN(),
4137 },
4138 .fixup_map2 = { 3 },
4139 .result = ACCEPT,
4140 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4141 },
4142 {
4143 "helper access to adjusted map (via const imm): empty range",
4144 .insns = {
4145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4147 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4148 BPF_LD_MAP_FD(BPF_REG_1, 0),
4149 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4153 offsetof(struct test_val, foo)),
4154 BPF_MOV64_IMM(BPF_REG_2, 0),
4155 BPF_MOV64_IMM(BPF_REG_3, 0),
4156 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4157 BPF_EXIT_INSN(),
4158 },
4159 .fixup_map2 = { 3 },
4160 .errstr = "R1 min value is outside of the array range",
4161 .result = REJECT,
4162 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4163 },
4164 {
4165 "helper access to adjusted map (via const imm): out-of-bound range",
4166 .insns = {
4167 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4169 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4170 BPF_LD_MAP_FD(BPF_REG_1, 0),
4171 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4175 offsetof(struct test_val, foo)),
4176 BPF_MOV64_IMM(BPF_REG_2,
4177 sizeof(struct test_val) -
4178 offsetof(struct test_val, foo) + 8),
4179 BPF_MOV64_IMM(BPF_REG_3, 0),
4180 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4181 BPF_EXIT_INSN(),
4182 },
4183 .fixup_map2 = { 3 },
4184 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4185 .result = REJECT,
4186 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4187 },
4188 {
4189 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4190 .insns = {
4191 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4193 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4194 BPF_LD_MAP_FD(BPF_REG_1, 0),
4195 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4196 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4199 offsetof(struct test_val, foo)),
4200 BPF_MOV64_IMM(BPF_REG_2, -8),
4201 BPF_MOV64_IMM(BPF_REG_3, 0),
4202 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4203 BPF_EXIT_INSN(),
4204 },
4205 .fixup_map2 = { 3 },
4206 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
4207 .result = REJECT,
4208 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4209 },
4210 {
4211 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4212 .insns = {
4213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4215 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4216 BPF_LD_MAP_FD(BPF_REG_1, 0),
4217 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4219 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4221 offsetof(struct test_val, foo)),
4222 BPF_MOV64_IMM(BPF_REG_2, -1),
4223 BPF_MOV64_IMM(BPF_REG_3, 0),
4224 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4225 BPF_EXIT_INSN(),
4226 },
4227 .fixup_map2 = { 3 },
4228 .errstr = "R1 min value is outside of the array range",
4229 .result = REJECT,
4230 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4231 },
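/* As above, but the offset is added via a register holding a known
 * constant rather than via an immediate.
 */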
4232 {
4233 "helper access to adjusted map (via const reg): full range",
4234 .insns = {
4235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4237 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4238 BPF_LD_MAP_FD(BPF_REG_1, 0),
4239 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4241 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4242 BPF_MOV64_IMM(BPF_REG_3,
4243 offsetof(struct test_val, foo)),
4244 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4245 BPF_MOV64_IMM(BPF_REG_2,
4246 sizeof(struct test_val) -
4247 offsetof(struct test_val, foo)),
4248 BPF_MOV64_IMM(BPF_REG_3, 0),
4249 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4250 BPF_EXIT_INSN(),
4251 },
4252 .fixup_map2 = { 3 },
4253 .result = ACCEPT,
4254 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4255 },
4256 {
4257 "helper access to adjusted map (via const reg): partial range",
4258 .insns = {
4259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4261 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4262 BPF_LD_MAP_FD(BPF_REG_1, 0),
4263 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4266 BPF_MOV64_IMM(BPF_REG_3,
4267 offsetof(struct test_val, foo)),
4268 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4269 BPF_MOV64_IMM(BPF_REG_2, 8),
4270 BPF_MOV64_IMM(BPF_REG_3, 0),
4271 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4272 BPF_EXIT_INSN(),
4273 },
4274 .fixup_map2 = { 3 },
4275 .result = ACCEPT,
4276 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4277 },
4278 {
4279 "helper access to adjusted map (via const reg): empty range",
4280 .insns = {
4281 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4283 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4284 BPF_LD_MAP_FD(BPF_REG_1, 0),
4285 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4286 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4288 BPF_MOV64_IMM(BPF_REG_3, 0),
4289 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4290 BPF_MOV64_IMM(BPF_REG_2, 0),
4291 BPF_MOV64_IMM(BPF_REG_3, 0),
4292 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4293 BPF_EXIT_INSN(),
4294 },
4295 .fixup_map2 = { 3 },
4296 .errstr = "R1 min value is outside of the array range",
4297 .result = REJECT,
4298 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4299 },
4300 {
4301 "helper access to adjusted map (via const reg): out-of-bound range",
4302 .insns = {
4303 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4305 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4306 BPF_LD_MAP_FD(BPF_REG_1, 0),
4307 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4308 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4310 BPF_MOV64_IMM(BPF_REG_3,
4311 offsetof(struct test_val, foo)),
4312 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4313 BPF_MOV64_IMM(BPF_REG_2,
4314 sizeof(struct test_val) -
4315 offsetof(struct test_val, foo) + 8),
4316 BPF_MOV64_IMM(BPF_REG_3, 0),
4317 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4318 BPF_EXIT_INSN(),
4319 },
4320 .fixup_map2 = { 3 },
4321 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4322 .result = REJECT,
4323 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4324 },
4325 {
4326 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4327 .insns = {
4328 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4330 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4331 BPF_LD_MAP_FD(BPF_REG_1, 0),
4332 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4333 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4335 BPF_MOV64_IMM(BPF_REG_3,
4336 offsetof(struct test_val, foo)),
4337 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4338 BPF_MOV64_IMM(BPF_REG_2, -8),
4339 BPF_MOV64_IMM(BPF_REG_3, 0),
4340 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4341 BPF_EXIT_INSN(),
4342 },
4343 .fixup_map2 = { 3 },
4344 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
4345 .result = REJECT,
4346 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4347 },
4348 {
4349 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4350 .insns = {
4351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4353 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4354 BPF_LD_MAP_FD(BPF_REG_1, 0),
4355 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4356 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4358 BPF_MOV64_IMM(BPF_REG_3,
4359 offsetof(struct test_val, foo)),
4360 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4361 BPF_MOV64_IMM(BPF_REG_2, -1),
4362 BPF_MOV64_IMM(BPF_REG_3, 0),
4363 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4364 BPF_EXIT_INSN(),
4365 },
4366 .fixup_map2 = { 3 },
4367 .errstr = "R1 min value is outside of the array range",
4368 .result = REJECT,
4369 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4370 },
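/* Here the offset comes from a value read out of the map itself, so it is
 * only usable once an upper bound (JGT) has been established; without a
 * bound the minimum value may be negative, and a too-large bound overruns
 * the map value.
 */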
4371 {
4372 "helper access to adjusted map (via variable): full range",
4373 .insns = {
4374 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4376 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4377 BPF_LD_MAP_FD(BPF_REG_1, 0),
4378 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4379 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4380 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4381 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4382 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4383 offsetof(struct test_val, foo), 4),
4384 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4385 BPF_MOV64_IMM(BPF_REG_2,
4386 sizeof(struct test_val) -
4387 offsetof(struct test_val, foo)),
4388 BPF_MOV64_IMM(BPF_REG_3, 0),
4389 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4390 BPF_EXIT_INSN(),
4391 },
4392 .fixup_map2 = { 3 },
4393 .result = ACCEPT,
4394 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4395 },
4396 {
4397 "helper access to adjusted map (via variable): partial range",
4398 .insns = {
4399 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4401 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4402 BPF_LD_MAP_FD(BPF_REG_1, 0),
4403 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4404 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4406 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4407 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4408 offsetof(struct test_val, foo), 4),
4409 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4410 BPF_MOV64_IMM(BPF_REG_2, 8),
4411 BPF_MOV64_IMM(BPF_REG_3, 0),
4412 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4413 BPF_EXIT_INSN(),
4414 },
4415 .fixup_map2 = { 3 },
4416 .result = ACCEPT,
4417 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4418 },
4419 {
4420 "helper access to adjusted map (via variable): empty range",
4421 .insns = {
4422 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4424 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4425 BPF_LD_MAP_FD(BPF_REG_1, 0),
4426 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4427 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4428 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4429 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4430 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4431 offsetof(struct test_val, foo), 4),
4432 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4433 BPF_MOV64_IMM(BPF_REG_2, 0),
4434 BPF_MOV64_IMM(BPF_REG_3, 0),
4435 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4436 BPF_EXIT_INSN(),
4437 },
4438 .fixup_map2 = { 3 },
4439 .errstr = "R1 min value is outside of the array range",
4440 .result = REJECT,
4441 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4442 },
4443 {
4444 "helper access to adjusted map (via variable): no max check",
4445 .insns = {
4446 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4448 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4449 BPF_LD_MAP_FD(BPF_REG_1, 0),
4450 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4451 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4452 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4453 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4454 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4455 BPF_MOV64_IMM(BPF_REG_2, 0),
4456 BPF_MOV64_IMM(BPF_REG_3, 0),
4457 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4458 BPF_EXIT_INSN(),
4459 },
4460 .fixup_map2 = { 3 },
4461 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
4462 .result = REJECT,
4463 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4464 },
4465 {
4466 "helper access to adjusted map (via variable): wrong max check",
4467 .insns = {
4468 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4470 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4471 BPF_LD_MAP_FD(BPF_REG_1, 0),
4472 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4473 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4475 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4476 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4477 offsetof(struct test_val, foo), 4),
4478 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4479 BPF_MOV64_IMM(BPF_REG_2,
4480 sizeof(struct test_val) -
4481 offsetof(struct test_val, foo) + 1),
4482 BPF_MOV64_IMM(BPF_REG_3, 0),
4483 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4484 BPF_EXIT_INSN(),
4485 },
4486 .fixup_map2 = { 3 },
4487 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4488 .result = REJECT,
4489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4490 },
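/* Register spilling: a map value pointer written to the stack and read
 * back must keep its bounds, and a lookup result spilled before the NULL
 * check must still be recognized as map-value-or-null after the reload.
 */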
4491 {
4492 "map element value is preserved across register spilling",
4493 .insns = {
4494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4496 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4497 BPF_LD_MAP_FD(BPF_REG_1, 0),
4498 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4500 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4501 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4503 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4504 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4505 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4506 BPF_EXIT_INSN(),
4507 },
4508 .fixup_map2 = { 3 },
4509 .errstr_unpriv = "R0 leaks addr",
4510 .result = ACCEPT,
4511 .result_unpriv = REJECT,
4512 },
4513 {
4514 "map element value or null is marked on register spilling",
4515 .insns = {
4516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4518 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4519 BPF_LD_MAP_FD(BPF_REG_1, 0),
4520 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4521 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4523 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4525 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4526 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4527 BPF_EXIT_INSN(),
4528 },
4529 .fixup_map2 = { 3 },
4530 .errstr_unpriv = "R0 leaks addr",
4531 .result = ACCEPT,
4532 .result_unpriv = REJECT,
4533 },
4534 {
4535 "map element value store of cleared call register",
4536 .insns = {
4537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4539 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4540 BPF_LD_MAP_FD(BPF_REG_1, 0),
4541 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4543 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4544 BPF_EXIT_INSN(),
4545 },
4546 .fixup_map2 = { 3 },
4547 .errstr_unpriv = "R1 !read_ok",
4548 .errstr = "R1 !read_ok",
4549 .result = REJECT,
4550 .result_unpriv = REJECT,
4551 },
4552 {
4553 "map element value with unaligned store",
4554 .insns = {
4555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4557 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4558 BPF_LD_MAP_FD(BPF_REG_1, 0),
4559 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4562 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4563 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4564 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4565 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4566 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4567 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4568 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4570 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4571 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4572 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4573 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4575 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4576 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4577 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4578 BPF_EXIT_INSN(),
4579 },
4580 .fixup_map2 = { 3 },
4581 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4582 .result = ACCEPT,
4583 .result_unpriv = REJECT,
4584 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4585 },
4586 {
4587 "map element value with unaligned load",
4588 .insns = {
4589 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4591 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4592 BPF_LD_MAP_FD(BPF_REG_1, 0),
4593 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4595 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4596 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4598 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4599 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4600 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4601 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4602 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4603 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4604 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4605 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4606 BPF_EXIT_INSN(),
4607 },
4608 .fixup_map2 = { 3 },
4609 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4610 .result = ACCEPT,
4611 .result_unpriv = REJECT,
4612 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4613 },
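/* Illegal ALU ops on a map value pointer (AND, 32-bit ADD, DIV, byte swap,
 * or XADD through a stack slot) turn it into an unknown scalar, so the
 * subsequent store is rejected as an invalid 'inv' memory access.
 */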
4614 {
4615 "map element value illegal alu op, 1",
4616 .insns = {
4617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4619 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4620 BPF_LD_MAP_FD(BPF_REG_1, 0),
4621 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4623 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4624 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4625 BPF_EXIT_INSN(),
4626 },
4627 .fixup_map2 = { 3 },
4628 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4629 .errstr = "invalid mem access 'inv'",
4630 .result = REJECT,
4631 .result_unpriv = REJECT,
4632 },
4633 {
4634 "map element value illegal alu op, 2",
4635 .insns = {
4636 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4638 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4639 BPF_LD_MAP_FD(BPF_REG_1, 0),
4640 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4641 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4642 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4643 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4644 BPF_EXIT_INSN(),
4645 },
4646 .fixup_map2 = { 3 },
4647 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4648 .errstr = "invalid mem access 'inv'",
4649 .result = REJECT,
4650 .result_unpriv = REJECT,
4651 },
4652 {
4653 "map element value illegal alu op, 3",
4654 .insns = {
4655 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4656 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4657 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4658 BPF_LD_MAP_FD(BPF_REG_1, 0),
4659 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4661 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4662 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4663 BPF_EXIT_INSN(),
4664 },
4665 .fixup_map2 = { 3 },
4666 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4667 .errstr = "invalid mem access 'inv'",
4668 .result = REJECT,
4669 .result_unpriv = REJECT,
4670 },
4671 {
4672 "map element value illegal alu op, 4",
4673 .insns = {
4674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4676 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4677 BPF_LD_MAP_FD(BPF_REG_1, 0),
4678 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4680 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4681 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4682 BPF_EXIT_INSN(),
4683 },
4684 .fixup_map2 = { 3 },
4685 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4686 .errstr = "invalid mem access 'inv'",
4687 .result = REJECT,
4688 .result_unpriv = REJECT,
4689 },
4690 {
4691 "map element value illegal alu op, 5",
4692 .insns = {
4693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4695 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4696 BPF_LD_MAP_FD(BPF_REG_1, 0),
4697 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4699 BPF_MOV64_IMM(BPF_REG_3, 4096),
4700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4702 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4703 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4704 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4705 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4706 BPF_EXIT_INSN(),
4707 },
4708 .fixup_map2 = { 3 },
4709 .errstr_unpriv = "R0 invalid mem access 'inv'",
4710 .errstr = "R0 invalid mem access 'inv'",
4711 .result = REJECT,
4712 .result_unpriv = REJECT,
4713 },
4714 {
4715 "map element value is preserved across register spilling",
4716 .insns = {
4717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4719 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4720 BPF_LD_MAP_FD(BPF_REG_1, 0),
4721 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
4724 offsetof(struct test_val, foo)),
4725 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4726 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4728 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4729 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4730 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4731 BPF_EXIT_INSN(),
4732 },
4733 .fixup_map2 = { 3 },
4734 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4735 .result = ACCEPT,
4736 .result_unpriv = REJECT,
4737 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4738 },
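/* Variable-size helper access into the stack: the size register must be
 * provably bounded, either by masking it with AND or by explicit min/max
 * jumps, and the resulting window must stay within initialized stack.
 */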
4739 {
4740 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
4741 .insns = {
4742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4744 BPF_MOV64_IMM(BPF_REG_0, 0),
4745 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4746 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4747 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4748 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4749 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4750 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4751 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4752 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4753 BPF_MOV64_IMM(BPF_REG_2, 16),
4754 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4755 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4756 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4757 BPF_MOV64_IMM(BPF_REG_4, 0),
4758 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4759 BPF_MOV64_IMM(BPF_REG_3, 0),
4760 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4761 BPF_MOV64_IMM(BPF_REG_0, 0),
4762 BPF_EXIT_INSN(),
4763 },
4764 .result = ACCEPT,
4765 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4766 },
4767 {
4768 "helper access to variable memory: stack, bitwise AND, zero included",
4769 .insns = {
4770 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4772 BPF_MOV64_IMM(BPF_REG_2, 16),
4773 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4774 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4775 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4776 BPF_MOV64_IMM(BPF_REG_3, 0),
4777 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4778 BPF_EXIT_INSN(),
4779 },
4780 .errstr = "invalid stack type R1 off=-64 access_size=0",
4781 .result = REJECT,
4782 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4783 },
4784 {
4785 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4786 .insns = {
4787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4789 BPF_MOV64_IMM(BPF_REG_2, 16),
4790 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4791 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4792 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4793 BPF_MOV64_IMM(BPF_REG_4, 0),
4794 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4795 BPF_MOV64_IMM(BPF_REG_3, 0),
4796 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4797 BPF_MOV64_IMM(BPF_REG_0, 0),
4798 BPF_EXIT_INSN(),
4799 },
4800 .errstr = "invalid stack type R1 off=-64 access_size=65",
4801 .result = REJECT,
4802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4803 },
4804 {
4805 "helper access to variable memory: stack, JMP, correct bounds",
4806 .insns = {
4807 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4809 BPF_MOV64_IMM(BPF_REG_0, 0),
4810 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4811 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4812 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4813 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4815 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4816 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4817 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4818 BPF_MOV64_IMM(BPF_REG_2, 16),
4819 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4820 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4821 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4822 BPF_MOV64_IMM(BPF_REG_4, 0),
4823 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4824 BPF_MOV64_IMM(BPF_REG_3, 0),
4825 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4826 BPF_MOV64_IMM(BPF_REG_0, 0),
4827 BPF_EXIT_INSN(),
4828 },
4829 .result = ACCEPT,
4830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4831 },
4832 {
4833 "helper access to variable memory: stack, JMP (signed), correct bounds",
4834 .insns = {
4835 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4837 BPF_MOV64_IMM(BPF_REG_0, 0),
4838 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4839 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4840 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4841 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4842 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4843 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4844 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4845 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4846 BPF_MOV64_IMM(BPF_REG_2, 16),
4847 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4848 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4849 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4850 BPF_MOV64_IMM(BPF_REG_4, 0),
4851 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4852 BPF_MOV64_IMM(BPF_REG_3, 0),
4853 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4854 BPF_MOV64_IMM(BPF_REG_0, 0),
4855 BPF_EXIT_INSN(),
4856 },
4857 .result = ACCEPT,
4858 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4859 },
4860 {
4861 "helper access to variable memory: stack, JMP, bounds + offset",
4862 .insns = {
4863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4865 BPF_MOV64_IMM(BPF_REG_2, 16),
4866 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4867 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4868 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4869 BPF_MOV64_IMM(BPF_REG_4, 0),
4870 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4872 BPF_MOV64_IMM(BPF_REG_3, 0),
4873 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4874 BPF_MOV64_IMM(BPF_REG_0, 0),
4875 BPF_EXIT_INSN(),
4876 },
4877 .errstr = "invalid stack type R1 off=-64 access_size=65",
4878 .result = REJECT,
4879 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4880 },
4881 {
4882 "helper access to variable memory: stack, JMP, wrong max",
4883 .insns = {
4884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4886 BPF_MOV64_IMM(BPF_REG_2, 16),
4887 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4888 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4889 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4890 BPF_MOV64_IMM(BPF_REG_4, 0),
4891 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4892 BPF_MOV64_IMM(BPF_REG_3, 0),
4893 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4894 BPF_MOV64_IMM(BPF_REG_0, 0),
4895 BPF_EXIT_INSN(),
4896 },
4897 .errstr = "invalid stack type R1 off=-64 access_size=65",
4898 .result = REJECT,
4899 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4900 },
4901 {
4902 "helper access to variable memory: stack, JMP, no max check",
4903 .insns = {
4904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4906 BPF_MOV64_IMM(BPF_REG_2, 16),
4907 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4908 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4909 BPF_MOV64_IMM(BPF_REG_4, 0),
4910 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4911 BPF_MOV64_IMM(BPF_REG_3, 0),
4912 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4913 BPF_MOV64_IMM(BPF_REG_0, 0),
4914 BPF_EXIT_INSN(),
4915 },
4916 .errstr = "R2 unbounded memory access",
4917 .result = REJECT,
4918 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4919 },
4920 {
4921 "helper access to variable memory: stack, JMP, no min check",
4922 .insns = {
4923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4925 BPF_MOV64_IMM(BPF_REG_2, 16),
4926 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4927 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4928 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4929 BPF_MOV64_IMM(BPF_REG_3, 0),
4930 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4931 BPF_MOV64_IMM(BPF_REG_0, 0),
4932 BPF_EXIT_INSN(),
4933 },
4934 .errstr = "invalid stack type R1 off=-64 access_size=0",
4935 .result = REJECT,
4936 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4937 },
4938 {
4939 "helper access to variable memory: stack, JMP (signed), no min check",
4940 .insns = {
4941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4943 BPF_MOV64_IMM(BPF_REG_2, 16),
4944 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4945 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4946 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4947 BPF_MOV64_IMM(BPF_REG_3, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4949 BPF_MOV64_IMM(BPF_REG_0, 0),
4950 BPF_EXIT_INSN(),
4951 },
4952 .errstr = "R2 min value is negative",
4953 .result = REJECT,
4954 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4955 },
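/* Variable-size helper access into a map value, with the size spilled to
 * the stack, reloaded and bounded by signed compares against
 * sizeof(struct test_val).
 */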
4956 {
4957 "helper access to variable memory: map, JMP, correct bounds",
4958 .insns = {
4959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4961 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4962 BPF_LD_MAP_FD(BPF_REG_1, 0),
4963 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4965 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4966 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4967 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4968 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4969 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4970 sizeof(struct test_val), 4),
4971 BPF_MOV64_IMM(BPF_REG_4, 0),
4972 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4973 BPF_MOV64_IMM(BPF_REG_3, 0),
4974 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4975 BPF_MOV64_IMM(BPF_REG_0, 0),
4976 BPF_EXIT_INSN(),
4977 },
4978 .fixup_map2 = { 3 },
4979 .result = ACCEPT,
4980 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4981 },
4982 {
4983 "helper access to variable memory: map, JMP, wrong max",
4984 .insns = {
4985 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4987 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4988 BPF_LD_MAP_FD(BPF_REG_1, 0),
4989 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4990 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4992 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4993 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4994 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4995 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4996 sizeof(struct test_val) + 1, 4),
4997 BPF_MOV64_IMM(BPF_REG_4, 0),
4998 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4999 BPF_MOV64_IMM(BPF_REG_3, 0),
5000 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5001 BPF_MOV64_IMM(BPF_REG_0, 0),
5002 BPF_EXIT_INSN(),
5003 },
5004 .fixup_map2 = { 3 },
5005 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5006 .result = REJECT,
5007 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5008 },
5009 {
5010 "helper access to variable memory: map adjusted, JMP, correct bounds",
5011 .insns = {
5012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5014 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5015 BPF_LD_MAP_FD(BPF_REG_1, 0),
5016 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5017 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5018 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5020 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5021 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5022 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5023 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5024 sizeof(struct test_val) - 20, 4),
5025 BPF_MOV64_IMM(BPF_REG_4, 0),
5026 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5027 BPF_MOV64_IMM(BPF_REG_3, 0),
5028 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5029 BPF_MOV64_IMM(BPF_REG_0, 0),
5030 BPF_EXIT_INSN(),
5031 },
5032 .fixup_map2 = { 3 },
5033 .result = ACCEPT,
5034 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5035 },
5036 {
5037 "helper access to variable memory: map adjusted, JMP, wrong max",
5038 .insns = {
5039 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5041 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5042 BPF_LD_MAP_FD(BPF_REG_1, 0),
5043 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5045 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5046 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5047 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5048 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5049 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5050 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5051 sizeof(struct test_val) - 19, 4),
5052 BPF_MOV64_IMM(BPF_REG_4, 0),
5053 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5054 BPF_MOV64_IMM(BPF_REG_3, 0),
5055 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5056 BPF_MOV64_IMM(BPF_REG_0, 0),
5057 BPF_EXIT_INSN(),
5058 },
5059 .fixup_map2 = { 3 },
5060 .errstr = "R1 min value is outside of the array range",
5061 .result = REJECT,
5062 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5063 },
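/* bpf_csum_diff() memory arguments: passing a NULL (immediate 0) pointer
 * with a possibly non-zero size is rejected, as is a valid stack pointer
 * with size 0.
 */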
5064 {
5065 "helper access to variable memory: size > 0 not allowed on NULL",
5066 .insns = {
5067 BPF_MOV64_IMM(BPF_REG_1, 0),
5068 BPF_MOV64_IMM(BPF_REG_2, 0),
5069 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5070 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5071 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5072 BPF_MOV64_IMM(BPF_REG_3, 0),
5073 BPF_MOV64_IMM(BPF_REG_4, 0),
5074 BPF_MOV64_IMM(BPF_REG_5, 0),
5075 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5076 BPF_EXIT_INSN(),
5077 },
5078 .errstr = "R1 type=imm expected=fp",
5079 .result = REJECT,
5080 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5081 },
5082 {
5083 "helper access to variable memory: size = 0 not allowed on != NULL",
5084 .insns = {
5085 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5087 BPF_MOV64_IMM(BPF_REG_2, 0),
5088 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5089 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5090 BPF_MOV64_IMM(BPF_REG_3, 0),
5091 BPF_MOV64_IMM(BPF_REG_4, 0),
5092 BPF_MOV64_IMM(BPF_REG_5, 0),
5093 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5094 BPF_EXIT_INSN(),
5095 },
5096 .errstr = "invalid stack type R1 off=-8 access_size=0",
5097 .result = REJECT,
5098 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5099 },
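/* A variable-size stack read must not expose uninitialized bytes: the
 * first test leaves the fp-32 slot uninitialized and the indirect read of
 * up to 64 bytes is rejected; with fully initialized memory it is accepted.
 */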
5100 {
5101 "helper access to variable memory: 8 bytes leak",
5102 .insns = {
5103 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5105 BPF_MOV64_IMM(BPF_REG_0, 0),
5106 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5107 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5108 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5109 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5110 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5111 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5112 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5113 BPF_MOV64_IMM(BPF_REG_2, 0),
5114 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5115 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5116 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5118 BPF_MOV64_IMM(BPF_REG_3, 0),
5119 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5120 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5121 BPF_EXIT_INSN(),
5122 },
5123 .errstr = "invalid indirect read from stack off -64+32 size 64",
5124 .result = REJECT,
5125 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5126 },
5127 {
5128 "helper access to variable memory: 8 bytes no leak (init memory)",
5129 .insns = {
5130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5131 BPF_MOV64_IMM(BPF_REG_0, 0),
5132 BPF_MOV64_IMM(BPF_REG_0, 0),
5133 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5134 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5135 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5136 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5137 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5138 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5139 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5140 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5142 BPF_MOV64_IMM(BPF_REG_2, 0),
5143 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5145 BPF_MOV64_IMM(BPF_REG_3, 0),
5146 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5147 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5148 BPF_EXIT_INSN(),
5149 },
5150 .result = ACCEPT,
5151 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5152 },
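/* ANDing with a negative immediate (-4) does not bound the value from below,
 * so using the result to index into the map value leaves R0 with a possibly
 * negative offset and the store is expected to be rejected.
 */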
5153 {
5154 "invalid and of negative number",
5155 .insns = {
5156 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5157 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5159 BPF_LD_MAP_FD(BPF_REG_1, 0),
5160 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5161 BPF_FUNC_map_lookup_elem),
5162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5163 BPF_MOV64_IMM(BPF_REG_1, 6),
5164 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5165 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5166 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5167 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5168 offsetof(struct test_val, foo)),
5169 BPF_EXIT_INSN(),
5170 },
5171 .fixup_map2 = { 3 },
5172 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5173 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
5174 .result = REJECT,
5175 .result_unpriv = REJECT,
5176 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5177 },
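/* The 32-bit mod/and/shift/multiply sequence below tries to construct an
 * apparently bounded offset, but the verifier cannot prove the result is
 * non-negative, so the final map value store is expected to be rejected.
 */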
5178 {
5179 "invalid range check",
5180 .insns = {
5181 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5182 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5184 BPF_LD_MAP_FD(BPF_REG_1, 0),
5185 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5186 BPF_FUNC_map_lookup_elem),
5187 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5188 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5189 BPF_MOV64_IMM(BPF_REG_9, 1),
5190 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5191 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5192 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5193 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5194 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5195 BPF_MOV32_IMM(BPF_REG_3, 1),
5196 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5197 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5198 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5199 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5200 BPF_MOV64_IMM(BPF_REG_0, 0),
5201 BPF_EXIT_INSN(),
5202 },
5203 .fixup_map2 = { 3 },
5204 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5205 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
5206 .result = REJECT,
5207 .result_unpriv = REJECT,
5208 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5209 },
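/* Map-in-map tests: the first lookup on the outer ARRAY_OF_MAPS map yields
 * an inner map pointer (or NULL) that must be used unmodified and
 * NULL-checked before it can serve as the map argument of a second lookup;
 * the two tests following the valid one violate each rule in turn.
 */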
5210 {
5211 "map in map access",
5212 .insns = {
5213 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5216 BPF_LD_MAP_FD(BPF_REG_1, 0),
5217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5218 BPF_FUNC_map_lookup_elem),
5219 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5220 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5221 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5223 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5224 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5225 BPF_FUNC_map_lookup_elem),
5226 BPF_MOV64_IMM(BPF_REG_0, 0),
5227 BPF_EXIT_INSN(),
5228 },
5229 .fixup_map_in_map = { 3 },
5230 .result = ACCEPT,
5231 },
5232 {
5233 "invalid inner map pointer",
5234 .insns = {
5235 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5238 BPF_LD_MAP_FD(BPF_REG_1, 0),
5239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5240 BPF_FUNC_map_lookup_elem),
5241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5242 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5243 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5245 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5248 BPF_FUNC_map_lookup_elem),
5249 BPF_MOV64_IMM(BPF_REG_0, 0),
5250 BPF_EXIT_INSN(),
5251 },
5252 .fixup_map_in_map = { 3 },
5253 .errstr = "R1 type=inv expected=map_ptr",
5254 .errstr_unpriv = "R1 pointer arithmetic prohibited",
5255 .result = REJECT,
5256 },
5257 {
5258 "forgot null checking on the inner map pointer",
5259 .insns = {
5260 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5261 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5263 BPF_LD_MAP_FD(BPF_REG_1, 0),
5264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5265 BPF_FUNC_map_lookup_elem),
5266 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
5267 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5271 BPF_FUNC_map_lookup_elem),
5272 BPF_MOV64_IMM(BPF_REG_0, 0),
5273 BPF_EXIT_INSN(),
5274 },
5275 .fixup_map_in_map = { 3 },
5276 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5277 .result = REJECT,
5278 },
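/* LD_ABS/LD_IND calling convention: these instructions implicitly use R6 as
 * the skb pointer and clobber the caller-saved registers R1-R5, so reading
 * any of R1-R5 afterwards must be rejected (!read_ok), while the
 * callee-saved R7 keeps its value and the last test of each group passes.
 */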
5279 {
5280 "ld_abs: check calling conv, r1",
5281 .insns = {
5282 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5283 BPF_MOV64_IMM(BPF_REG_1, 0),
5284 BPF_LD_ABS(BPF_W, -0x200000),
5285 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5286 BPF_EXIT_INSN(),
5287 },
5288 .errstr = "R1 !read_ok",
5289 .result = REJECT,
5290 },
5291 {
5292 "ld_abs: check calling conv, r2",
5293 .insns = {
5294 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5295 BPF_MOV64_IMM(BPF_REG_2, 0),
5296 BPF_LD_ABS(BPF_W, -0x200000),
5297 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5298 BPF_EXIT_INSN(),
5299 },
5300 .errstr = "R2 !read_ok",
5301 .result = REJECT,
5302 },
5303 {
5304 "ld_abs: check calling conv, r3",
5305 .insns = {
5306 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5307 BPF_MOV64_IMM(BPF_REG_3, 0),
5308 BPF_LD_ABS(BPF_W, -0x200000),
5309 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5310 BPF_EXIT_INSN(),
5311 },
5312 .errstr = "R3 !read_ok",
5313 .result = REJECT,
5314 },
5315 {
5316 "ld_abs: check calling conv, r4",
5317 .insns = {
5318 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5319 BPF_MOV64_IMM(BPF_REG_4, 0),
5320 BPF_LD_ABS(BPF_W, -0x200000),
5321 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5322 BPF_EXIT_INSN(),
5323 },
5324 .errstr = "R4 !read_ok",
5325 .result = REJECT,
5326 },
5327 {
5328 "ld_abs: check calling conv, r5",
5329 .insns = {
5330 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5331 BPF_MOV64_IMM(BPF_REG_5, 0),
5332 BPF_LD_ABS(BPF_W, -0x200000),
5333 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5334 BPF_EXIT_INSN(),
5335 },
5336 .errstr = "R5 !read_ok",
5337 .result = REJECT,
5338 },
5339 {
5340 "ld_abs: check calling conv, r7",
5341 .insns = {
5342 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5343 BPF_MOV64_IMM(BPF_REG_7, 0),
5344 BPF_LD_ABS(BPF_W, -0x200000),
5345 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5346 BPF_EXIT_INSN(),
5347 },
5348 .result = ACCEPT,
5349 },
5350 {
5351 "ld_ind: check calling conv, r1",
5352 .insns = {
5353 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5354 BPF_MOV64_IMM(BPF_REG_1, 1),
5355 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5356 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5357 BPF_EXIT_INSN(),
5358 },
5359 .errstr = "R1 !read_ok",
5360 .result = REJECT,
5361 },
5362 {
5363 "ld_ind: check calling conv, r2",
5364 .insns = {
5365 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5366 BPF_MOV64_IMM(BPF_REG_2, 1),
5367 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5368 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5369 BPF_EXIT_INSN(),
5370 },
5371 .errstr = "R2 !read_ok",
5372 .result = REJECT,
5373 },
5374 {
5375 "ld_ind: check calling conv, r3",
5376 .insns = {
5377 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5378 BPF_MOV64_IMM(BPF_REG_3, 1),
5379 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5380 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5381 BPF_EXIT_INSN(),
5382 },
5383 .errstr = "R3 !read_ok",
5384 .result = REJECT,
5385 },
5386 {
5387 "ld_ind: check calling conv, r4",
5388 .insns = {
5389 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5390 BPF_MOV64_IMM(BPF_REG_4, 1),
5391 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5392 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5393 BPF_EXIT_INSN(),
5394 },
5395 .errstr = "R4 !read_ok",
5396 .result = REJECT,
5397 },
5398 {
5399 "ld_ind: check calling conv, r5",
5400 .insns = {
5401 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5402 BPF_MOV64_IMM(BPF_REG_5, 1),
5403 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5404 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5405 BPF_EXIT_INSN(),
5406 },
5407 .errstr = "R5 !read_ok",
5408 .result = REJECT,
5409 },
5410 {
5411 "ld_ind: check calling conv, r7",
5412 .insns = {
5413 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5414 BPF_MOV64_IMM(BPF_REG_7, 1),
5415 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5416 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5417 BPF_EXIT_INSN(),
5418 },
5419 .result = ACCEPT,
5420 },
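/* bpf_perf_event_data->sample_period may be read with byte, half, word or
 * dword loads; the __LITTLE_ENDIAN conditionals pick the offset of the
 * least significant part of the 64-bit field on big-endian hosts.
 */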
5421 {
5422 "check bpf_perf_event_data->sample_period byte load permitted",
5423 .insns = {
5424 BPF_MOV64_IMM(BPF_REG_0, 0),
5425 #ifdef __LITTLE_ENDIAN
5426 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5427 offsetof(struct bpf_perf_event_data, sample_period)),
5428 #else
5429 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5430 offsetof(struct bpf_perf_event_data, sample_period) + 7),
5431 #endif
5432 BPF_EXIT_INSN(),
5433 },
5434 .result = ACCEPT,
5435 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5436 },
5437 {
5438 "check bpf_perf_event_data->sample_period half load permitted",
5439 .insns = {
5440 BPF_MOV64_IMM(BPF_REG_0, 0),
5441 #ifdef __LITTLE_ENDIAN
5442 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5443 offsetof(struct bpf_perf_event_data, sample_period)),
5444 #else
5445 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5446 offsetof(struct bpf_perf_event_data, sample_period) + 6),
5447 #endif
5448 BPF_EXIT_INSN(),
5449 },
5450 .result = ACCEPT,
5451 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5452 },
5453 {
5454 "check bpf_perf_event_data->sample_period word load permitted",
5455 .insns = {
5456 BPF_MOV64_IMM(BPF_REG_0, 0),
5457 #ifdef __LITTLE_ENDIAN
5458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5459 offsetof(struct bpf_perf_event_data, sample_period)),
5460 #else
5461 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5462 offsetof(struct bpf_perf_event_data, sample_period) + 4),
5463 #endif
5464 BPF_EXIT_INSN(),
5465 },
5466 .result = ACCEPT,
5467 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5468 },
5469 {
5470 "check bpf_perf_event_data->sample_period dword load permitted",
5471 .insns = {
5472 BPF_MOV64_IMM(BPF_REG_0, 0),
5473 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5474 offsetof(struct bpf_perf_event_data, sample_period)),
5475 BPF_EXIT_INSN(),
5476 },
5477 .result = ACCEPT,
5478 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5479 },
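/* Narrower-than-word context loads are not allowed here: a half load of
 * skb->data, and of skb->tc_classid from an LWT program, must be flagged as
 * invalid bpf_context access.
 */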
5480 {
5481 "check skb->data half load not permitted",
5482 .insns = {
5483 BPF_MOV64_IMM(BPF_REG_0, 0),
5484 #ifdef __LITTLE_ENDIAN
5485 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5486 offsetof(struct __sk_buff, data)),
5487 #else
5488 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5489 offsetof(struct __sk_buff, data) + 2),
5490 #endif
5491 BPF_EXIT_INSN(),
5492 },
5493 .result = REJECT,
5494 .errstr = "invalid bpf_context access",
5495 },
5496 {
5497 "check skb->tc_classid half load not permitted for lwt prog",
5498 .insns = {
5499 BPF_MOV64_IMM(BPF_REG_0, 0),
5500 #ifdef __LITTLE_ENDIAN
5501 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5502 offsetof(struct __sk_buff, tc_classid)),
5503 #else
5504 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5505 offsetof(struct __sk_buff, tc_classid) + 2),
5506 #endif
5507 BPF_EXIT_INSN(),
5508 },
5509 .result = REJECT,
5510 .errstr = "invalid bpf_context access",
5511 .prog_type = BPF_PROG_TYPE_LWT_IN,
5512 },
5513 };
5514
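/* Determine the length of a test program: insns[] is a fixed-size,
 * zero-padded array, so scan backwards past the all-zero tail until the
 * last real instruction.
 */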
5515 static int probe_filter_length(const struct bpf_insn *fp)
5516 {
5517 int len;
5518
5519 for (len = MAX_INSNS - 1; len > 0; --len)
5520 if (fp[len].code != 0 || fp[len].imm != 0)
5521 break;
5522 return len + 1;
5523 }
5524
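/* Hash map used to patch the BPF_LD_MAP_FD placeholders of fixup_map1/
 * fixup_map2 tests: the key is a long long, the value size and element
 * count are supplied by the caller.
 */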
5525 static int create_map(uint32_t size_value, uint32_t max_elem)
5526 {
5527 int fd;
5528
5529 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
5530 size_value, max_elem, BPF_F_NO_PREALLOC);
5531 if (fd < 0)
5532 printf("Failed to create hash map '%s'!\n", strerror(errno));
5533
5534 return fd;
5535 }
5536
5537 static int create_prog_array(void)
5538 {
5539 int fd;
5540
5541 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
5542 sizeof(int), 4, 0);
5543 if (fd < 0)
5544 printf("Failed to create prog array '%s'!\n", strerror(errno));
5545
5546 return fd;
5547 }
5548
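/* Build a minimal map-in-map: a one-element inner array map whose fd serves
 * as the value template for an outer BPF_MAP_TYPE_ARRAY_OF_MAPS; the inner
 * fd can be closed once the outer map has been created.
 */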
5549 static int create_map_in_map(void)
5550 {
5551 int inner_map_fd, outer_map_fd;
5552
5553 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
5554 sizeof(int), 1, 0);
5555 if (inner_map_fd < 0) {
5556 printf("Failed to create array '%s'!\n", strerror(errno));
5557 return inner_map_fd;
5558 }
5559
5560 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
5561 sizeof(int), inner_map_fd, 1, 0);
5562 if (outer_map_fd < 0)
5563 printf("Failed to create array of maps '%s'!\n",
5564 strerror(errno));
5565
5566 close(inner_map_fd);
5567
5568 return outer_map_fd;
5569 }
5570
5571 static char bpf_vlog[32768];
5572
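/* Patch map fds into the test program: each fixup_* array lists the
 * instruction indices of BPF_LD_MAP_FD placeholders whose imm field is
 * rewritten with the fd of a freshly created map of the matching kind.
 */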
5573 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
5574 int *map_fds)
5575 {
5576 int *fixup_map1 = test->fixup_map1;
5577 int *fixup_map2 = test->fixup_map2;
5578 int *fixup_prog = test->fixup_prog;
5579 int *fixup_map_in_map = test->fixup_map_in_map;
5580
5581 /* Allocating hash tables with a single element is fine here, since
5582 * we only exercise the verifier and never do a runtime lookup, so the
5583 * only thing that really matters is the value size in this case.
5584 */
5585 if (*fixup_map1) {
5586 map_fds[0] = create_map(sizeof(long long), 1);
5587 do {
5588 prog[*fixup_map1].imm = map_fds[0];
5589 fixup_map1++;
5590 } while (*fixup_map1);
5591 }
5592
5593 if (*fixup_map2) {
5594 map_fds[1] = create_map(sizeof(struct test_val), 1);
5595 do {
5596 prog[*fixup_map2].imm = map_fds[1];
5597 fixup_map2++;
5598 } while (*fixup_map2);
5599 }
5600
5601 if (*fixup_prog) {
5602 map_fds[2] = create_prog_array();
5603 do {
5604 prog[*fixup_prog].imm = map_fds[2];
5605 fixup_prog++;
5606 } while (*fixup_prog);
5607 }
5608
5609 if (*fixup_map_in_map) {
5610 map_fds[3] = create_map_in_map();
5611 do {
5612 prog[*fixup_map_in_map].imm = map_fds[3];
5613 fixup_map_in_map++;
5614 } while (*fixup_map_in_map);
5615 }
5616 }
5617
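/* Load one test program through the verifier and compare the outcome (and,
 * on rejection, the verifier log) against the expected result and error
 * string, taking the unprivileged variants and possible alignment
 * rejections into account.
 */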
5618 static void do_test_single(struct bpf_test *test, bool unpriv,
5619 int *passes, int *errors)
5620 {
5621 int fd_prog, expected_ret, reject_from_alignment;
5622 struct bpf_insn *prog = test->insns;
5623 int prog_len = probe_filter_length(prog);
5624 int prog_type = test->prog_type;
5625 int map_fds[MAX_NR_MAPS];
5626 const char *expected_err;
5627 int i;
5628
5629 for (i = 0; i < MAX_NR_MAPS; i++)
5630 map_fds[i] = -1;
5631
5632 do_test_fixup(test, prog, map_fds);
5633
5634 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
5635 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
5636 "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
5637
5638 expected_ret = unpriv && test->result_unpriv != UNDEF ?
5639 test->result_unpriv : test->result;
5640 expected_err = unpriv && test->errstr_unpriv ?
5641 test->errstr_unpriv : test->errstr;
5642
5643 reject_from_alignment = fd_prog < 0 &&
5644 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
5645 strstr(bpf_vlog, "Unknown alignment.");
5646 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
5647 if (reject_from_alignment) {
5648 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
5649 strerror(errno));
5650 goto fail_log;
5651 }
5652 #endif
5653 if (expected_ret == ACCEPT) {
5654 if (fd_prog < 0 && !reject_from_alignment) {
5655 printf("FAIL\nFailed to load prog '%s'!\n",
5656 strerror(errno));
5657 goto fail_log;
5658 }
5659 } else {
5660 if (fd_prog >= 0) {
5661 printf("FAIL\nUnexpected success to load!\n");
5662 goto fail_log;
5663 }
5664 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
5665 printf("FAIL\nUnexpected error message!\n");
5666 goto fail_log;
5667 }
5668 }
5669
5670 (*passes)++;
5671 printf("OK%s\n", reject_from_alignment ?
5672 " (NOTE: reject due to unknown alignment)" : "");
5673 close_fds:
5674 close(fd_prog);
5675 for (i = 0; i < MAX_NR_MAPS; i++)
5676 close(map_fds[i]);
5677 sched_yield();
5678 return;
5679 fail_log:
5680 (*errors)++;
5681 printf("%s", bpf_vlog);
5682 goto close_fds;
5683 }
5684
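/* Check whether CAP_SYS_ADMIN is in the effective capability set; this
 * decides whether the privileged test pass can run at all.
 */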
5685 static bool is_admin(void)
5686 {
5687 cap_t caps;
5688 cap_flag_value_t sysadmin = CAP_CLEAR;
5689 const cap_value_t cap_val = CAP_SYS_ADMIN;
5690
5691 #ifdef CAP_IS_SUPPORTED
5692 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
5693 perror("cap_get_flag");
5694 return false;
5695 }
5696 #endif
5697 caps = cap_get_proc();
5698 if (!caps) {
5699 perror("cap_get_proc");
5700 return false;
5701 }
5702 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
5703 perror("cap_get_flag");
5704 if (cap_free(caps))
5705 perror("cap_free");
5706 return (sysadmin == CAP_SET);
5707 }
5708
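/* Raise or drop CAP_SYS_ADMIN in the effective set so that the unprivileged
 * pass can be exercised even when running as root.
 */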
5709 static int set_admin(bool admin)
5710 {
5711 cap_t caps;
5712 const cap_value_t cap_val = CAP_SYS_ADMIN;
5713 int ret = -1;
5714
5715 caps = cap_get_proc();
5716 if (!caps) {
5717 perror("cap_get_proc");
5718 return -1;
5719 }
5720 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
5721 admin ? CAP_SET : CAP_CLEAR)) {
5722 perror("cap_set_flag");
5723 goto out;
5724 }
5725 if (cap_set_proc(caps)) {
5726 perror("cap_set_proc");
5727 goto out;
5728 }
5729 ret = 0;
5730 out:
5731 if (cap_free(caps))
5732 perror("cap_free");
5733 return ret;
5734 }
5735
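/* Run tests [from, to): every test without an explicit prog_type gets an
 * unprivileged pass (with CAP_SYS_ADMIN temporarily dropped when running as
 * root), plus a privileged pass when running as root.
 */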
5736 static int do_test(bool unpriv, unsigned int from, unsigned int to)
5737 {
5738 int i, passes = 0, errors = 0;
5739
5740 for (i = from; i < to; i++) {
5741 struct bpf_test *test = &tests[i];
5742
5743 /* Tests with an explicit prog_type are skipped in the unprivileged
5744 * pass, since those program types cannot be loaded by non-root.
5745 */
5746 if (!test->prog_type) {
5747 if (!unpriv)
5748 set_admin(false);
5749 printf("#%d/u %s ", i, test->descr);
5750 do_test_single(test, true, &passes, &errors);
5751 if (!unpriv)
5752 set_admin(true);
5753 }
5754
5755 if (!unpriv) {
5756 printf("#%d/p %s ", i, test->descr);
5757 do_test_single(test, false, &passes, &errors);
5758 }
5759 }
5760
5761 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
5762 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
5763 }
5764
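/* Without arguments all tests are run; a single numeric argument selects one
 * test, two arguments select an inclusive range. Unprivileged runs get a
 * small RLIMIT_MEMLOCK, privileged runs an unlimited one.
 */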
5765 int main(int argc, char **argv)
5766 {
5767 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
5768 struct rlimit rlim = { 1 << 20, 1 << 20 };
5769 unsigned int from = 0, to = ARRAY_SIZE(tests);
5770 bool unpriv = !is_admin();
5771
5772 if (argc == 3) {
5773 unsigned int l = atoi(argv[argc - 2]);
5774 unsigned int u = atoi(argv[argc - 1]);
5775
5776 if (l < to && u < to) {
5777 from = l;
5778 to = u + 1;
5779 }
5780 } else if (argc == 2) {
5781 unsigned int t = atoi(argv[argc - 1]);
5782
5783 if (t < to) {
5784 from = t;
5785 to = t + 1;
5786 }
5787 }
5788
5789 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
5790 return do_test(unpriv, from, to);
5791 }