/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    ...
 * }
 *
 * here kernel can access 'key' and 'map' pointers safely, knowing that
 * [key, key + map->key_size) bytes are valid and were initialized on
 * the stack of eBPF program.
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
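
/* For illustration, a complete minimal program of this shape (a sketch;
 * 'map_fd' stands for the fd of an already created map with 4-byte keys
 * and 8-byte values) that passes the above checks:
 *
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),   // initialize the key on stack
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // skip the store if R0 == NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),    // R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * dropping the BPF_JEQ guard makes the store through R0 fail verification,
 * since R0 would still have type PTR_TO_MAP_VALUE_OR_NULL.
 */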
/* types of values stored in eBPF registers */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	FRAME_PTR,		 /* reg == frame_pointer */
	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
	CONST_IMM,		 /* constant integer value */
};
struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK */
		int imm;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};
#define BPF_COMPLEXITY_LIMIT_INSNS	65536
#define BPF_COMPLEXITY_LIMIT_STACK	1024

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
};
static const struct {
	enum bpf_map_type map_type;
	int func_id;
} func_limit[] = {
	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
	{BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
};
static void print_verifier_state(struct verifier_env *env)
{
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		t = env->cur_state.regs[i].type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%d", env->cur_state.regs[i].imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				env->cur_state.regs[i].map_ptr->key_size,
				env->cur_state.regs[i].map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}
static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};
static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR >> 4]   = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};
static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};
static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};
static void print_bpf_insn(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) == BPF_X)
			verbose("(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		else
			verbose("(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->imm);
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_XADD)
			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		else
			verbose("BUG_%02x\n", insn->code);
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_st_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM) {
			verbose("(%02x) r%d = 0x%x\n",
				insn->code, insn->dst_reg, insn->imm);
		} else {
			verbose("BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			verbose("(%02x) call %d\n", insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose("(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose("(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->src_reg, insn->off);
		} else {
			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
	}
}
static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
	struct verifier_stack_elem *elem;
	int insn_idx;

	if (env->head == NULL)
		return -1;

	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
	insn_idx = env->head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = env->head->prev_insn_idx;
	elem = env->head->next;
	kfree(env->head);
	env->head = elem;
	env->stack_size--;
	return insn_idx;
}
static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
					 int prev_insn_idx)
{
	struct verifier_stack_elem *elem;

	elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
		regs[i].map_ptr = NULL;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}
static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
	regs[regno].map_ptr = NULL;
}
enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}
static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}
/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}
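
/* For example (a sketch): spilling and filling the ctx pointer held in R1
 * is only tracked at full register width:
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // fp[-8] becomes STACK_SPILL
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // R1 is PTR_TO_CTX again
 *
 * the same store with BPF_W size would be rejected above with
 * "invalid size of register spill".
 */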
static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t)) {
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}
static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	int size, err = 0;

	if (state->regs[regno].type == PTR_TO_STACK)
		off += state->regs[regno].imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	if (off % size != 0) {
		verbose("misaligned access off %d size %d\n", off, size);
		return -EACCES;
	}

	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == PTR_TO_CTX) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == FRAME_PTR ||
		   state->regs[regno].type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[state->regs[regno].type]);
		return -EACCES;
	}
	return err;
}
static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}
/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env, int regno,
				int access_size, bool zero_size_allowed)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK) {
		if (zero_size_allowed && access_size == 0 &&
		    regs[regno].type == CONST_IMM &&
		    regs[regno].imm  == 0)
			return 0;

		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}
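
/* E.g. (a sketch): for a helper argument checked with key_size == 4,
 * the four bytes at the passed stack offset must all be STACK_MISC:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),   // initialize fp[-4..-1]
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  // R2 = fp - 4, the key pointer
 *
 * leaving fp[-4] uninitialized triggers "invalid indirect read from stack".
 */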
static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else if (arg_type == ARG_PTR_TO_STACK) {
		expected_type = PTR_TO_STACK;
		/* One exception here. In case function allows for NULL to be
		 * passed in as argument, it's a CONST_IMM type. Final test
		 * happens during stack boundary checking.
		 */
		if (reg->type == CONST_IMM && reg->imm == 0)
			expected_type = CONST_IMM;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		*mapp = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!*mapp) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->key_size,
					   false);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!*mapp) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->value_size,
					   false);
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);

		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm,
					   zero_size_allowed);
	}

	return err;
}
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	bool bool_map, bool_func;
	int i;

	if (!map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
		bool_map = (map->map_type == func_limit[i].map_type);
		bool_func = (func_id == func_limit[i].func_id);
		/* only when map & func pair match it can continue.
		 * don't allow any other map type to be passed into
		 * the special func
		 */
		if (bool_func && bool_map != bool_func) {
			verbose("cannot pass map_type %d into func %d\n",
				map->map_type, func_id);
			return -EINVAL;
		}
	}

	return 0;
}
static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct bpf_map *map = NULL;
	struct reg_state *reg;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
	if (err)
		return err;

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (map == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = map;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(map, func_id);
	if (err)
		return err;

	return 0;
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
			} else {
				if (is_pointer_value(env, insn->src_reg)) {
					verbose("R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				regs[insn->dst_reg].type = UNKNOWN_VALUE;
				regs[insn->dst_reg].map_ptr = NULL;
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}

	} else if (opcode > BPF_END) {
		verbose("invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		bool stack_relative = false;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    regs[insn->dst_reg].type == FRAME_PTR &&
		    BPF_SRC(insn->code) == BPF_K) {
			stack_relative = true;
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (stack_relative) {
			regs[insn->dst_reg].type = PTR_TO_STACK;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}

	return 0;
}
static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == CONST_IMM &&
	    regs[insn->dst_reg].imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ ||
			       opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = 0;
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = 0;
		}
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	} else if (BPF_SRC(insn->code) == BPF_K &&
		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
		if (opcode == BPF_JEQ) {
			/* detect if (R == imm) goto
			 * and in the target state recognize that R = imm
			 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = insn->imm;
		} else {
			/* detect if (R != imm) goto
			 * and in the fall-through state recognize that R = imm
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}
	if (log_level)
		print_verifier_state(env);
	return 0;
}
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}
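
/* A BPF_LD_IMM64 occupies two insn slots. For example, a pointer value
 * 0x1122334455667788 is carried as insn[0].imm = 0x55667788 (low 32 bits)
 * and insn[1].imm = 0x11223344 (high 32 bits), which the expression above
 * reassembles into the original 64-bit value.
 */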
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}
/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_ABS uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}
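
/* A valid use (a sketch, assuming a socket filter program) that loads one
 * byte at a fixed packet offset:
 *
 *    BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),  // keep ctx/skb in R6
 *    BPF_LD_ABS(BPF_B, 14),                // R0 = *(u8 *)skb[14]
 *    BPF_EXIT_INSN(),
 *
 * the same BPF_LD_ABS without PTR_TO_CTX in R6 is rejected above.
 */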
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return 0;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return 0;

	if (w < 0 || w >= env->prog->len) {
		verbose("jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH)
		/* mark branch target for state pruning */
		env->explored_states[w] = STATE_LIST_MARK;

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[cur_stack++] = w;
		return 1;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		verbose("back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose("insn state internal bug\n");
		return -EFAULT;
	}
	return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but current path has stored:
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}
static int is_state_visited(struct verifier_env *env, int insn_idx)
{
	struct verifier_state_list *new_sl;
	struct verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(&sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected. Since there are no loops, we won't be
	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
	 */
	new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	return 0;
}
static int do_check(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct reg_state *regs = state->regs;
	int insn_cnt = env->prog->len;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	init_reg_state(regs);
	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose("invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose("BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (log_level) {
				if (do_print_state)
					verbose("\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose("%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (log_level && do_print_state) {
			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
			print_verifier_state(env);
			do_print_state = false;
		}

		if (log_level) {
			verbose("%d: ", insn_idx);
			print_bpf_insn(insn);
		}

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg);
			if (err)
				return err;

			if (BPF_SIZE(insn->code) != BPF_W) {
				insn_idx++;
				continue;
			}

			if (insn->imm == 0) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * use reserved 'imm' field to mark this insn
				 */
				insn->imm = src_reg_type;

			} else if (src_reg_type != insn->imm &&
				   (src_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg);
			if (err)
				return err;

			if (insn->imm == 0) {
				insn->imm = dst_reg_type;
			} else if (dst_reg_type != insn->imm &&
				   (dst_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose("BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				err = check_call(env, insn->imm);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose("R0 leaks addr as return value\n");
					return -EACCES;
				}

process_bpf_exit:
				insn_idx = pop_stack(env, &prev_insn_idx);
				if (insn_idx < 0) {
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
			} else {
				verbose("invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose("unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	return 0;
}
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose("BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose("BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* remember this map */
			env->used_maps[env->used_map_cnt++] = map;

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			bpf_map_inc(map, false);
			fdput(f);
next_insn:
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
	int i;

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}
static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
{
	struct bpf_insn *insn = prog->insnsi;
	int insn_cnt = prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_CALL ||
		    BPF_OP(insn->code) == BPF_EXIT)
			continue;

		/* adjust offset of jmps if necessary */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
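
/* For instance (a sketch): if the insn at pos 5 is expanded into 3 insns
 * (delta == 2), a jump at insn 3 with off == +4 that used to land beyond
 * pos now needs off == +6; the first branch above performs exactly that
 * adjustment.
 */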
/* convert load instructions that access fields of 'struct __sk_buff'
 * into sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	u32 cnt;
	int i;
	enum bpf_access_type type;

	if (!env->prog->aux->ops->convert_ctx_access)
		return 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
			type = BPF_WRITE;
		else
			continue;

		if (insn->imm != PTR_TO_CTX) {
			/* clear internal mark */
			insn->imm = 0;
			continue;
		}

		cnt = env->prog->aux->ops->
			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
					   insn->off, insn_buf, env->prog);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (cnt == 1) {
			memcpy(insn, insn_buf, sizeof(*insn));
			continue;
		}

		/* several new insns need to be inserted. Make room for them */
		insn_cnt += cnt - 1;
		new_prog = bpf_prog_realloc(env->prog,
					    bpf_prog_size(insn_cnt),
					    GFP_USER);
		if (!new_prog)
			return -ENOMEM;

		new_prog->len = insn_cnt;

		memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1,
			sizeof(*insn) * (insn_cnt - i - cnt));

		/* copy substitute insns in place of load instruction */
		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);

		/* adjust branches in the whole program */
		adjust_branches(new_prog, i, cnt - 1);

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + cnt - 1;
		i += cnt - 1;
	}

	return 0;
}
static void free_states(struct verifier_env *env)
{
	struct verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct verifier_env *env;
	int ret = -EINVAL;

	if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
		return -E2BIG;

	/* 'struct verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->prog = *prog;

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto free_env;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto free_env;
	} else {
		log_level = 0;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);

	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	*prog = env->prog;

free_env:
	kfree(env);
	mutex_unlock(&bpf_verifier_lock);
	return ret;
}