/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
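
/* A complete end-to-end sketch (added for illustration, not part of the
 * original file; assumes 'map_fd' refers to a map with key_size == 4 and
 * value_size >= 8): the null-checked lookup described above, written as a
 * program the verifier would accept:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),   // initialize 4-byte key at fp-4
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   // R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  // R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),       // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // R0 is PTR_TO_MAP_VALUE_OR_NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),    // safe: R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 */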
/* types of values stored in eBPF registers */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	FRAME_PTR,		 /* reg == frame_pointer */
	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
	CONST_IMM,		 /* constant integer value */
};
struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK */
		long imm;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
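
/* Illustrative note (added for clarity, not in the original; assumes
 * MAX_BPF_STACK == 512): a stack offset 'off' in [-MAX_BPF_STACK, 0) maps to
 * byte index MAX_BPF_STACK + off in stack_slot_type[] and to register-sized
 * slot (MAX_BPF_STACK + off) / BPF_REG_SIZE in spilled_regs[]. E.g. off == -8
 * is byte 504 and spill slot 63, the topmost register-sized slot.
 */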
/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};

#define BPF_COMPLEXITY_LIMIT_INSNS 65536
#define BPF_COMPLEXITY_LIMIT_STACK 1024
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	int regno;
	int access_size;
};
/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
};
static const struct {
	enum bpf_map_type map_type;
	int func_id;
} func_limit[] = {
	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
	{BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
};
static void print_verifier_state(struct verifier_env *env)
{
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		t = env->cur_state.regs[i].type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%ld", env->cur_state.regs[i].imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				env->cur_state.regs[i].map_ptr->key_size,
				env->cur_state.regs[i].map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}
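
/* Example (illustrative, not from the original source): right after
 * "r2 = r10; r2 += -4" in a program, the dump above would print roughly:
 *
 *    R1=ctx R2=fp-4 R10=fp
 *
 * i.e. the "%ld" immediate is appended to the "fp" type name for
 * PTR_TO_STACK registers.
 */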
static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};
static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR >> 4]   = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};
static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};
static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};
static void print_bpf_insn(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) == BPF_X)
			verbose("(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		else
			verbose("(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->imm);
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_XADD)
			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		else
			verbose("BUG_%02x\n", insn->code);
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_st_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM) {
			verbose("(%02x) r%d = 0x%x\n",
				insn->code, insn->dst_reg, insn->imm);
		} else {
			verbose("BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			verbose("(%02x) call %d\n", insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose("(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose("(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->src_reg, insn->off);
		} else {
			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
	}
}
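
/* Example (illustrative, not from the original source): with log_level set,
 * do_check() prefixes each insn with its index and print_bpf_insn() renders
 * it, so "r2 = r10; r2 += -4; call 1; exit" is traced as:
 *
 *    0: (bf) r2 = r10
 *    1: (07) r2 += -4
 *    2: (85) call 1
 *    3: (95) exit
 */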
static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
	struct verifier_stack_elem *elem;
	int insn_idx;

	if (env->head == NULL)
		return -1;

	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
	insn_idx = env->head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = env->head->prev_insn_idx;
	elem = env->head->next;
	kfree(env->head);
	env->head = elem;
	env->stack_size--;
	return insn_idx;
}
static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
					 int prev_insn_idx)
{
	struct verifier_stack_elem *elem;

	elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}
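
/* Illustrative note (added, not in the original): on a conditional jump,
 * check_cond_jmp_op() keeps walking the fall-through path and push_stack()es
 * the state at the branch target; once the current path reaches bpf_exit,
 * do_check() pop_stack()s that saved state and verifies the other side.
 */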
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
		regs[i].map_ptr = NULL;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}
static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
	regs[regno].map_ptr = NULL;
}
enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}
static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}
/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}
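
/* Example (illustrative, not from the original source): spilling a pointer
 * with "*(u64 *)(r10 - 8) = r1" while R1 is PTR_TO_CTX copies the whole
 * reg_state into spilled_regs[(MAX_BPF_STACK - 8) / BPF_REG_SIZE] and marks
 * all eight bytes at fp-8 as STACK_SPILL, so a later fill
 * "r2 = *(u64 *)(r10 - 8)" restores the exact pointer type.
 */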
static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t)) {
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}
static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	int size, err = 0;

	if (state->regs[regno].type == PTR_TO_STACK)
		off += state->regs[regno].imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	if (off % size != 0) {
		verbose("misaligned access off %d size %d\n", off, size);
		return -EACCES;
	}

	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == PTR_TO_CTX) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == FRAME_PTR ||
		   state->regs[regno].type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[state->regs[regno].type]);
		return -EACCES;
	}
	return err;
}
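
/* Example (illustrative, not from the original source): for a stack access,
 * "r1 = *(u32 *)(r10 - 6)" fails the alignment check above since
 * -6 % 4 != 0, while "r1 = *(u32 *)(r10 - 8)" passes and proceeds to
 * check_stack_read().
 */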
static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}
/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK) {
		if (zero_size_allowed && access_size == 0 &&
		    regs[regno].type == CONST_IMM &&
		    regs[regno].imm == 0)
			return 0;

		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}
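
/* Worked example (added for clarity, not in the original): for a helper
 * argument that must point to map->key_size == 8 readable bytes, a
 * PTR_TO_STACK register with imm == -4 is rejected above because
 * off + access_size == 4 > 0, i.e. the 8-byte key would run past the top of
 * the frame; imm == -8 is the highest offset that fits.
 */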
static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else if (arg_type == ARG_PTR_TO_STACK ||
		   arg_type == ARG_PTR_TO_RAW_STACK) {
		expected_type = PTR_TO_STACK;
		/* One exception here. In case function allows for NULL to be
		 * passed in as argument, it's a CONST_IMM type. Final test
		 * happens during stack boundary checking.
		 */
		if (reg->type == CONST_IMM && reg->imm == 0)
			expected_type = CONST_IMM;
		meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		meta->map_ptr = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!meta->map_ptr) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, meta->map_ptr->key_size,
					   false, NULL);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!meta->map_ptr) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno,
					   meta->map_ptr->value_size,
					   false, NULL);
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);

		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm,
					   zero_size_allowed, meta);
	}

	return err;
}
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	bool bool_map, bool_func;
	int i;

	if (!map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
		bool_map = (map->map_type == func_limit[i].map_type);
		bool_func = (func_id == func_limit[i].func_id);
		/* only when map & func pair match it can continue.
		 * don't allow any other map type to be passed into
		 * the special func
		 */
		if (bool_func && bool_map != bool_func) {
			verbose("cannot pass map_type %d into func %d\n",
				map->map_type, func_id);
			return -EINVAL;
		}
	}

	return 0;
}
static int check_raw_mode(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
		count++;

	return count > 1 ? -EINVAL : 0;
}
static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct reg_state *reg;
	struct bpf_call_arg_meta meta;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	memset(&meta, 0, sizeof(meta));

	/* We only support one arg being in raw mode at the moment, which
	 * is sufficient for the helper functions we have right now.
	 */
	err = check_raw_mode(fn);
	if (err) {
		verbose("kernel subsystem misconfigured func %d\n", func_id);
		return err;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
	if (err)
		return err;

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
		if (err)
			return err;
	}

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (meta.map_ptr == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(meta.map_ptr, func_id);
	if (err)
		return err;

	return 0;
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
			} else {
				if (is_pointer_value(env, insn->src_reg)) {
					verbose("R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				regs[insn->dst_reg].type = UNKNOWN_VALUE;
				regs[insn->dst_reg].map_ptr = NULL;
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}

	} else if (opcode > BPF_END) {
		verbose("invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		bool stack_relative = false;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    regs[insn->dst_reg].type == FRAME_PTR &&
		    BPF_SRC(insn->code) == BPF_K) {
			stack_relative = true;
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (stack_relative) {
			regs[insn->dst_reg].type = PTR_TO_STACK;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}

	return 0;
}
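
/* Example (illustrative, not from the original source): the FRAME_PTR + imm
 * pattern match above accepts
 *
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),    // R1 = FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),  // R1 = PTR_TO_STACK, imm = -20
 *
 * while any other arithmetic on a pointer-typed register, e.g. "r1 += r2"
 * with R1 == PTR_TO_CTX, is rejected unless env->allow_ptr_leaks is set.
 */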
static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == CONST_IMM &&
	    regs[insn->dst_reg].imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ ||
			       opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = 0;
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = 0;
		}
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	} else if (BPF_SRC(insn->code) == BPF_K &&
		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
		if (opcode == BPF_JEQ) {
			/* detect if (R == imm) goto
			 * and in the target state recognize that R = imm
			 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = insn->imm;
		} else {
			/* detect if (R != imm) goto
			 * and in the fall-through state recognize that R = imm
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}
	if (log_level)
		print_verifier_state(env);
	return 0;
}
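
/* Example (illustrative, not from the original source): after
 * "call bpf_map_lookup_elem; if r0 == 0 goto +2", the fall-through state
 * keeps R0 as PTR_TO_MAP_VALUE (proven non-NULL) while the pushed state at
 * the jump target gets R0 = CONST_IMM 0, so loads through R0 verify only on
 * the fall-through path.
 */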
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}
/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_ABS uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return 0;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return 0;

	if (w < 0 || w >= env->prog->len) {
		verbose("jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH)
		/* mark branch target for state pruning */
		env->explored_states[w] = STATE_LIST_MARK;

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[cur_stack++] = w;
		return 1;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		verbose("back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose("insn state internal bug\n");
		return -EFAULT;
	}
	return 0;
}
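
/* Illustrative note (added, not in the original): for
 * "0: if r1 > 7 goto +1; 1: r0 = 0; 2: exit", check_cfg() below pushes
 * insn 1 as the FALLTHROUGH edge and insn 2 as the BRANCH edge of insn 0;
 * a jump landing on an insn that is DISCOVERED but not yet EXPLORED would
 * be reported by push_insn() as a back-edge, i.e. a loop.
 */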
/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but current path has stored:
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}
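
/* Worked example (added for clarity, not in the original): if an explored
 * state reached bpf_exit with R1=UNKNOWN_VALUE and the current path arrives
 * with R1=CONST_IMM 5, states_equal() treats them as equivalent: the less
 * precise old state was already proven safe, so the stricter one is too.
 * The reverse (old CONST_IMM, current UNKNOWN_VALUE) is not equivalent.
 */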
static int is_state_visited(struct verifier_env *env, int insn_idx)
{
	struct verifier_state_list *new_sl;
	struct verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(&sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected. Since there are no loops, we won't be
	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
	 */
	new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	return 0;
}
static int do_check(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct reg_state *regs = state->regs;
	int insn_cnt = env->prog->len;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	init_reg_state(regs);
	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose("invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose("BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (log_level) {
				if (do_print_state)
					verbose("\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose("%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (log_level && do_print_state) {
			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
			print_verifier_state(env);
			do_print_state = false;
		}

		if (log_level) {
			verbose("%d: ", insn_idx);
			print_bpf_insn(insn);
		}

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg);
			if (err)
				return err;

			if (BPF_SIZE(insn->code) != BPF_W) {
				insn_idx++;
				continue;
			}

			if (insn->imm == 0) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * use reserved 'imm' field to mark this insn
				 */
				insn->imm = src_reg_type;

			} else if (src_reg_type != insn->imm &&
				   (src_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg);
			if (err)
				return err;

			if (insn->imm == 0) {
				insn->imm = dst_reg_type;
			} else if (dst_reg_type != insn->imm &&
				   (dst_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose("BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				err = check_call(env, insn->imm);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose("R0 leaks addr as return value\n");
					return -EACCES;
				}

process_bpf_exit:
				insn_idx = pop_stack(env, &prev_insn_idx);
				if (insn_idx < 0) {
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
			} else {
				verbose("invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose("unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	return 0;
}
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose("BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose("BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* remember this map */
			env->used_maps[env->used_map_cnt++] = map;

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			bpf_map_inc(map, false);
			fdput(f);
next_insn:
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
	int i;

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}
static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
{
	struct bpf_insn *insn = prog->insnsi;
	int insn_cnt = prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_CALL ||
		    BPF_OP(insn->code) == BPF_EXIT)
			continue;

		/* adjust offset of jmps if necessary */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
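
/* Worked example (added for clarity, not in the original): when one ctx
 * access at position 'pos' is expanded into cnt insns, delta == cnt - 1
 * insns are inserted there. A forward jump from before 'pos' that lands
 * beyond it must grow by delta, and a jump from after the insertion that
 * lands at or before it must shrink by delta; jumps entirely on one side
 * keep their offsets.
 */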
/* convert load instructions that access fields of 'struct __sk_buff'
 * into sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	u32 cnt;
	int i;

	if (!env->prog->aux->ops->convert_ctx_access)
		return 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
			type = BPF_WRITE;
		else
			continue;

		if (insn->imm != PTR_TO_CTX) {
			/* clear internal mark */
			insn->imm = 0;
			continue;
		}

		cnt = env->prog->aux->ops->
			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
					   insn->off, insn_buf, env->prog);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (cnt == 1) {
			memcpy(insn, insn_buf, sizeof(*insn));
			continue;
		}

		/* several new insns need to be inserted. Make room for them */
		insn_cnt += cnt - 1;
		new_prog = bpf_prog_realloc(env->prog,
					    bpf_prog_size(insn_cnt),
					    GFP_USER);
		if (!new_prog)
			return -ENOMEM;

		new_prog->len = insn_cnt;

		memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
			sizeof(*insn) * (insn_cnt - i - cnt));

		/* copy substitute insns in place of load instruction */
		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);

		/* adjust branches in the whole program */
		adjust_branches(new_prog, i, cnt - 1);

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + cnt - 1;
		i += cnt - 1;
	}

	return 0;
}
static void free_states(struct verifier_env *env)
{
	struct verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct verifier_env *env;
	int ret = -EINVAL;

	if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
		return -E2BIG;

	/* 'struct verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->prog = *prog;

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto free_env;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto free_env;
	} else {
		log_level = 0;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);

free_env:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);

	*prog = env->prog;
	kfree(env);
	mutex_unlock(&bpf_verifier_lock);
	return ret;
}