/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    ...
 * }
 *
 * here kernel can access 'key' and 'map' pointers safely, knowing that
 * [key, key + map->key_size) bytes are valid and were initialized on
 * the stack of eBPF program.
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
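/* Illustration (added; not part of the original comment): a typical program
 * null-checks R0 before dereferencing the map value, e.g.
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // if R0 == NULL, skip the store
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // here R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * In the fall-through (non-NULL) path the verifier upgrades R0 to
 * PTR_TO_MAP_VALUE, so the 8-byte store is accepted; in the taken branch R0
 * is known to be zero and must not be dereferenced.
 */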
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024
#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};
/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
static const char * const func_id_str[] = {
	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
};
#undef __BPF_FUNC_STR_FN
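/* For illustration (added): for a helper like BPF_FUNC_map_lookup_elem the
 * __BPF_FUNC_STR_FN(x) entry expands to
 *   [BPF_FUNC_map_lookup_elem] = "bpf_map_lookup_elem",
 * so func_id_str[] maps every helper id to its "bpf_"-prefixed name.
 */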
static const char *func_id_name(int id)
{
	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);

	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
		return func_id_str[id];
	else
		return "unknown";
}
209 static void print_verifier_state(struct bpf_verifier_state
*state
)
211 struct bpf_reg_state
*reg
;
215 for (i
= 0; i
< MAX_BPF_REG
; i
++) {
216 reg
= &state
->regs
[i
];
220 verbose(" R%d=%s", i
, reg_type_str
[t
]);
221 if ((t
== SCALAR_VALUE
|| t
== PTR_TO_STACK
) &&
222 tnum_is_const(reg
->var_off
)) {
223 /* reg->off should be 0 for SCALAR_VALUE */
224 verbose("%lld", reg
->var_off
.value
+ reg
->off
);
226 verbose("(id=%d", reg
->id
);
227 if (t
!= SCALAR_VALUE
)
228 verbose(",off=%d", reg
->off
);
229 if (t
== PTR_TO_PACKET
)
230 verbose(",r=%d", reg
->range
);
231 else if (t
== CONST_PTR_TO_MAP
||
232 t
== PTR_TO_MAP_VALUE
||
233 t
== PTR_TO_MAP_VALUE_OR_NULL
)
234 verbose(",ks=%d,vs=%d",
235 reg
->map_ptr
->key_size
,
236 reg
->map_ptr
->value_size
);
237 if (tnum_is_const(reg
->var_off
)) {
238 /* Typically an immediate SCALAR_VALUE, but
239 * could be a pointer whose offset is too big
242 verbose(",imm=%llx", reg
->var_off
.value
);
244 if (reg
->smin_value
!= reg
->umin_value
&&
245 reg
->smin_value
!= S64_MIN
)
246 verbose(",smin_value=%lld",
247 (long long)reg
->smin_value
);
248 if (reg
->smax_value
!= reg
->umax_value
&&
249 reg
->smax_value
!= S64_MAX
)
250 verbose(",smax_value=%lld",
251 (long long)reg
->smax_value
);
252 if (reg
->umin_value
!= 0)
253 verbose(",umin_value=%llu",
254 (unsigned long long)reg
->umin_value
);
255 if (reg
->umax_value
!= U64_MAX
)
256 verbose(",umax_value=%llu",
257 (unsigned long long)reg
->umax_value
);
258 if (!tnum_is_unknown(reg
->var_off
)) {
261 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
262 verbose(",var_off=%s", tn_buf
);
268 for (i
= 0; i
< MAX_BPF_STACK
; i
+= BPF_REG_SIZE
) {
269 if (state
->stack_slot_type
[i
] == STACK_SPILL
)
270 verbose(" fp%d=%s", -MAX_BPF_STACK
+ i
,
271 reg_type_str
[state
->spilled_regs
[i
/ BPF_REG_SIZE
].type
]);
276 static const char *const bpf_class_string
[] = {
284 [BPF_ALU64
] = "alu64",
287 static const char *const bpf_alu_string
[16] = {
288 [BPF_ADD
>> 4] = "+=",
289 [BPF_SUB
>> 4] = "-=",
290 [BPF_MUL
>> 4] = "*=",
291 [BPF_DIV
>> 4] = "/=",
292 [BPF_OR
>> 4] = "|=",
293 [BPF_AND
>> 4] = "&=",
294 [BPF_LSH
>> 4] = "<<=",
295 [BPF_RSH
>> 4] = ">>=",
296 [BPF_NEG
>> 4] = "neg",
297 [BPF_MOD
>> 4] = "%=",
298 [BPF_XOR
>> 4] = "^=",
299 [BPF_MOV
>> 4] = "=",
300 [BPF_ARSH
>> 4] = "s>>=",
301 [BPF_END
>> 4] = "endian",
304 static const char *const bpf_ldst_string
[] = {
305 [BPF_W
>> 3] = "u32",
306 [BPF_H
>> 3] = "u16",
308 [BPF_DW
>> 3] = "u64",
311 static const char *const bpf_jmp_string
[16] = {
312 [BPF_JA
>> 4] = "jmp",
313 [BPF_JEQ
>> 4] = "==",
314 [BPF_JGT
>> 4] = ">",
315 [BPF_JLT
>> 4] = "<",
316 [BPF_JGE
>> 4] = ">=",
317 [BPF_JLE
>> 4] = "<=",
318 [BPF_JSET
>> 4] = "&",
319 [BPF_JNE
>> 4] = "!=",
320 [BPF_JSGT
>> 4] = "s>",
321 [BPF_JSLT
>> 4] = "s<",
322 [BPF_JSGE
>> 4] = "s>=",
323 [BPF_JSLE
>> 4] = "s<=",
324 [BPF_CALL
>> 4] = "call",
325 [BPF_EXIT
>> 4] = "exit",
328 static void print_bpf_insn(const struct bpf_verifier_env
*env
,
329 const struct bpf_insn
*insn
)
331 u8
class = BPF_CLASS(insn
->code
);
333 if (class == BPF_ALU
|| class == BPF_ALU64
) {
334 if (BPF_SRC(insn
->code
) == BPF_X
)
335 verbose("(%02x) %sr%d %s %sr%d\n",
336 insn
->code
, class == BPF_ALU
? "(u32) " : "",
338 bpf_alu_string
[BPF_OP(insn
->code
) >> 4],
339 class == BPF_ALU
? "(u32) " : "",
342 verbose("(%02x) %sr%d %s %s%d\n",
343 insn
->code
, class == BPF_ALU
? "(u32) " : "",
345 bpf_alu_string
[BPF_OP(insn
->code
) >> 4],
346 class == BPF_ALU
? "(u32) " : "",
348 } else if (class == BPF_STX
) {
349 if (BPF_MODE(insn
->code
) == BPF_MEM
)
350 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
352 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
354 insn
->off
, insn
->src_reg
);
355 else if (BPF_MODE(insn
->code
) == BPF_XADD
)
356 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
358 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
359 insn
->dst_reg
, insn
->off
,
362 verbose("BUG_%02x\n", insn
->code
);
363 } else if (class == BPF_ST
) {
364 if (BPF_MODE(insn
->code
) != BPF_MEM
) {
365 verbose("BUG_st_%02x\n", insn
->code
);
368 verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
370 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
372 insn
->off
, insn
->imm
);
373 } else if (class == BPF_LDX
) {
374 if (BPF_MODE(insn
->code
) != BPF_MEM
) {
375 verbose("BUG_ldx_%02x\n", insn
->code
);
378 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
379 insn
->code
, insn
->dst_reg
,
380 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
381 insn
->src_reg
, insn
->off
);
382 } else if (class == BPF_LD
) {
383 if (BPF_MODE(insn
->code
) == BPF_ABS
) {
384 verbose("(%02x) r0 = *(%s *)skb[%d]\n",
386 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
388 } else if (BPF_MODE(insn
->code
) == BPF_IND
) {
389 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
391 bpf_ldst_string
[BPF_SIZE(insn
->code
) >> 3],
392 insn
->src_reg
, insn
->imm
);
393 } else if (BPF_MODE(insn
->code
) == BPF_IMM
&&
394 BPF_SIZE(insn
->code
) == BPF_DW
) {
395 /* At this point, we already made sure that the second
396 * part of the ldimm64 insn is accessible.
398 u64 imm
= ((u64
)(insn
+ 1)->imm
<< 32) | (u32
)insn
->imm
;
399 bool map_ptr
= insn
->src_reg
== BPF_PSEUDO_MAP_FD
;
401 if (map_ptr
&& !env
->allow_ptr_leaks
)
404 verbose("(%02x) r%d = 0x%llx\n", insn
->code
,
405 insn
->dst_reg
, (unsigned long long)imm
);
407 verbose("BUG_ld_%02x\n", insn
->code
);
410 } else if (class == BPF_JMP
) {
411 u8 opcode
= BPF_OP(insn
->code
);
413 if (opcode
== BPF_CALL
) {
414 verbose("(%02x) call %s#%d\n", insn
->code
,
415 func_id_name(insn
->imm
), insn
->imm
);
416 } else if (insn
->code
== (BPF_JMP
| BPF_JA
)) {
417 verbose("(%02x) goto pc%+d\n",
418 insn
->code
, insn
->off
);
419 } else if (insn
->code
== (BPF_JMP
| BPF_EXIT
)) {
420 verbose("(%02x) exit\n", insn
->code
);
421 } else if (BPF_SRC(insn
->code
) == BPF_X
) {
422 verbose("(%02x) if r%d %s r%d goto pc%+d\n",
423 insn
->code
, insn
->dst_reg
,
424 bpf_jmp_string
[BPF_OP(insn
->code
) >> 4],
425 insn
->src_reg
, insn
->off
);
427 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
428 insn
->code
, insn
->dst_reg
,
429 bpf_jmp_string
[BPF_OP(insn
->code
) >> 4],
430 insn
->imm
, insn
->off
);
433 verbose("(%02x) %s\n", insn
->code
, bpf_class_string
[class]);
437 static int pop_stack(struct bpf_verifier_env
*env
, int *prev_insn_idx
)
439 struct bpf_verifier_stack_elem
*elem
;
442 if (env
->head
== NULL
)
445 memcpy(&env
->cur_state
, &env
->head
->st
, sizeof(env
->cur_state
));
446 insn_idx
= env
->head
->insn_idx
;
448 *prev_insn_idx
= env
->head
->prev_insn_idx
;
449 elem
= env
->head
->next
;
456 static struct bpf_verifier_state
*push_stack(struct bpf_verifier_env
*env
,
457 int insn_idx
, int prev_insn_idx
)
459 struct bpf_verifier_stack_elem
*elem
;
461 elem
= kmalloc(sizeof(struct bpf_verifier_stack_elem
), GFP_KERNEL
);
465 memcpy(&elem
->st
, &env
->cur_state
, sizeof(env
->cur_state
));
466 elem
->insn_idx
= insn_idx
;
467 elem
->prev_insn_idx
= prev_insn_idx
;
468 elem
->next
= env
->head
;
471 if (env
->stack_size
> BPF_COMPLEXITY_LIMIT_STACK
) {
472 verbose("BPF program is too complex\n");
477 /* pop all elements and return */
478 while (pop_stack(env
, NULL
) >= 0);
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}
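/* Worked example (added): __mark_reg_known(reg, 5) leaves var_off as the
 * constant tnum {value = 5, mask = 0} and sets all four bounds
 * (smin/smax/umin/umax) to 5, i.e. the register's value is fully known.
 */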
/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose("mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}
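/* Worked example (added): if var_off is the tnum {value = 0x0, mask = 0x7},
 * every possible value lies in [0, 7], so the clamps above raise smin_value
 * to at least 0 and lower smax_value/umax_value to at most 7, even if the
 * previous bounds were looser.
 */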
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}
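/* Worked example (added): suppose smin_value = -1 and smax_value = 1 (bounds
 * straddle the sign boundary) but umax_value = 1. Then (s64)umax_value >= 0,
 * so the "Positive" branch above copies the unsigned bounds into the signed
 * ones and the register is proven to be in [0, 1] in both views.
 */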
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}
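/* Note (added; describes the tnum helpers as assumed here): tnum_range(umin,
 * umax) returns a tnum covering every value in [umin, umax], e.g.
 * tnum_range(0, 7) is {value = 0, mask = 7}, and tnum_intersect() keeps the
 * known bits of both operands, so var_off never becomes less precise.
 */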
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose("mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose("mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}
static void init_reg_state(struct bpf_reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(regs, i);
		regs[i].live = REG_LIVE_NONE;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(regs, BPF_REG_FP);

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(regs, BPF_REG_1);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
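/* Example (added): for BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2) the
 * verifier checks R2 with SRC_OP (it must already be readable), while the
 * destination of BPF_MOV64_IMM(BPF_REG_2, 5) is checked as a DST_OP, i.e.
 * only writability matters and the register is then marked as written.
 */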
652 static void mark_reg_read(const struct bpf_verifier_state
*state
, u32 regno
)
654 struct bpf_verifier_state
*parent
= state
->parent
;
656 if (regno
== BPF_REG_FP
)
657 /* We don't need to worry about FP liveness because it's read-only */
661 /* if read wasn't screened by an earlier write ... */
662 if (state
->regs
[regno
].live
& REG_LIVE_WRITTEN
)
664 /* ... then we depend on parent's value */
665 parent
->regs
[regno
].live
|= REG_LIVE_READ
;
667 parent
= state
->parent
;
671 static int check_reg_arg(struct bpf_verifier_env
*env
, u32 regno
,
674 struct bpf_reg_state
*regs
= env
->cur_state
.regs
;
676 if (regno
>= MAX_BPF_REG
) {
677 verbose("R%d is invalid\n", regno
);
682 /* check whether register used as source operand can be read */
683 if (regs
[regno
].type
== NOT_INIT
) {
684 verbose("R%d !read_ok\n", regno
);
687 mark_reg_read(&env
->cur_state
, regno
);
689 /* check whether register used as dest operand can be written to */
690 if (regno
== BPF_REG_FP
) {
691 verbose("frame pointer is read only\n");
694 regs
[regno
].live
|= REG_LIVE_WRITTEN
;
696 mark_reg_unknown(regs
, regno
);
701 static bool is_spillable_regtype(enum bpf_reg_type type
)
704 case PTR_TO_MAP_VALUE
:
705 case PTR_TO_MAP_VALUE_OR_NULL
:
709 case PTR_TO_PACKET_END
:
710 case CONST_PTR_TO_MAP
:
717 /* check_stack_read/write functions track spill/fill of registers,
718 * stack boundary and alignment are checked in check_mem_access()
720 static int check_stack_write(struct bpf_verifier_state
*state
, int off
,
721 int size
, int value_regno
)
723 int i
, spi
= (MAX_BPF_STACK
+ off
) / BPF_REG_SIZE
;
724 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
725 * so it's aligned access and [off, off + size) are within stack limits
728 if (value_regno
>= 0 &&
729 is_spillable_regtype(state
->regs
[value_regno
].type
)) {
731 /* register containing pointer is being spilled into stack */
732 if (size
!= BPF_REG_SIZE
) {
733 verbose("invalid size of register spill\n");
737 /* save register state */
738 state
->spilled_regs
[spi
] = state
->regs
[value_regno
];
739 state
->spilled_regs
[spi
].live
|= REG_LIVE_WRITTEN
;
741 for (i
= 0; i
< BPF_REG_SIZE
; i
++)
742 state
->stack_slot_type
[MAX_BPF_STACK
+ off
+ i
] = STACK_SPILL
;
744 /* regular write of data into stack */
745 state
->spilled_regs
[spi
] = (struct bpf_reg_state
) {};
747 for (i
= 0; i
< size
; i
++)
748 state
->stack_slot_type
[MAX_BPF_STACK
+ off
+ i
] = STACK_MISC
;
753 static void mark_stack_slot_read(const struct bpf_verifier_state
*state
, int slot
)
755 struct bpf_verifier_state
*parent
= state
->parent
;
758 /* if read wasn't screened by an earlier write ... */
759 if (state
->spilled_regs
[slot
].live
& REG_LIVE_WRITTEN
)
761 /* ... then we depend on parent's value */
762 parent
->spilled_regs
[slot
].live
|= REG_LIVE_READ
;
764 parent
= state
->parent
;
768 static int check_stack_read(struct bpf_verifier_state
*state
, int off
, int size
,
774 slot_type
= &state
->stack_slot_type
[MAX_BPF_STACK
+ off
];
776 if (slot_type
[0] == STACK_SPILL
) {
777 if (size
!= BPF_REG_SIZE
) {
778 verbose("invalid size of register spill\n");
781 for (i
= 1; i
< BPF_REG_SIZE
; i
++) {
782 if (slot_type
[i
] != STACK_SPILL
) {
783 verbose("corrupted spill memory\n");
788 spi
= (MAX_BPF_STACK
+ off
) / BPF_REG_SIZE
;
790 if (value_regno
>= 0) {
791 /* restore register state from stack */
792 state
->regs
[value_regno
] = state
->spilled_regs
[spi
];
793 mark_stack_slot_read(state
, spi
);
797 for (i
= 0; i
< size
; i
++) {
798 if (slot_type
[i
] != STACK_MISC
) {
799 verbose("invalid read from stack off %d+%d size %d\n",
804 if (value_regno
>= 0)
805 /* have read misc data from the stack */
806 mark_reg_unknown(state
->regs
, value_regno
);
811 /* check read/write into map element returned by bpf_map_lookup_elem() */
812 static int __check_map_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
815 struct bpf_map
*map
= env
->cur_state
.regs
[regno
].map_ptr
;
817 if (off
< 0 || size
<= 0 || off
+ size
> map
->value_size
) {
818 verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
819 map
->value_size
, off
, size
);
825 /* check read/write into a map element with possible variable offset */
826 static int check_map_access(struct bpf_verifier_env
*env
, u32 regno
,
829 struct bpf_verifier_state
*state
= &env
->cur_state
;
830 struct bpf_reg_state
*reg
= &state
->regs
[regno
];
833 /* We may have adjusted the register to this map value, so we
834 * need to try adding each of min_value and max_value to off
835 * to make sure our theoretical access will be safe.
838 print_verifier_state(state
);
839 /* The minimum value is only important with signed
840 * comparisons where we can't assume the floor of a
841 * value is 0. If we are using signed variables for our
842 * index'es we need to make sure that whatever we use
843 * will have a set floor within our range.
845 if (reg
->smin_value
< 0) {
846 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
850 err
= __check_map_access(env
, regno
, reg
->smin_value
+ off
, size
);
852 verbose("R%d min value is outside of the array range\n", regno
);
856 /* If we haven't set a max value then we need to bail since we can't be
857 * sure we won't do bad things.
858 * If reg->umax_value + off could overflow, treat that as unbounded too.
860 if (reg
->umax_value
>= BPF_MAX_VAR_OFF
) {
861 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
865 err
= __check_map_access(env
, regno
, reg
->umax_value
+ off
, size
);
867 verbose("R%d max value is outside of the array range\n", regno
);
871 #define MAX_PACKET_OFF 0xffff
873 static bool may_access_direct_pkt_data(struct bpf_verifier_env
*env
,
874 const struct bpf_call_arg_meta
*meta
,
875 enum bpf_access_type t
)
877 switch (env
->prog
->type
) {
878 case BPF_PROG_TYPE_LWT_IN
:
879 case BPF_PROG_TYPE_LWT_OUT
:
880 /* dst_input() and dst_output() can't write for now */
884 case BPF_PROG_TYPE_SCHED_CLS
:
885 case BPF_PROG_TYPE_SCHED_ACT
:
886 case BPF_PROG_TYPE_XDP
:
887 case BPF_PROG_TYPE_LWT_XMIT
:
888 case BPF_PROG_TYPE_SK_SKB
:
890 return meta
->pkt_access
;
892 env
->seen_direct_write
= true;
899 static int __check_packet_access(struct bpf_verifier_env
*env
, u32 regno
,
902 struct bpf_reg_state
*regs
= env
->cur_state
.regs
;
903 struct bpf_reg_state
*reg
= ®s
[regno
];
905 if (off
< 0 || size
<= 0 || (u64
)off
+ size
> reg
->range
) {
906 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
907 off
, size
, regno
, reg
->id
, reg
->off
, reg
->range
);
913 static int check_packet_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
916 struct bpf_reg_state
*regs
= env
->cur_state
.regs
;
917 struct bpf_reg_state
*reg
= ®s
[regno
];
920 /* We may have added a variable offset to the packet pointer; but any
921 * reg->range we have comes after that. We are only checking the fixed
925 /* We don't allow negative numbers, because we aren't tracking enough
926 * detail to prove they're safe.
928 if (reg
->smin_value
< 0) {
929 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
933 err
= __check_packet_access(env
, regno
, off
, size
);
935 verbose("R%d offset is outside of the packet\n", regno
);
941 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
942 static int check_ctx_access(struct bpf_verifier_env
*env
, int insn_idx
, int off
, int size
,
943 enum bpf_access_type t
, enum bpf_reg_type
*reg_type
)
945 struct bpf_insn_access_aux info
= {
946 .reg_type
= *reg_type
,
949 /* for analyzer ctx accesses are already validated and converted */
950 if (env
->analyzer_ops
)
953 if (env
->prog
->aux
->ops
->is_valid_access
&&
954 env
->prog
->aux
->ops
->is_valid_access(off
, size
, t
, &info
)) {
955 /* A non zero info.ctx_field_size indicates that this field is a
956 * candidate for later verifier transformation to load the whole
957 * field and then apply a mask when accessed with a narrower
958 * access than actual ctx access size. A zero info.ctx_field_size
959 * will only allow for whole field access and rejects any other
960 * type of narrower access.
962 env
->insn_aux_data
[insn_idx
].ctx_field_size
= info
.ctx_field_size
;
963 *reg_type
= info
.reg_type
;
965 /* remember the offset of last byte accessed in ctx */
966 if (env
->prog
->aux
->max_ctx_offset
< off
+ size
)
967 env
->prog
->aux
->max_ctx_offset
= off
+ size
;
971 verbose("invalid bpf_context access off=%d size=%d\n", off
, size
);
975 static bool __is_pointer_value(bool allow_ptr_leaks
,
976 const struct bpf_reg_state
*reg
)
981 return reg
->type
!= SCALAR_VALUE
;
984 static bool is_pointer_value(struct bpf_verifier_env
*env
, int regno
)
986 return __is_pointer_value(env
->allow_ptr_leaks
, &env
->cur_state
.regs
[regno
]);
989 static int check_pkt_ptr_alignment(const struct bpf_reg_state
*reg
,
990 int off
, int size
, bool strict
)
995 /* Byte size accesses are always allowed. */
996 if (!strict
|| size
== 1)
999 /* For platforms that do not have a Kconfig enabling
1000 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1001 * NET_IP_ALIGN is universally set to '2'. And on platforms
1002 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1003 * to this code only in strict mode where we want to emulate
1004 * the NET_IP_ALIGN==2 checking. Therefore use an
1005 * unconditional IP align value of '2'.
1009 reg_off
= tnum_add(reg
->var_off
, tnum_const(ip_align
+ reg
->off
+ off
));
1010 if (!tnum_is_aligned(reg_off
, size
)) {
1013 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1014 verbose("misaligned packet access off %d+%s+%d+%d size %d\n",
1015 ip_align
, tn_buf
, reg
->off
, off
, size
);
1022 static int check_generic_ptr_alignment(const struct bpf_reg_state
*reg
,
1023 const char *pointer_desc
,
1024 int off
, int size
, bool strict
)
1026 struct tnum reg_off
;
1028 /* Byte size accesses are always allowed. */
1029 if (!strict
|| size
== 1)
1032 reg_off
= tnum_add(reg
->var_off
, tnum_const(reg
->off
+ off
));
1033 if (!tnum_is_aligned(reg_off
, size
)) {
1036 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1037 verbose("misaligned %saccess off %s+%d+%d size %d\n",
1038 pointer_desc
, tn_buf
, reg
->off
, off
, size
);
1045 static int check_ptr_alignment(struct bpf_verifier_env
*env
,
1046 const struct bpf_reg_state
*reg
,
1049 bool strict
= env
->strict_alignment
;
1050 const char *pointer_desc
= "";
1052 switch (reg
->type
) {
1054 /* special case, because of NET_IP_ALIGN */
1055 return check_pkt_ptr_alignment(reg
, off
, size
, strict
);
1056 case PTR_TO_MAP_VALUE
:
1057 pointer_desc
= "value ";
1060 pointer_desc
= "context ";
1063 pointer_desc
= "stack ";
1068 return check_generic_ptr_alignment(reg
, pointer_desc
, off
, size
, strict
);
1071 /* check whether memory at (regno + off) is accessible for t = (read | write)
1072 * if t==write, value_regno is a register which value is stored into memory
1073 * if t==read, value_regno is a register which will receive the value from memory
1074 * if t==write && value_regno==-1, some unknown value is stored into memory
1075 * if t==read && value_regno==-1, don't care what we read from memory
1077 static int check_mem_access(struct bpf_verifier_env
*env
, int insn_idx
, u32 regno
, int off
,
1078 int bpf_size
, enum bpf_access_type t
,
1081 struct bpf_verifier_state
*state
= &env
->cur_state
;
1082 struct bpf_reg_state
*reg
= &state
->regs
[regno
];
1085 size
= bpf_size_to_bytes(bpf_size
);
1089 /* alignment checks will add in reg->off themselves */
1090 err
= check_ptr_alignment(env
, reg
, off
, size
);
1094 /* for access checks, reg->off is just part of off */
1097 if (reg
->type
== PTR_TO_MAP_VALUE
) {
1098 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1099 is_pointer_value(env
, value_regno
)) {
1100 verbose("R%d leaks addr into map\n", value_regno
);
1104 err
= check_map_access(env
, regno
, off
, size
);
1105 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
1106 mark_reg_unknown(state
->regs
, value_regno
);
1108 } else if (reg
->type
== PTR_TO_CTX
) {
1109 enum bpf_reg_type reg_type
= SCALAR_VALUE
;
1111 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1112 is_pointer_value(env
, value_regno
)) {
1113 verbose("R%d leaks addr into ctx\n", value_regno
);
1116 /* ctx accesses must be at a fixed offset, so that we can
1117 * determine what type of data were returned.
1120 verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1121 regno
, reg
->off
, off
- reg
->off
);
1124 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
1127 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1128 verbose("variable ctx access var_off=%s off=%d size=%d",
1132 err
= check_ctx_access(env
, insn_idx
, off
, size
, t
, ®_type
);
1133 if (!err
&& t
== BPF_READ
&& value_regno
>= 0) {
1134 /* ctx access returns either a scalar, or a
1135 * PTR_TO_PACKET[_END]. In the latter case, we know
1136 * the offset is zero.
1138 if (reg_type
== SCALAR_VALUE
)
1139 mark_reg_unknown(state
->regs
, value_regno
);
1141 mark_reg_known_zero(state
->regs
, value_regno
);
1142 state
->regs
[value_regno
].id
= 0;
1143 state
->regs
[value_regno
].off
= 0;
1144 state
->regs
[value_regno
].range
= 0;
1145 state
->regs
[value_regno
].type
= reg_type
;
1148 } else if (reg
->type
== PTR_TO_STACK
) {
1149 /* stack accesses must be at a fixed offset, so that we can
1150 * determine what type of data were returned.
1151 * See check_stack_read().
1153 if (!tnum_is_const(reg
->var_off
)) {
1156 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1157 verbose("variable stack access var_off=%s off=%d size=%d",
1161 off
+= reg
->var_off
.value
;
1162 if (off
>= 0 || off
< -MAX_BPF_STACK
) {
1163 verbose("invalid stack off=%d size=%d\n", off
, size
);
1167 if (env
->prog
->aux
->stack_depth
< -off
)
1168 env
->prog
->aux
->stack_depth
= -off
;
1170 if (t
== BPF_WRITE
) {
1171 if (!env
->allow_ptr_leaks
&&
1172 state
->stack_slot_type
[MAX_BPF_STACK
+ off
] == STACK_SPILL
&&
1173 size
!= BPF_REG_SIZE
) {
1174 verbose("attempt to corrupt spilled pointer on stack\n");
1177 err
= check_stack_write(state
, off
, size
, value_regno
);
1179 err
= check_stack_read(state
, off
, size
, value_regno
);
1181 } else if (reg
->type
== PTR_TO_PACKET
) {
1182 if (t
== BPF_WRITE
&& !may_access_direct_pkt_data(env
, NULL
, t
)) {
1183 verbose("cannot write into packet\n");
1186 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1187 is_pointer_value(env
, value_regno
)) {
1188 verbose("R%d leaks addr into packet\n", value_regno
);
1191 err
= check_packet_access(env
, regno
, off
, size
);
1192 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
1193 mark_reg_unknown(state
->regs
, value_regno
);
1195 verbose("R%d invalid mem access '%s'\n",
1196 regno
, reg_type_str
[reg
->type
]);
1200 if (!err
&& size
< BPF_REG_SIZE
&& value_regno
>= 0 && t
== BPF_READ
&&
1201 state
->regs
[value_regno
].type
== SCALAR_VALUE
) {
1202 /* b/h/w load zero-extends, mark upper bits as known 0 */
1203 state
->regs
[value_regno
].var_off
= tnum_cast(
1204 state
->regs
[value_regno
].var_off
, size
);
1205 __update_reg_bounds(&state
->regs
[value_regno
]);
1210 static int check_xadd(struct bpf_verifier_env
*env
, int insn_idx
, struct bpf_insn
*insn
)
1214 if ((BPF_SIZE(insn
->code
) != BPF_W
&& BPF_SIZE(insn
->code
) != BPF_DW
) ||
1216 verbose("BPF_XADD uses reserved fields\n");
1220 /* check src1 operand */
1221 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
1225 /* check src2 operand */
1226 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
1230 if (is_pointer_value(env
, insn
->src_reg
)) {
1231 verbose("R%d leaks addr into mem\n", insn
->src_reg
);
1235 /* check whether atomic_add can read the memory */
1236 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
1237 BPF_SIZE(insn
->code
), BPF_READ
, -1);
1241 /* check whether atomic_add can write into the same memory */
1242 return check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
1243 BPF_SIZE(insn
->code
), BPF_WRITE
, -1);
1246 /* Does this register contain a constant zero? */
1247 static bool register_is_null(struct bpf_reg_state reg
)
1249 return reg
.type
== SCALAR_VALUE
&& tnum_equals_const(reg
.var_off
, 0);
1252 /* when register 'regno' is passed into function that will read 'access_size'
1253 * bytes from that pointer, make sure that it's within stack boundary
1254 * and all elements of stack are initialized.
1255 * Unlike most pointer bounds-checking functions, this one doesn't take an
1256 * 'off' argument, so it has to add in reg->off itself.
1258 static int check_stack_boundary(struct bpf_verifier_env
*env
, int regno
,
1259 int access_size
, bool zero_size_allowed
,
1260 struct bpf_call_arg_meta
*meta
)
1262 struct bpf_verifier_state
*state
= &env
->cur_state
;
1263 struct bpf_reg_state
*regs
= state
->regs
;
1266 if (regs
[regno
].type
!= PTR_TO_STACK
) {
1267 /* Allow zero-byte read from NULL, regardless of pointer type */
1268 if (zero_size_allowed
&& access_size
== 0 &&
1269 register_is_null(regs
[regno
]))
1272 verbose("R%d type=%s expected=%s\n", regno
,
1273 reg_type_str
[regs
[regno
].type
],
1274 reg_type_str
[PTR_TO_STACK
]);
1278 /* Only allow fixed-offset stack reads */
1279 if (!tnum_is_const(regs
[regno
].var_off
)) {
1282 tnum_strn(tn_buf
, sizeof(tn_buf
), regs
[regno
].var_off
);
1283 verbose("invalid variable stack read R%d var_off=%s\n",
1286 off
= regs
[regno
].off
+ regs
[regno
].var_off
.value
;
1287 if (off
>= 0 || off
< -MAX_BPF_STACK
|| off
+ access_size
> 0 ||
1289 verbose("invalid stack type R%d off=%d access_size=%d\n",
1290 regno
, off
, access_size
);
1294 if (env
->prog
->aux
->stack_depth
< -off
)
1295 env
->prog
->aux
->stack_depth
= -off
;
1297 if (meta
&& meta
->raw_mode
) {
1298 meta
->access_size
= access_size
;
1299 meta
->regno
= regno
;
1303 for (i
= 0; i
< access_size
; i
++) {
1304 if (state
->stack_slot_type
[MAX_BPF_STACK
+ off
+ i
] != STACK_MISC
) {
1305 verbose("invalid indirect read from stack off %d+%d size %d\n",
1306 off
, i
, access_size
);
1313 static int check_helper_mem_access(struct bpf_verifier_env
*env
, int regno
,
1314 int access_size
, bool zero_size_allowed
,
1315 struct bpf_call_arg_meta
*meta
)
1317 struct bpf_reg_state
*regs
= env
->cur_state
.regs
, *reg
= ®s
[regno
];
1319 switch (reg
->type
) {
1321 return check_packet_access(env
, regno
, reg
->off
, access_size
);
1322 case PTR_TO_MAP_VALUE
:
1323 return check_map_access(env
, regno
, reg
->off
, access_size
);
1324 default: /* scalar_value|ptr_to_stack or invalid ptr */
1325 return check_stack_boundary(env
, regno
, access_size
,
1326 zero_size_allowed
, meta
);
1330 static int check_func_arg(struct bpf_verifier_env
*env
, u32 regno
,
1331 enum bpf_arg_type arg_type
,
1332 struct bpf_call_arg_meta
*meta
)
1334 struct bpf_reg_state
*regs
= env
->cur_state
.regs
, *reg
= ®s
[regno
];
1335 enum bpf_reg_type expected_type
, type
= reg
->type
;
1338 if (arg_type
== ARG_DONTCARE
)
1341 err
= check_reg_arg(env
, regno
, SRC_OP
);
1345 if (arg_type
== ARG_ANYTHING
) {
1346 if (is_pointer_value(env
, regno
)) {
1347 verbose("R%d leaks addr into helper function\n", regno
);
1353 if (type
== PTR_TO_PACKET
&&
1354 !may_access_direct_pkt_data(env
, meta
, BPF_READ
)) {
1355 verbose("helper access to the packet is not allowed\n");
1359 if (arg_type
== ARG_PTR_TO_MAP_KEY
||
1360 arg_type
== ARG_PTR_TO_MAP_VALUE
) {
1361 expected_type
= PTR_TO_STACK
;
1362 if (type
!= PTR_TO_PACKET
&& type
!= expected_type
)
1364 } else if (arg_type
== ARG_CONST_SIZE
||
1365 arg_type
== ARG_CONST_SIZE_OR_ZERO
) {
1366 expected_type
= SCALAR_VALUE
;
1367 if (type
!= expected_type
)
1369 } else if (arg_type
== ARG_CONST_MAP_PTR
) {
1370 expected_type
= CONST_PTR_TO_MAP
;
1371 if (type
!= expected_type
)
1373 } else if (arg_type
== ARG_PTR_TO_CTX
) {
1374 expected_type
= PTR_TO_CTX
;
1375 if (type
!= expected_type
)
1377 } else if (arg_type
== ARG_PTR_TO_MEM
||
1378 arg_type
== ARG_PTR_TO_UNINIT_MEM
) {
1379 expected_type
= PTR_TO_STACK
;
1380 /* One exception here. In case function allows for NULL to be
1381 * passed in as argument, it's a SCALAR_VALUE type. Final test
1382 * happens during stack boundary checking.
1384 if (register_is_null(*reg
))
1385 /* final test in check_stack_boundary() */;
1386 else if (type
!= PTR_TO_PACKET
&& type
!= PTR_TO_MAP_VALUE
&&
1387 type
!= expected_type
)
1389 meta
->raw_mode
= arg_type
== ARG_PTR_TO_UNINIT_MEM
;
1391 verbose("unsupported arg_type %d\n", arg_type
);
1395 if (arg_type
== ARG_CONST_MAP_PTR
) {
1396 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1397 meta
->map_ptr
= reg
->map_ptr
;
1398 } else if (arg_type
== ARG_PTR_TO_MAP_KEY
) {
1399 /* bpf_map_xxx(..., map_ptr, ..., key) call:
1400 * check that [key, key + map->key_size) are within
1401 * stack limits and initialized
1403 if (!meta
->map_ptr
) {
1404 /* in function declaration map_ptr must come before
1405 * map_key, so that it's verified and known before
1406 * we have to check map_key here. Otherwise it means
1407 * that kernel subsystem misconfigured verifier
1409 verbose("invalid map_ptr to access map->key\n");
1412 if (type
== PTR_TO_PACKET
)
1413 err
= check_packet_access(env
, regno
, reg
->off
,
1414 meta
->map_ptr
->key_size
);
1416 err
= check_stack_boundary(env
, regno
,
1417 meta
->map_ptr
->key_size
,
1419 } else if (arg_type
== ARG_PTR_TO_MAP_VALUE
) {
1420 /* bpf_map_xxx(..., map_ptr, ..., value) call:
1421 * check [value, value + map->value_size) validity
1423 if (!meta
->map_ptr
) {
1424 /* kernel subsystem misconfigured verifier */
1425 verbose("invalid map_ptr to access map->value\n");
1428 if (type
== PTR_TO_PACKET
)
1429 err
= check_packet_access(env
, regno
, reg
->off
,
1430 meta
->map_ptr
->value_size
);
1432 err
= check_stack_boundary(env
, regno
,
1433 meta
->map_ptr
->value_size
,
1435 } else if (arg_type
== ARG_CONST_SIZE
||
1436 arg_type
== ARG_CONST_SIZE_OR_ZERO
) {
1437 bool zero_size_allowed
= (arg_type
== ARG_CONST_SIZE_OR_ZERO
);
1439 /* bpf_xxx(..., buf, len) call will access 'len' bytes
1440 * from stack pointer 'buf'. Check it
1441 * note: regno == len, regno - 1 == buf
1444 /* kernel subsystem misconfigured verifier */
1445 verbose("ARG_CONST_SIZE cannot be first argument\n");
1449 /* The register is SCALAR_VALUE; the access check
1450 * happens using its boundaries.
1453 if (!tnum_is_const(reg
->var_off
))
1454 /* For unprivileged variable accesses, disable raw
1455 * mode so that the program is required to
1456 * initialize all the memory that the helper could
1457 * just partially fill up.
1461 if (reg
->smin_value
< 0) {
1462 verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
1467 if (reg
->umin_value
== 0) {
1468 err
= check_helper_mem_access(env
, regno
- 1, 0,
1475 if (reg
->umax_value
>= BPF_MAX_VAR_SIZ
) {
1476 verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1480 err
= check_helper_mem_access(env
, regno
- 1,
1482 zero_size_allowed
, meta
);
1487 verbose("R%d type=%s expected=%s\n", regno
,
1488 reg_type_str
[type
], reg_type_str
[expected_type
]);
1492 static int check_map_func_compatibility(struct bpf_map
*map
, int func_id
)
1497 /* We need a two way check, first is from map perspective ... */
1498 switch (map
->map_type
) {
1499 case BPF_MAP_TYPE_PROG_ARRAY
:
1500 if (func_id
!= BPF_FUNC_tail_call
)
1503 case BPF_MAP_TYPE_PERF_EVENT_ARRAY
:
1504 if (func_id
!= BPF_FUNC_perf_event_read
&&
1505 func_id
!= BPF_FUNC_perf_event_output
)
1508 case BPF_MAP_TYPE_STACK_TRACE
:
1509 if (func_id
!= BPF_FUNC_get_stackid
)
1512 case BPF_MAP_TYPE_CGROUP_ARRAY
:
1513 if (func_id
!= BPF_FUNC_skb_under_cgroup
&&
1514 func_id
!= BPF_FUNC_current_task_under_cgroup
)
1517 /* devmap returns a pointer to a live net_device ifindex that we cannot
1518 * allow to be modified from bpf side. So do not allow lookup elements
1521 case BPF_MAP_TYPE_DEVMAP
:
1522 if (func_id
!= BPF_FUNC_redirect_map
)
1525 case BPF_MAP_TYPE_ARRAY_OF_MAPS
:
1526 case BPF_MAP_TYPE_HASH_OF_MAPS
:
1527 if (func_id
!= BPF_FUNC_map_lookup_elem
)
1530 case BPF_MAP_TYPE_SOCKMAP
:
1531 if (func_id
!= BPF_FUNC_sk_redirect_map
&&
1532 func_id
!= BPF_FUNC_sock_map_update
&&
1533 func_id
!= BPF_FUNC_map_delete_elem
)
1540 /* ... and second from the function itself. */
1542 case BPF_FUNC_tail_call
:
1543 if (map
->map_type
!= BPF_MAP_TYPE_PROG_ARRAY
)
1546 case BPF_FUNC_perf_event_read
:
1547 case BPF_FUNC_perf_event_output
:
1548 if (map
->map_type
!= BPF_MAP_TYPE_PERF_EVENT_ARRAY
)
1551 case BPF_FUNC_get_stackid
:
1552 if (map
->map_type
!= BPF_MAP_TYPE_STACK_TRACE
)
1555 case BPF_FUNC_current_task_under_cgroup
:
1556 case BPF_FUNC_skb_under_cgroup
:
1557 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_ARRAY
)
1560 case BPF_FUNC_redirect_map
:
1561 if (map
->map_type
!= BPF_MAP_TYPE_DEVMAP
)
1564 case BPF_FUNC_sk_redirect_map
:
1565 if (map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
)
1568 case BPF_FUNC_sock_map_update
:
1569 if (map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
)
1578 verbose("cannot pass map_type %d into func %s#%d\n",
1579 map
->map_type
, func_id_name(func_id
), func_id
);
1583 static int check_raw_mode(const struct bpf_func_proto
*fn
)
1587 if (fn
->arg1_type
== ARG_PTR_TO_UNINIT_MEM
)
1589 if (fn
->arg2_type
== ARG_PTR_TO_UNINIT_MEM
)
1591 if (fn
->arg3_type
== ARG_PTR_TO_UNINIT_MEM
)
1593 if (fn
->arg4_type
== ARG_PTR_TO_UNINIT_MEM
)
1595 if (fn
->arg5_type
== ARG_PTR_TO_UNINIT_MEM
)
1598 return count
> 1 ? -EINVAL
: 0;
1601 /* Packet data might have moved, any old PTR_TO_PACKET[_END] are now invalid,
1602 * so turn them into unknown SCALAR_VALUE.
1604 static void clear_all_pkt_pointers(struct bpf_verifier_env
*env
)
1606 struct bpf_verifier_state
*state
= &env
->cur_state
;
1607 struct bpf_reg_state
*regs
= state
->regs
, *reg
;
1610 for (i
= 0; i
< MAX_BPF_REG
; i
++)
1611 if (regs
[i
].type
== PTR_TO_PACKET
||
1612 regs
[i
].type
== PTR_TO_PACKET_END
)
1613 mark_reg_unknown(regs
, i
);
1615 for (i
= 0; i
< MAX_BPF_STACK
; i
+= BPF_REG_SIZE
) {
1616 if (state
->stack_slot_type
[i
] != STACK_SPILL
)
1618 reg
= &state
->spilled_regs
[i
/ BPF_REG_SIZE
];
1619 if (reg
->type
!= PTR_TO_PACKET
&&
1620 reg
->type
!= PTR_TO_PACKET_END
)
1622 __mark_reg_unknown(reg
);
1626 static int check_call(struct bpf_verifier_env
*env
, int func_id
, int insn_idx
)
1628 struct bpf_verifier_state
*state
= &env
->cur_state
;
1629 const struct bpf_func_proto
*fn
= NULL
;
1630 struct bpf_reg_state
*regs
= state
->regs
;
1631 struct bpf_call_arg_meta meta
;
1635 /* find function prototype */
1636 if (func_id
< 0 || func_id
>= __BPF_FUNC_MAX_ID
) {
1637 verbose("invalid func %s#%d\n", func_id_name(func_id
), func_id
);
1641 if (env
->prog
->aux
->ops
->get_func_proto
)
1642 fn
= env
->prog
->aux
->ops
->get_func_proto(func_id
);
1645 verbose("unknown func %s#%d\n", func_id_name(func_id
), func_id
);
1649 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1650 if (!env
->prog
->gpl_compatible
&& fn
->gpl_only
) {
1651 verbose("cannot call GPL only function from proprietary program\n");
1655 changes_data
= bpf_helper_changes_pkt_data(fn
->func
);
1657 memset(&meta
, 0, sizeof(meta
));
1658 meta
.pkt_access
= fn
->pkt_access
;
1660 /* We only support one arg being in raw mode at the moment, which
1661 * is sufficient for the helper functions we have right now.
1663 err
= check_raw_mode(fn
);
1665 verbose("kernel subsystem misconfigured func %s#%d\n",
1666 func_id_name(func_id
), func_id
);
1671 err
= check_func_arg(env
, BPF_REG_1
, fn
->arg1_type
, &meta
);
1674 err
= check_func_arg(env
, BPF_REG_2
, fn
->arg2_type
, &meta
);
1677 err
= check_func_arg(env
, BPF_REG_3
, fn
->arg3_type
, &meta
);
1680 err
= check_func_arg(env
, BPF_REG_4
, fn
->arg4_type
, &meta
);
1683 err
= check_func_arg(env
, BPF_REG_5
, fn
->arg5_type
, &meta
);
1687 /* Mark slots with STACK_MISC in case of raw mode, stack offset
1688 * is inferred from register state.
1690 for (i
= 0; i
< meta
.access_size
; i
++) {
1691 err
= check_mem_access(env
, insn_idx
, meta
.regno
, i
, BPF_B
, BPF_WRITE
, -1);
1696 /* reset caller saved regs */
1697 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
1698 mark_reg_not_init(regs
, caller_saved
[i
]);
1699 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
1702 /* update return register (already marked as written above) */
1703 if (fn
->ret_type
== RET_INTEGER
) {
1704 /* sets type to SCALAR_VALUE */
1705 mark_reg_unknown(regs
, BPF_REG_0
);
1706 } else if (fn
->ret_type
== RET_VOID
) {
1707 regs
[BPF_REG_0
].type
= NOT_INIT
;
1708 } else if (fn
->ret_type
== RET_PTR_TO_MAP_VALUE_OR_NULL
) {
1709 struct bpf_insn_aux_data
*insn_aux
;
1711 regs
[BPF_REG_0
].type
= PTR_TO_MAP_VALUE_OR_NULL
;
1712 /* There is no offset yet applied, variable or fixed */
1713 mark_reg_known_zero(regs
, BPF_REG_0
);
1714 regs
[BPF_REG_0
].off
= 0;
1715 /* remember map_ptr, so that check_map_access()
1716 * can check 'value_size' boundary of memory access
1717 * to map element returned from bpf_map_lookup_elem()
1719 if (meta
.map_ptr
== NULL
) {
1720 verbose("kernel subsystem misconfigured verifier\n");
1723 regs
[BPF_REG_0
].map_ptr
= meta
.map_ptr
;
1724 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
1725 insn_aux
= &env
->insn_aux_data
[insn_idx
];
1726 if (!insn_aux
->map_ptr
)
1727 insn_aux
->map_ptr
= meta
.map_ptr
;
1728 else if (insn_aux
->map_ptr
!= meta
.map_ptr
)
1729 insn_aux
->map_ptr
= BPF_MAP_PTR_POISON
;
1731 verbose("unknown return type %d of func %s#%d\n",
1732 fn
->ret_type
, func_id_name(func_id
), func_id
);
1736 err
= check_map_func_compatibility(meta
.map_ptr
, func_id
);
1741 clear_all_pkt_pointers(env
);
1745 static void coerce_reg_to_32(struct bpf_reg_state
*reg
)
1747 /* clear high 32 bits */
1748 reg
->var_off
= tnum_cast(reg
->var_off
, 4);
1750 __update_reg_bounds(reg
);
1753 static bool signed_add_overflows(s64 a
, s64 b
)
1755 /* Do the add in u64, where overflow is well-defined */
1756 s64 res
= (s64
)((u64
)a
+ (u64
)b
);
1763 static bool signed_sub_overflows(s64 a
, s64 b
)
1765 /* Do the sub in u64, where overflow is well-defined */
1766 s64 res
= (s64
)((u64
)a
- (u64
)b
);
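/* Worked example (added, assuming the usual overflow test for these helpers):
 * signed_add_overflows(S64_MAX, 1) computes res in u64, which wraps to
 * S64_MIN when read back as s64; res < a while b > 0, so an overflow is
 * reported and callers respond by widening the bounds to [S64_MIN, S64_MAX].
 */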
1773 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1774 * Caller should also handle BPF_MOV case separately.
1775 * If we return -EACCES, caller may want to try again treating pointer as a
1776 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
1778 static int adjust_ptr_min_max_vals(struct bpf_verifier_env
*env
,
1779 struct bpf_insn
*insn
,
1780 const struct bpf_reg_state
*ptr_reg
,
1781 const struct bpf_reg_state
*off_reg
)
1783 struct bpf_reg_state
*regs
= env
->cur_state
.regs
, *dst_reg
;
1784 bool known
= tnum_is_const(off_reg
->var_off
);
1785 s64 smin_val
= off_reg
->smin_value
, smax_val
= off_reg
->smax_value
,
1786 smin_ptr
= ptr_reg
->smin_value
, smax_ptr
= ptr_reg
->smax_value
;
1787 u64 umin_val
= off_reg
->umin_value
, umax_val
= off_reg
->umax_value
,
1788 umin_ptr
= ptr_reg
->umin_value
, umax_ptr
= ptr_reg
->umax_value
;
1789 u8 opcode
= BPF_OP(insn
->code
);
1790 u32 dst
= insn
->dst_reg
;
1792 dst_reg
= ®s
[dst
];
1794 if (WARN_ON_ONCE(known
&& (smin_val
!= smax_val
))) {
1795 print_verifier_state(&env
->cur_state
);
1796 verbose("verifier internal error: known but bad sbounds\n");
1799 if (WARN_ON_ONCE(known
&& (umin_val
!= umax_val
))) {
1800 print_verifier_state(&env
->cur_state
);
1801 verbose("verifier internal error: known but bad ubounds\n");
1805 if (BPF_CLASS(insn
->code
) != BPF_ALU64
) {
1806 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
1807 if (!env
->allow_ptr_leaks
)
1808 verbose("R%d 32-bit pointer arithmetic prohibited\n",
1813 if (ptr_reg
->type
== PTR_TO_MAP_VALUE_OR_NULL
) {
1814 if (!env
->allow_ptr_leaks
)
1815 verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1819 if (ptr_reg
->type
== CONST_PTR_TO_MAP
) {
1820 if (!env
->allow_ptr_leaks
)
1821 verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1825 if (ptr_reg
->type
== PTR_TO_PACKET_END
) {
1826 if (!env
->allow_ptr_leaks
)
1827 verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1832 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1833 * The id may be overwritten later if we create a new variable offset.
1835 dst_reg
->type
= ptr_reg
->type
;
1836 dst_reg
->id
= ptr_reg
->id
;
1840 /* We can take a fixed offset as long as it doesn't overflow
1841 * the s32 'off' field
1843 if (known
&& (ptr_reg
->off
+ smin_val
==
1844 (s64
)(s32
)(ptr_reg
->off
+ smin_val
))) {
1845 /* pointer += K. Accumulate it into fixed offset */
1846 dst_reg
->smin_value
= smin_ptr
;
1847 dst_reg
->smax_value
= smax_ptr
;
1848 dst_reg
->umin_value
= umin_ptr
;
1849 dst_reg
->umax_value
= umax_ptr
;
1850 dst_reg
->var_off
= ptr_reg
->var_off
;
1851 dst_reg
->off
= ptr_reg
->off
+ smin_val
;
1852 dst_reg
->range
= ptr_reg
->range
;
1855 /* A new variable offset is created. Note that off_reg->off
1856 * == 0, since it's a scalar.
1857 * dst_reg gets the pointer type and since some positive
1858 * integer value was added to the pointer, give it a new 'id'
1859 * if it's a PTR_TO_PACKET.
1860 * this creates a new 'base' pointer, off_reg (variable) gets
1861 * added into the variable offset, and we copy the fixed offset
1864 if (signed_add_overflows(smin_ptr
, smin_val
) ||
1865 signed_add_overflows(smax_ptr
, smax_val
)) {
1866 dst_reg
->smin_value
= S64_MIN
;
1867 dst_reg
->smax_value
= S64_MAX
;
1869 dst_reg
->smin_value
= smin_ptr
+ smin_val
;
1870 dst_reg
->smax_value
= smax_ptr
+ smax_val
;
1872 if (umin_ptr
+ umin_val
< umin_ptr
||
1873 umax_ptr
+ umax_val
< umax_ptr
) {
1874 dst_reg
->umin_value
= 0;
1875 dst_reg
->umax_value
= U64_MAX
;
1877 dst_reg
->umin_value
= umin_ptr
+ umin_val
;
1878 dst_reg
->umax_value
= umax_ptr
+ umax_val
;
1880 dst_reg
->var_off
= tnum_add(ptr_reg
->var_off
, off_reg
->var_off
);
1881 dst_reg
->off
= ptr_reg
->off
;
1882 if (ptr_reg
->type
== PTR_TO_PACKET
) {
1883 dst_reg
->id
= ++env
->id_gen
;
1884 /* something was added to pkt_ptr, set range to zero */
1889 if (dst_reg
== off_reg
) {
1890 /* scalar -= pointer. Creates an unknown scalar */
1891 if (!env
->allow_ptr_leaks
)
1892 verbose("R%d tried to subtract pointer from scalar\n",
1896 /* We don't allow subtraction from FP, because (according to
1897 * test_verifier.c test "invalid fp arithmetic", JITs might not
1898 * be able to deal with it.
1900 if (ptr_reg
->type
== PTR_TO_STACK
) {
1901 if (!env
->allow_ptr_leaks
)
1902 verbose("R%d subtraction from stack pointer prohibited\n",
1906 if (known
&& (ptr_reg
->off
- smin_val
==
1907 (s64
)(s32
)(ptr_reg
->off
- smin_val
))) {
1908 /* pointer -= K. Subtract it from fixed offset */
1909 dst_reg
->smin_value
= smin_ptr
;
1910 dst_reg
->smax_value
= smax_ptr
;
1911 dst_reg
->umin_value
= umin_ptr
;
1912 dst_reg
->umax_value
= umax_ptr
;
1913 dst_reg
->var_off
= ptr_reg
->var_off
;
1914 dst_reg
->id
= ptr_reg
->id
;
1915 dst_reg
->off
= ptr_reg
->off
- smin_val
;
1916 dst_reg
->range
= ptr_reg
->range
;
1919 /* A new variable offset is created. If the subtrahend is known
1920 * nonnegative, then any reg->range we had before is still good.
1922 if (signed_sub_overflows(smin_ptr
, smax_val
) ||
1923 signed_sub_overflows(smax_ptr
, smin_val
)) {
1924 /* Overflow possible, we know nothing */
1925 dst_reg
->smin_value
= S64_MIN
;
1926 dst_reg
->smax_value
= S64_MAX
;
1928 dst_reg
->smin_value
= smin_ptr
- smax_val
;
1929 dst_reg
->smax_value
= smax_ptr
- smin_val
;
1931 if (umin_ptr
< umax_val
) {
1932 /* Overflow possible, we know nothing */
1933 dst_reg
->umin_value
= 0;
1934 dst_reg
->umax_value
= U64_MAX
;
1936 /* Cannot overflow (as long as bounds are consistent) */
1937 dst_reg
->umin_value
= umin_ptr
- umax_val
;
1938 dst_reg
->umax_value
= umax_ptr
- umin_val
;
1940 dst_reg
->var_off
= tnum_sub(ptr_reg
->var_off
, off_reg
->var_off
);
1941 dst_reg
->off
= ptr_reg
->off
;
1942 if (ptr_reg
->type
== PTR_TO_PACKET
) {
1943 dst_reg
->id
= ++env
->id_gen
;
1944 /* something was added to pkt_ptr, set range to zero */
1952 /* bitwise ops on pointers are troublesome, prohibit for now.
1953 * (However, in principle we could allow some cases, e.g.
1954 * ptr &= ~3 which would reduce min_value by 3.)
1956 if (!env->allow_ptr_leaks)
1957 verbose("R%d bitwise operator %s on pointer prohibited\n",
1958 dst, bpf_alu_string[opcode >> 4]);
1961 /* other operators (e.g. MUL,LSH) produce non-pointer results */
1962 if (!env->allow_ptr_leaks)
1963 verbose("R%d pointer arithmetic with %s operator prohibited\n",
1964 dst, bpf_alu_string[opcode >> 4]);
1968 __update_reg_bounds(dst_reg);
1969 __reg_deduce_bounds(dst_reg);
1970 __reg_bound_offset(dst_reg);
1974 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
1975 struct bpf_insn *insn,
1976 struct bpf_reg_state *dst_reg,
1977 struct bpf_reg_state src_reg)
1979 struct bpf_reg_state *regs = env->cur_state.regs;
1980 u8 opcode = BPF_OP(insn->code);
1981 bool src_known, dst_known;
1982 s64 smin_val, smax_val;
1983 u64 umin_val, umax_val;
1985 if (BPF_CLASS(insn->code) != BPF_ALU64) {
1986 /* 32-bit ALU ops are (32,32)->64 */
1987 coerce_reg_to_32(dst_reg);
1988 coerce_reg_to_32(&src_reg);
1990 smin_val = src_reg.smin_value;
1991 smax_val = src_reg.smax_value;
1992 umin_val = src_reg.umin_value;
1993 umax_val = src_reg.umax_value;
1994 src_known = tnum_is_const(src_reg.var_off);
1995 dst_known = tnum_is_const(dst_reg->var_off);
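/* For example (illustrative values only): if dst_reg is known to be in
 * [10, 20] and src_reg in [1, 5], the BPF_ADD case below yields [11, 25]
 * for both the signed and unsigned bounds, and tnum_add() merges the known
 * bits. Only when one of the signed_add_overflows() checks or the unsigned
 * wrap checks fires do the corresponding bounds collapse to the full
 * S64/U64 range.
 */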
1999 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2000 signed_add_overflows(dst_reg->smax_value, smax_val)) {
2001 dst_reg->smin_value = S64_MIN;
2002 dst_reg->smax_value = S64_MAX;
2004 dst_reg->smin_value += smin_val;
2005 dst_reg->smax_value += smax_val;
2007 if (dst_reg->umin_value + umin_val < umin_val ||
2008 dst_reg->umax_value + umax_val < umax_val) {
2009 dst_reg->umin_value = 0;
2010 dst_reg->umax_value = U64_MAX;
2012 dst_reg->umin_value += umin_val;
2013 dst_reg->umax_value += umax_val;
2015 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2018 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2019 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2020 /* Overflow possible, we know nothing */
2021 dst_reg->smin_value = S64_MIN;
2022 dst_reg->smax_value = S64_MAX;
2024 dst_reg->smin_value -= smax_val;
2025 dst_reg->smax_value -= smin_val;
2027 if (dst_reg->umin_value < umax_val) {
2028 /* Overflow possible, we know nothing */
2029 dst_reg->umin_value = 0;
2030 dst_reg->umax_value = U64_MAX;
2032 /* Cannot overflow (as long as bounds are consistent) */
2033 dst_reg->umin_value -= umax_val;
2034 dst_reg->umax_value -= umin_val;
2036 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2039 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2040 if (smin_val < 0 || dst_reg->smin_value < 0) {
2041 /* Ain't nobody got time to multiply that sign */
2042 __mark_reg_unbounded(dst_reg);
2043 __update_reg_bounds(dst_reg);
2046 /* Both values are positive, so we can work with unsigned and
2047 * copy the result to signed (unless it exceeds S64_MAX).
2049 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2050 /* Potential overflow, we know nothing */
2051 __mark_reg_unbounded(dst_reg);
2052 /* (except what we can learn from the var_off) */
2053 __update_reg_bounds(dst_reg);
2056 dst_reg->umin_value *= umin_val;
2057 dst_reg->umax_value *= umax_val;
2058 if (dst_reg->umax_value > S64_MAX) {
2059 /* Overflow possible, we know nothing */
2060 dst_reg->smin_value = S64_MIN;
2061 dst_reg->smax_value = S64_MAX;
2063 dst_reg->smin_value = dst_reg->umin_value;
2064 dst_reg->smax_value = dst_reg->umax_value;
2068 if (src_known && dst_known) {
2069 __mark_reg_known(dst_reg, dst_reg->var_off.value &
2070 src_reg.var_off.value);
2073 /* We get our minimum from the var_off, since that's inherently
2074 * bitwise. Our maximum is the minimum of the operands' maxima.
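/* Illustrative example: dst_reg in [0, 255] ANDed with a src_reg whose
 * var_off says only the low 4 bits may be set (value 0, mask 0xf, so
 * umax_val == 15) gives a result var_off.value of 0, so umin_value
 * becomes 0 and umax_value becomes min(255, 15) == 15.
 */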
2076 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2077 dst_reg->umin_value = dst_reg->var_off.value;
2078 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2079 if (dst_reg->smin_value < 0 || smin_val < 0) {
2080 /* Lose signed bounds when ANDing negative numbers,
2081 * ain't nobody got time for that.
2083 dst_reg->smin_value = S64_MIN;
2084 dst_reg->smax_value = S64_MAX;
2086 /* ANDing two positives gives a positive, so safe to
2087 * cast result into s64.
2089 dst_reg->smin_value = dst_reg->umin_value;
2090 dst_reg->smax_value = dst_reg->umax_value;
2092 /* We may learn something more from the var_off */
2093 __update_reg_bounds(dst_reg);
2096 if (src_known && dst_known) {
2097 __mark_reg_known(dst_reg, dst_reg->var_off.value |
2098 src_reg.var_off.value);
2101 /* We get our maximum from the var_off, and our minimum is the
2102 * maximum of the operands' minima
2104 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2105 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2106 dst_reg->umax_value = dst_reg->var_off.value |
2107 dst_reg->var_off.mask;
2108 if (dst_reg->smin_value < 0 || smin_val < 0) {
2109 /* Lose signed bounds when ORing negative numbers,
2110 * ain't nobody got time for that.
2112 dst_reg->smin_value = S64_MIN;
2113 dst_reg->smax_value = S64_MAX;
2115 /* ORing two positives gives a positive, so safe to
2116 * cast result into s64.
2118 dst_reg->smin_value = dst_reg->umin_value;
2119 dst_reg->smax_value = dst_reg->umax_value;
2121 /* We may learn something more from the var_off */
2122 __update_reg_bounds(dst_reg);
2125 if (umax_val > 63) {
2126 /* Shifts greater than 63 are undefined. This includes
2127 * shifts by a negative number.
2129 mark_reg_unknown(regs, insn->dst_reg);
2132 /* We lose all sign bit information (except what we can pick
2135 dst_reg->smin_value = S64_MIN;
2136 dst_reg->smax_value = S64_MAX;
2137 /* If we might shift our top bit out, then we know nothing */
2138 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2139 dst_reg->umin_value = 0;
2140 dst_reg->umax_value = U64_MAX;
2142 dst_reg->umin_value <<= umin_val;
2143 dst_reg->umax_value <<= umax_val;
2146 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2148 dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2149 /* We may learn something more from the var_off */
2150 __update_reg_bounds(dst_reg);
2153 if (umax_val > 63) {
2154 /* Shifts greater than 63 are undefined. This includes
2155 * shifts by a negative number.
2157 mark_reg_unknown(regs, insn->dst_reg);
2160 /* BPF_RSH is an unsigned shift, so make the appropriate casts */
2161 if (dst_reg->smin_value < 0) {
2163 /* Sign bit will be cleared */
2164 dst_reg->smin_value = 0;
2166 /* Lost sign bit information */
2167 dst_reg->smin_value = S64_MIN;
2168 dst_reg->smax_value = S64_MAX;
2171 dst_reg->smin_value =
2172 (u64)(dst_reg->smin_value) >> umax_val;
2175 dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2178 dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2179 dst_reg->umin_value >>= umax_val;
2180 dst_reg->umax_value >>= umin_val;
2181 /* We may learn something more from the var_off */
2182 __update_reg_bounds(dst_reg);
2185 mark_reg_unknown(regs, insn->dst_reg);
2189 __reg_deduce_bounds(dst_reg);
2190 __reg_bound_offset(dst_reg);
2194 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2197 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2198 struct bpf_insn *insn)
2200 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg;
2201 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2202 u8 opcode = BPF_OP(insn->code);
2205 dst_reg = &regs[insn->dst_reg];
2207 if (dst_reg->type != SCALAR_VALUE)
2209 if (BPF_SRC(insn->code) == BPF_X) {
2210 src_reg = &regs[insn->src_reg];
2211 if (src_reg->type != SCALAR_VALUE) {
2212 if (dst_reg->type != SCALAR_VALUE) {
2213 /* Combining two pointers by any ALU op yields
2214 * an arbitrary scalar.
2216 if (!env->allow_ptr_leaks) {
2217 verbose("R%d pointer %s pointer prohibited\n",
2219 bpf_alu_string[opcode >> 4]);
2222 mark_reg_unknown(regs, insn->dst_reg);
2225 /* scalar += pointer
2226 * This is legal, but we have to reverse our
2227 * src/dest handling in computing the range
2229 rc = adjust_ptr_min_max_vals(env, insn,
2231 if (rc == -EACCES && env->allow_ptr_leaks) {
2232 /* scalar += unknown scalar */
2233 __mark_reg_unknown(&off_reg);
2234 return adjust_scalar_min_max_vals(
2240 } else if (ptr_reg) {
2241 /* pointer += scalar */
2242 rc = adjust_ptr_min_max_vals(env, insn,
2244 if (rc == -EACCES && env->allow_ptr_leaks) {
2245 /* unknown scalar += scalar */
2246 __mark_reg_unknown(dst_reg);
2247 return adjust_scalar_min_max_vals(
2248 env, insn, dst_reg, *src_reg);
2253 /* Pretend the src is a reg with a known value, since we only
2254 * need to be able to read from this state.
2256 off_reg.type = SCALAR_VALUE;
2257 __mark_reg_known(&off_reg, insn->imm);
2259 if (ptr_reg) { /* pointer += K */
2260 rc = adjust_ptr_min_max_vals(env, insn,
2262 if (rc == -EACCES && env->allow_ptr_leaks) {
2263 /* unknown scalar += K */
2264 __mark_reg_unknown(dst_reg);
2265 return adjust_scalar_min_max_vals(
2266 env, insn, dst_reg, off_reg);
2272 /* Got here implies adding two SCALAR_VALUEs */
2273 if (WARN_ON_ONCE(ptr_reg)) {
2274 print_verifier_state(&env->cur_state);
2275 verbose("verifier internal error: unexpected ptr_reg\n");
2278 if (WARN_ON(!src_reg)) {
2279 print_verifier_state(&env->cur_state);
2280 verbose("verifier internal error: no src_reg\n");
2283 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2286 /* check validity of 32-bit and 64-bit arithmetic operations */
2287 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2289 struct bpf_reg_state *regs = env->cur_state.regs;
2290 u8 opcode = BPF_OP(insn->code);
2293 if (opcode == BPF_END || opcode == BPF_NEG) {
2294 if (opcode == BPF_NEG) {
2295 if (BPF_SRC(insn->code) != 0 ||
2296 insn->src_reg != BPF_REG_0 ||
2297 insn->off != 0 || insn->imm != 0) {
2298 verbose("BPF_NEG uses reserved fields\n");
2302 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2303 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
2304 BPF_CLASS(insn->code) == BPF_ALU64) {
2305 verbose("BPF_END uses reserved fields\n");
2310 /* check src operand */
2311 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2315 if (is_pointer_value(env, insn->dst_reg)) {
2316 verbose("R%d pointer arithmetic prohibited\n",
2321 /* check dest operand */
2322 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2326 } else if (opcode == BPF_MOV) {
2328 if (BPF_SRC(insn->code) == BPF_X) {
2329 if (insn->imm != 0 || insn->off != 0) {
2330 verbose("BPF_MOV uses reserved fields\n");
2334 /* check src operand */
2335 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2339 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2340 verbose("BPF_MOV uses reserved fields\n");
2345 /* check dest operand */
2346 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2350 if (BPF_SRC(insn->code) == BPF_X) {
2351 if (BPF_CLASS(insn->code) == BPF_ALU64) {
2353 * copy register state to dest reg
2355 regs[insn->dst_reg] = regs[insn->src_reg];
2356 regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
2359 if (is_pointer_value(env, insn->src_reg)) {
2360 verbose("R%d partial copy of pointer\n",
2364 mark_reg_unknown(regs, insn->dst_reg);
2365 /* high 32 bits are known zero. */
2366 regs[insn->dst_reg].var_off = tnum_cast(
2367 regs[insn->dst_reg].var_off, 4);
2368 __update_reg_bounds(&regs[insn->dst_reg]);
2372 * remember the value we stored into this reg
2374 regs[insn->dst_reg].type = SCALAR_VALUE;
2375 __mark_reg_known(regs + insn->dst_reg, insn->imm);
2378 } else if (opcode > BPF_END) {
2379 verbose("invalid BPF_ALU opcode %x\n", opcode);
2382 } else { /* all other ALU ops: and, sub, xor, add, ... */
2384 if (BPF_SRC(insn->code) == BPF_X) {
2385 if (insn->imm != 0 || insn->off != 0) {
2386 verbose("BPF_ALU uses reserved fields\n");
2389 /* check src1 operand */
2390 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2394 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2395 verbose("BPF_ALU uses reserved fields\n");
2400 /* check src2 operand */
2401 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2405 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2406 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2407 verbose("div by zero\n");
2411 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2412 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2413 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2415 if (insn->imm < 0 || insn->imm >= size) {
2416 verbose("invalid shift %d\n", insn->imm);
2421 /* check dest operand */
2422 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2426 return adjust_reg_min_max_vals(env, insn);
2432 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2433 struct bpf_reg_state *dst_reg,
2434 bool range_right_open)
2436 struct bpf_reg_state *regs = state->regs, *reg;
2440 if (dst_reg->off < 0 ||
2441 (dst_reg->off == 0 && range_right_open))
2442 /* This doesn't give us any range */
2445 if (dst_reg->umax_value > MAX_PACKET_OFF ||
2446 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2447 /* Risk of overflow. For instance, ptr + (1<<63) may be less
2448 * than pkt_end, but that's because it's also less than pkt.
2452 new_range = dst_reg->off;
2453 if (range_right_open)
2456 /* Examples for register markings:
2458 * pkt_data in dst register:
2462 * if (r2 > pkt_end) goto <handle exception>
2467 * if (r2 < pkt_end) goto <access okay>
2468 * <handle exception>
2471 * r2 == dst_reg, pkt_end == src_reg
2472 * r2=pkt(id=n,off=8,r=0)
2473 * r3=pkt(id=n,off=0,r=0)
2475 * pkt_data in src register:
2479 * if (pkt_end >= r2) goto <access okay>
2480 * <handle exception>
2484 * if (pkt_end <= r2) goto <handle exception>
2488 * pkt_end == dst_reg, r2 == src_reg
2489 * r2=pkt(id=n,off=8,r=0)
2490 * r3=pkt(id=n,off=0,r=0)
2492 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2493 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2494 * and [r3, r3 + 8-1) respectively is safe to access depending on
2498 /* If our ids match, then we must have the same max_value. And we
2499 * don't care about the other reg's fixed offset, since if it's too big
2500 * the range won't allow anything.
2501 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2503 for (i = 0; i < MAX_BPF_REG; i++)
2504 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
2505 /* keep the maximum range already checked */
2506 regs[i].range = max(regs[i].range, new_range);
2508 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2509 if (state->stack_slot_type[i] != STACK_SPILL)
2511 reg = &state->spilled_regs[i / BPF_REG_SIZE];
2512 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
2513 reg->range = max(reg->range, new_range);
2517 /* Adjusts the register min/max values in the case that the dst_reg is the
2518 * variable register that we are working on, and src_reg is a constant or we're
2519 * simply doing a BPF_K check.
2520 * In JEQ/JNE cases we also adjust the var_off values.
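/* Example (informal): for "if (R1 > 7) goto ...", with R1 a scalar, val is 7
 * and the opcode is BPF_JGT. In the taken branch true_reg gets
 * umin_value = max(umin_value, 8); in the fall-through branch false_reg gets
 * umax_value = min(umax_value, 7). The signed variants below do the same for
 * BPF_JSGT and friends.
 */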
2522 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2523 struct bpf_reg_state *false_reg, u64 val,
2526 /* If the dst_reg is a pointer, we can't learn anything about its
2527 * variable offset from the compare (unless src_reg were a pointer into
2528 * the same object, but we don't bother with that.
2529 * Since false_reg and true_reg have the same type by construction, we
2530 * only need to check one of them for pointerness.
2532 if (__is_pointer_value(false, false_reg))
2537 /* If this is false then we know nothing Jon Snow, but if it is
2538 * true then we know for sure.
2540 __mark_reg_known(true_reg, val);
2543 /* If this is true we know nothing Jon Snow, but if it is false
2544 * we know the value for sure;
2546 __mark_reg_known(false_reg, val);
2549 false_reg->umax_value = min(false_reg->umax_value, val);
2550 true_reg->umin_value = max(true_reg->umin_value, val + 1);
2553 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2554 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2557 false_reg->umin_value = max(false_reg->umin_value, val);
2558 true_reg->umax_value = min(true_reg->umax_value, val - 1);
2561 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2562 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2565 false_reg->umax_value = min(false_reg->umax_value, val - 1);
2566 true_reg->umin_value = max(true_reg->umin_value, val);
2569 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2570 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2573 false_reg->umin_value = max(false_reg->umin_value, val + 1);
2574 true_reg->umax_value = min(true_reg->umax_value, val);
2577 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2578 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2584 __reg_deduce_bounds(false_reg);
2585 __reg_deduce_bounds(true_reg);
2586 /* We might have learned some bits from the bounds. */
2587 __reg_bound_offset(false_reg);
2588 __reg_bound_offset(true_reg);
2589 /* Intersecting with the old var_off might have improved our bounds
2590 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2591 * then new var_off is (0; 0x7f...fc) which improves our umax.
2593 __update_reg_bounds(false_reg);
2594 __update_reg_bounds(true_reg);
2597 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2600 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2601 struct bpf_reg_state *false_reg, u64 val,
2604 if (__is_pointer_value(false, false_reg))
2609 /* If this is false then we know nothing Jon Snow, but if it is
2610 * true then we know for sure.
2612 __mark_reg_known(true_reg, val);
2615 /* If this is true we know nothing Jon Snow, but if it is false
2616 * we know the value for sure;
2618 __mark_reg_known(false_reg, val);
2621 true_reg->umax_value = min(true_reg->umax_value, val - 1);
2622 false_reg->umin_value = max(false_reg->umin_value, val);
2625 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2626 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2629 true_reg->umin_value = max(true_reg->umin_value, val + 1);
2630 false_reg->umax_value = min(false_reg->umax_value, val);
2633 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2634 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2637 true_reg->umax_value = min(true_reg->umax_value, val);
2638 false_reg->umin_value = max(false_reg->umin_value, val + 1);
2641 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2642 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2645 true_reg->umin_value = max(true_reg->umin_value, val);
2646 false_reg->umax_value = min(false_reg->umax_value, val - 1);
2649 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2650 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2656 __reg_deduce_bounds(false_reg);
2657 __reg_deduce_bounds(true_reg);
2658 /* We might have learned some bits from the bounds. */
2659 __reg_bound_offset(false_reg);
2660 __reg_bound_offset(true_reg);
2661 /* Intersecting with the old var_off might have improved our bounds
2662 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2663 * then new var_off is (0; 0x7f...fc) which improves our umax.
2665 __update_reg_bounds(false_reg);
2666 __update_reg_bounds(true_reg);
2669 /* Regs are known to be equal, so intersect their min/max/var_off */
2670 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2671 struct bpf_reg_state *dst_reg)
2673 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2674 dst_reg->umin_value);
2675 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2676 dst_reg->umax_value);
2677 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2678 dst_reg->smin_value);
2679 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2680 dst_reg->smax_value);
2681 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2683 /* We might have learned new bounds from the var_off. */
2684 __update_reg_bounds(src_reg);
2685 __update_reg_bounds(dst_reg);
2686 /* We might have learned something about the sign bit. */
2687 __reg_deduce_bounds(src_reg);
2688 __reg_deduce_bounds(dst_reg);
2689 /* We might have learned some bits from the bounds. */
2690 __reg_bound_offset(src_reg);
2691 __reg_bound_offset(dst_reg);
2692 /* Intersecting with the old var_off might have improved our bounds
2693 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2694 * then new var_off is (0; 0x7f...fc) which improves our umax.
2696 __update_reg_bounds(src_reg);
2697 __update_reg_bounds(dst_reg);
2700 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2701 struct bpf_reg_state *true_dst,
2702 struct bpf_reg_state *false_src,
2703 struct bpf_reg_state *false_dst,
2708 __reg_combine_min_max(true_src, true_dst);
2711 __reg_combine_min_max(false_src, false_dst);
2716 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2719 struct bpf_reg_state *reg = &regs[regno];
2721 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2722 /* Old offset (both fixed and variable parts) should
2723 * have been known-zero, because we don't allow pointer
2724 * arithmetic on pointers that might be NULL.
2726 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2727 !tnum_equals_const(reg->var_off, 0) ||
2729 __mark_reg_known_zero(reg);
2733 reg->type = SCALAR_VALUE;
2734 } else if (reg->map_ptr->inner_map_meta) {
2735 reg->type = CONST_PTR_TO_MAP;
2736 reg->map_ptr = reg->map_ptr->inner_map_meta;
2738 reg->type = PTR_TO_MAP_VALUE;
2740 /* We don't need id from this point onwards anymore, thus we
2741 * should better reset it, so that state pruning has chances
2748 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2749 * be folded together at some point.
2751 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2754 struct bpf_reg_state *regs = state->regs;
2755 u32 id = regs[regno].id;
2758 for (i = 0; i < MAX_BPF_REG; i++)
2759 mark_map_reg(regs, i, id, is_null);
2761 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2762 if (state->stack_slot_type[i] != STACK_SPILL)
2764 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null);
2768 static int check_cond_jmp_op(struct bpf_verifier_env
*env
,
2769 struct bpf_insn
*insn
, int *insn_idx
)
2771 struct bpf_verifier_state
*other_branch
, *this_branch
= &env
->cur_state
;
2772 struct bpf_reg_state
*regs
= this_branch
->regs
, *dst_reg
;
2773 u8 opcode
= BPF_OP(insn
->code
);
2776 if (opcode
> BPF_JSLE
) {
2777 verbose("invalid BPF_JMP opcode %x\n", opcode
);
2781 if (BPF_SRC(insn
->code
) == BPF_X
) {
2782 if (insn
->imm
!= 0) {
2783 verbose("BPF_JMP uses reserved fields\n");
2787 /* check src1 operand */
2788 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
2792 if (is_pointer_value(env
, insn
->src_reg
)) {
2793 verbose("R%d pointer comparison prohibited\n",
2798 if (insn
->src_reg
!= BPF_REG_0
) {
2799 verbose("BPF_JMP uses reserved fields\n");
2804 /* check src2 operand */
2805 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
2809 dst_reg
= ®s
[insn
->dst_reg
];
2811 /* detect if R == 0 where R was initialized to zero earlier */
2812 if (BPF_SRC(insn
->code
) == BPF_K
&&
2813 (opcode
== BPF_JEQ
|| opcode
== BPF_JNE
) &&
2814 dst_reg
->type
== SCALAR_VALUE
&&
2815 tnum_equals_const(dst_reg
->var_off
, insn
->imm
)) {
2816 if (opcode
== BPF_JEQ
) {
2817 /* if (imm == imm) goto pc+off;
2818 * only follow the goto, ignore fall-through
2820 *insn_idx
+= insn
->off
;
2823 /* if (imm != imm) goto pc+off;
2824 * only follow fall-through branch, since
2825 * that's where the program will go
2831 other_branch
= push_stack(env
, *insn_idx
+ insn
->off
+ 1, *insn_idx
);
2835 /* detect if we are comparing against a constant value so we can adjust
2836 * our min/max values for our dst register.
2837 * this is only legit if both are scalars (or pointers to the same
2838 * object, I suppose, but we don't support that right now), because
2839 * otherwise the different base pointers mean the offsets aren't
2842 if (BPF_SRC(insn
->code
) == BPF_X
) {
2843 if (dst_reg
->type
== SCALAR_VALUE
&&
2844 regs
[insn
->src_reg
].type
== SCALAR_VALUE
) {
2845 if (tnum_is_const(regs
[insn
->src_reg
].var_off
))
2846 reg_set_min_max(&other_branch
->regs
[insn
->dst_reg
],
2847 dst_reg
, regs
[insn
->src_reg
].var_off
.value
,
2849 else if (tnum_is_const(dst_reg
->var_off
))
2850 reg_set_min_max_inv(&other_branch
->regs
[insn
->src_reg
],
2851 ®s
[insn
->src_reg
],
2852 dst_reg
->var_off
.value
, opcode
);
2853 else if (opcode
== BPF_JEQ
|| opcode
== BPF_JNE
)
2854 /* Comparing for equality, we can combine knowledge */
2855 reg_combine_min_max(&other_branch
->regs
[insn
->src_reg
],
2856 &other_branch
->regs
[insn
->dst_reg
],
2857 ®s
[insn
->src_reg
],
2858 ®s
[insn
->dst_reg
], opcode
);
2860 } else if (dst_reg
->type
== SCALAR_VALUE
) {
2861 reg_set_min_max(&other_branch
->regs
[insn
->dst_reg
],
2862 dst_reg
, insn
->imm
, opcode
);
2865 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2866 if (BPF_SRC(insn
->code
) == BPF_K
&&
2867 insn
->imm
== 0 && (opcode
== BPF_JEQ
|| opcode
== BPF_JNE
) &&
2868 dst_reg
->type
== PTR_TO_MAP_VALUE_OR_NULL
) {
2869 /* Mark all identical map registers in each branch as either
2870 * safe or unknown depending R == 0 or R != 0 conditional.
2872 mark_map_regs(this_branch
, insn
->dst_reg
, opcode
== BPF_JNE
);
2873 mark_map_regs(other_branch
, insn
->dst_reg
, opcode
== BPF_JEQ
);
2874 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JGT
&&
2875 dst_reg
->type
== PTR_TO_PACKET
&&
2876 regs
[insn
->src_reg
].type
== PTR_TO_PACKET_END
) {
2877 /* pkt_data' > pkt_end */
2878 find_good_pkt_pointers(this_branch
, dst_reg
, false);
2879 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JGT
&&
2880 dst_reg
->type
== PTR_TO_PACKET_END
&&
2881 regs
[insn
->src_reg
].type
== PTR_TO_PACKET
) {
2882 /* pkt_end > pkt_data' */
2883 find_good_pkt_pointers(other_branch
, ®s
[insn
->src_reg
], true);
2884 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JLT
&&
2885 dst_reg
->type
== PTR_TO_PACKET
&&
2886 regs
[insn
->src_reg
].type
== PTR_TO_PACKET_END
) {
2887 /* pkt_data' < pkt_end */
2888 find_good_pkt_pointers(other_branch
, dst_reg
, true);
2889 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JLT
&&
2890 dst_reg
->type
== PTR_TO_PACKET_END
&&
2891 regs
[insn
->src_reg
].type
== PTR_TO_PACKET
) {
2892 /* pkt_end < pkt_data' */
2893 find_good_pkt_pointers(this_branch
, ®s
[insn
->src_reg
], false);
2894 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JGE
&&
2895 dst_reg
->type
== PTR_TO_PACKET
&&
2896 regs
[insn
->src_reg
].type
== PTR_TO_PACKET_END
) {
2897 /* pkt_data' >= pkt_end */
2898 find_good_pkt_pointers(this_branch
, dst_reg
, true);
2899 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JGE
&&
2900 dst_reg
->type
== PTR_TO_PACKET_END
&&
2901 regs
[insn
->src_reg
].type
== PTR_TO_PACKET
) {
2902 /* pkt_end >= pkt_data' */
2903 find_good_pkt_pointers(other_branch
, ®s
[insn
->src_reg
], false);
2904 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JLE
&&
2905 dst_reg
->type
== PTR_TO_PACKET
&&
2906 regs
[insn
->src_reg
].type
== PTR_TO_PACKET_END
) {
2907 /* pkt_data' <= pkt_end */
2908 find_good_pkt_pointers(other_branch
, dst_reg
, false);
2909 } else if (BPF_SRC(insn
->code
) == BPF_X
&& opcode
== BPF_JLE
&&
2910 dst_reg
->type
== PTR_TO_PACKET_END
&&
2911 regs
[insn
->src_reg
].type
== PTR_TO_PACKET
) {
2912 /* pkt_end <= pkt_data' */
2913 find_good_pkt_pointers(this_branch
, ®s
[insn
->src_reg
], true);
2914 } else if (is_pointer_value(env
, insn
->dst_reg
)) {
2915 verbose("R%d pointer comparison prohibited\n", insn
->dst_reg
);
2919 print_verifier_state(this_branch
);
2923 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
2924 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
2926 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
2928 return (struct bpf_map *) (unsigned long) imm64;
2931 /* verify BPF_LD_IMM64 instruction */
2932 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
2934 struct bpf_reg_state *regs = env->cur_state.regs;
2937 if (BPF_SIZE(insn->code) != BPF_DW) {
2938 verbose("invalid BPF_LD_IMM insn\n");
2941 if (insn->off != 0) {
2942 verbose("BPF_LD_IMM64 uses reserved fields\n");
2946 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2950 if (insn->src_reg == 0) {
2951 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
2953 regs[insn->dst_reg].type = SCALAR_VALUE;
2954 __mark_reg_known(&regs[insn->dst_reg], imm);
2958 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
2959 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
2961 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
2962 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
2966 static bool may_access_skb(enum bpf_prog_type type)
2969 case BPF_PROG_TYPE_SOCKET_FILTER:
2970 case BPF_PROG_TYPE_SCHED_CLS:
2971 case BPF_PROG_TYPE_SCHED_ACT:
2978 /* verify safety of LD_ABS|LD_IND instructions:
2979 * - they can only appear in the programs where ctx == skb
2980 * - since they are wrappers of function calls, they scratch R1-R5 registers,
2981 * preserve R6-R9, and store return value into R0
2984 * ctx == skb == R6 == CTX
2987 * SRC == any register
2988 * IMM == 32-bit immediate
2991 * R0 - 8/16/32-bit skb data converted to cpu endianness
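/* Illustrative use (assumes a socket-filter style program): a classic
 * pattern is
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),                    // keep skb in R6
 *   BPF_LD_ABS(BPF_H, offsetof(struct ethhdr, h_proto)),
 * after which R0 holds the 16-bit protocol field; R1-R5 must be treated as
 * scratched, which is exactly what the checks below enforce.
 */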
2993 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
2995 struct bpf_reg_state *regs = env->cur_state.regs;
2996 u8 mode = BPF_MODE(insn->code);
2999 if (!may_access_skb(env->prog->type)) {
3000 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3004 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3005 BPF_SIZE(insn->code) == BPF_DW ||
3006 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3007 verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
3011 /* check whether implicit source operand (register R6) is readable */
3012 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3016 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3017 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3021 if (mode == BPF_IND) {
3022 /* check explicit source operand */
3023 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3028 /* reset caller saved regs to unreadable */
3029 for (i = 0; i < CALLER_SAVED_REGS; i++) {
3030 mark_reg_not_init(regs, caller_saved[i]);
3031 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3034 /* mark destination R0 register as readable, since it contains
3035 * the value fetched from the packet.
3036 * Already marked as written above.
3038 mark_reg_unknown(regs, BPF_REG_0);
3042 /* non-recursive DFS pseudo code
3043 * 1 procedure DFS-iterative(G,v):
3044 * 2 label v as discovered
3045 * 3 let S be a stack
3047 * 5 while S is not empty
3049 * 7 if t is what we're looking for:
3051 * 9 for all edges e in G.adjacentEdges(t) do
3052 * 10 if edge e is already labelled
3053 * 11 continue with the next edge
3054 * 12 w <- G.adjacentVertex(t,e)
3055 * 13 if vertex w is not discovered and not explored
3056 * 14 label e as tree-edge
3057 * 15 label w as discovered
3060 * 18 else if vertex w is discovered
3061 * 19 label e as back-edge
3063 * 21 // vertex w is explored
3064 * 22 label e as forward- or cross-edge
3065 * 23 label t as explored
3070 * 0x11 - discovered and fall-through edge labelled
3071 * 0x12 - discovered and fall-through and branch edges labelled
3082 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3084 static int *insn_stack
; /* stack of insns to process */
3085 static int cur_stack
; /* current stack index */
3086 static int *insn_state
;
3088 /* t, w, e - match pseudo-code above:
3089 * t - index of current instruction
3090 * w - next instruction
3093 static int push_insn(int t
, int w
, int e
, struct bpf_verifier_env
*env
)
3095 if (e
== FALLTHROUGH
&& insn_state
[t
] >= (DISCOVERED
| FALLTHROUGH
))
3098 if (e
== BRANCH
&& insn_state
[t
] >= (DISCOVERED
| BRANCH
))
3101 if (w
< 0 || w
>= env
->prog
->len
) {
3102 verbose("jump out of range from insn %d to %d\n", t
, w
);
3107 /* mark branch target for state pruning */
3108 env
->explored_states
[w
] = STATE_LIST_MARK
;
3110 if (insn_state
[w
] == 0) {
3112 insn_state
[t
] = DISCOVERED
| e
;
3113 insn_state
[w
] = DISCOVERED
;
3114 if (cur_stack
>= env
->prog
->len
)
3116 insn_stack
[cur_stack
++] = w
;
3118 } else if ((insn_state
[w
] & 0xF0) == DISCOVERED
) {
3119 verbose("back-edge from insn %d to %d\n", t
, w
);
3121 } else if (insn_state
[w
] == EXPLORED
) {
3122 /* forward- or cross-edge */
3123 insn_state
[t
] = DISCOVERED
| e
;
3125 verbose("insn state internal bug\n");
3131 /* non-recursive depth-first-search to detect loops in BPF program
3132 * loop == back-edge in directed graph
3134 static int check_cfg(struct bpf_verifier_env
*env
)
3136 struct bpf_insn
*insns
= env
->prog
->insnsi
;
3137 int insn_cnt
= env
->prog
->len
;
3141 insn_state
= kcalloc(insn_cnt
, sizeof(int), GFP_KERNEL
);
3145 insn_stack
= kcalloc(insn_cnt
, sizeof(int), GFP_KERNEL
);
3151 insn_state
[0] = DISCOVERED
; /* mark 1st insn as discovered */
3152 insn_stack
[0] = 0; /* 0 is the first instruction */
3158 t
= insn_stack
[cur_stack
- 1];
3160 if (BPF_CLASS(insns
[t
].code
) == BPF_JMP
) {
3161 u8 opcode
= BPF_OP(insns
[t
].code
);
3163 if (opcode
== BPF_EXIT
) {
3165 } else if (opcode
== BPF_CALL
) {
3166 ret
= push_insn(t
, t
+ 1, FALLTHROUGH
, env
);
3171 if (t
+ 1 < insn_cnt
)
3172 env
->explored_states
[t
+ 1] = STATE_LIST_MARK
;
3173 } else if (opcode
== BPF_JA
) {
3174 if (BPF_SRC(insns
[t
].code
) != BPF_K
) {
3178 /* unconditional jump with single edge */
3179 ret
= push_insn(t
, t
+ insns
[t
].off
+ 1,
3185 /* tell verifier to check for equivalent states
3186 * after every call and jump
3188 if (t
+ 1 < insn_cnt
)
3189 env
->explored_states
[t
+ 1] = STATE_LIST_MARK
;
3191 /* conditional jump with two edges */
3192 env
->explored_states
[t
] = STATE_LIST_MARK
;
3193 ret
= push_insn(t
, t
+ 1, FALLTHROUGH
, env
);
3199 ret
= push_insn(t
, t
+ insns
[t
].off
+ 1, BRANCH
, env
);
3206 /* all other non-branch instructions with single
3209 ret
= push_insn(t
, t
+ 1, FALLTHROUGH
, env
);
3217 insn_state
[t
] = EXPLORED
;
3218 if (cur_stack
-- <= 0) {
3219 verbose("pop stack internal bug\n");
3226 for (i
= 0; i
< insn_cnt
; i
++) {
3227 if (insn_state
[i
] != EXPLORED
) {
3228 verbose("unreachable insn %d\n", i
);
3233 ret
= 0; /* cfg looks good */
3241 /* check %cur's range satisfies %old's */
3242 static bool range_within(struct bpf_reg_state *old,
3243 struct bpf_reg_state *cur)
3245 return old->umin_value <= cur->umin_value &&
3246 old->umax_value >= cur->umax_value &&
3247 old->smin_value <= cur->smin_value &&
3248 old->smax_value >= cur->smax_value;
3251 /* Maximum number of register states that can exist at once */
3252 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3258 /* If in the old state two registers had the same id, then they need to have
3259 * the same id in the new state as well. But that id could be different from
3260 * the old state, so we need to track the mapping from old to new ids.
3261 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3262 * regs with old id 5 must also have new id 9 for the new state to be safe. But
3263 * regs with a different old id could still have new id 9, we don't care about
3265 * So we look through our idmap to see if this old id has been seen before. If
3266 * so, we require the new id to match; otherwise, we add the id pair to the map.
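/* Small worked example (informal): if the old state had R1 and R2 with
 * id 5, and the current state has them with id 9, the first check_ids()
 * call records the pair (5, 9) and the second call succeeds because 5
 * maps to 9 again. If R2 instead had id 7 in the current state, the second
 * call would fail and the states would not be considered equivalent.
 */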
3268 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3272 for (i = 0; i < ID_MAP_SIZE; i++) {
3273 if (!idmap[i].old) {
3274 /* Reached an empty slot; haven't seen this id before */
3275 idmap[i].old = old_id;
3276 idmap[i].cur = cur_id;
3279 if (idmap[i].old == old_id)
3280 return idmap[i].cur == cur_id;
3282 /* We ran out of idmap slots, which should be impossible */
3287 /* Returns true if (rold safe implies rcur safe) */
3288 static bool regsafe(struct bpf_reg_state
*rold
, struct bpf_reg_state
*rcur
,
3289 struct idpair
*idmap
)
3291 if (!(rold
->live
& REG_LIVE_READ
))
3292 /* explored state didn't use this */
3295 if (memcmp(rold
, rcur
, offsetof(struct bpf_reg_state
, live
)) == 0)
3298 if (rold
->type
== NOT_INIT
)
3299 /* explored state can't have used this */
3301 if (rcur
->type
== NOT_INIT
)
3303 switch (rold
->type
) {
3305 if (rcur
->type
== SCALAR_VALUE
) {
3306 /* new val must satisfy old val knowledge */
3307 return range_within(rold
, rcur
) &&
3308 tnum_in(rold
->var_off
, rcur
->var_off
);
3310 /* if we knew anything about the old value, we're not
3311 * equal, because we can't know anything about the
3312 * scalar value of the pointer in the new value.
3314 return rold
->umin_value
== 0 &&
3315 rold
->umax_value
== U64_MAX
&&
3316 rold
->smin_value
== S64_MIN
&&
3317 rold
->smax_value
== S64_MAX
&&
3318 tnum_is_unknown(rold
->var_off
);
3320 case PTR_TO_MAP_VALUE
:
3321 /* If the new min/max/var_off satisfy the old ones and
3322 * everything else matches, we are OK.
3323 * We don't care about the 'id' value, because nothing
3324 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3326 return memcmp(rold
, rcur
, offsetof(struct bpf_reg_state
, id
)) == 0 &&
3327 range_within(rold
, rcur
) &&
3328 tnum_in(rold
->var_off
, rcur
->var_off
);
3329 case PTR_TO_MAP_VALUE_OR_NULL
:
3330 /* a PTR_TO_MAP_VALUE could be safe to use as a
3331 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3332 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3333 * checked, doing so could have affected others with the same
3334 * id, and we can't check for that because we lost the id when
3335 * we converted to a PTR_TO_MAP_VALUE.
3337 if (rcur
->type
!= PTR_TO_MAP_VALUE_OR_NULL
)
3339 if (memcmp(rold
, rcur
, offsetof(struct bpf_reg_state
, id
)))
3341 /* Check our ids match any regs they're supposed to */
3342 return check_ids(rold
->id
, rcur
->id
, idmap
);
3344 if (rcur
->type
!= PTR_TO_PACKET
)
3346 /* We must have at least as much range as the old ptr
3347 * did, so that any accesses which were safe before are
3348 * still safe. This is true even if old range < old off,
3349 * since someone could have accessed through (ptr - k), or
3350 * even done ptr -= k in a register, to get a safe access.
3352 if (rold
->range
> rcur
->range
)
3354 /* If the offsets don't match, we can't trust our alignment;
3355 * nor can we be sure that we won't fall out of range.
3357 if (rold
->off
!= rcur
->off
)
3359 /* id relations must be preserved */
3360 if (rold
->id
&& !check_ids(rold
->id
, rcur
->id
, idmap
))
3362 /* new val must satisfy old val knowledge */
3363 return range_within(rold
, rcur
) &&
3364 tnum_in(rold
->var_off
, rcur
->var_off
);
3366 case CONST_PTR_TO_MAP
:
3368 case PTR_TO_PACKET_END
:
3369 /* Only valid matches are exact, which memcmp() above
3370 * would have accepted
3373 /* Don't know what's going on, just say it's not safe */
3377 /* Shouldn't get here; if we do, say it's not safe */
3382 /* compare two verifier states
3384 * all states stored in state_list are known to be valid, since
3385 * verifier reached 'bpf_exit' instruction through them
3387 * this function is called when verifier exploring different branches of
3388 * execution popped from the state stack. If it sees an old state that has
3389 * more strict register state and more strict stack state then this execution
3390 * branch doesn't need to be explored further, since verifier already
3391 * concluded that more strict state leads to valid finish.
3393 * Therefore two states are equivalent if register state is more conservative
3394 * and explored stack state is more conservative than the current one.
3397 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3398 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3400 * In other words if current stack state (one being explored) has more
3401 * valid slots than old one that already passed validation, it means
3402 * the verifier can stop exploring and conclude that current state is valid too
3404 * Similarly with registers. If explored state has register type as invalid
3405 * whereas register type in current state is meaningful, it means that
3406 * the current state will reach 'bpf_exit' instruction safely
3408 static bool states_equal(struct bpf_verifier_env
*env
,
3409 struct bpf_verifier_state
*old
,
3410 struct bpf_verifier_state
*cur
)
3412 struct idpair
*idmap
;
3416 idmap
= kcalloc(ID_MAP_SIZE
, sizeof(struct idpair
), GFP_KERNEL
);
3417 /* If we failed to allocate the idmap, just say it's not safe */
3421 for (i
= 0; i
< MAX_BPF_REG
; i
++) {
3422 if (!regsafe(&old
->regs
[i
], &cur
->regs
[i
], idmap
))
3426 for (i
= 0; i
< MAX_BPF_STACK
; i
++) {
3427 if (old
->stack_slot_type
[i
] == STACK_INVALID
)
3429 if (old
->stack_slot_type
[i
] != cur
->stack_slot_type
[i
])
3430 /* Ex: old explored (safe) state has STACK_SPILL in
3430 3431 * this stack slot, but current has STACK_MISC ->
3432 * these verifier states are not equivalent,
3433 * return false to continue verification of this path
3436 if (i
% BPF_REG_SIZE
)
3438 if (old
->stack_slot_type
[i
] != STACK_SPILL
)
3440 if (!regsafe(&old
->spilled_regs
[i
/ BPF_REG_SIZE
],
3441 &cur
->spilled_regs
[i
/ BPF_REG_SIZE
],
3443 /* when explored and current stack slot are both storing
3444 * spilled registers, check that stored pointers types
3445 * are the same as well.
3446 * Ex: explored safe path could have stored
3447 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3448 * but current path has stored:
3449 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3450 * such verifier states are not equivalent.
3451 * return false to continue verification of this path
3463 /* A write screens off any subsequent reads; but write marks come from the
3464 * straight-line code between a state and its parent. When we arrive at a
3465 * jump target (in the first iteration of the propagate_liveness() loop),
3466 * we didn't arrive by the straight-line code, so read marks in state must
3467 * propagate to parent regardless of state's write marks.
3469 static bool do_propagate_liveness(const struct bpf_verifier_state
*state
,
3470 struct bpf_verifier_state
*parent
)
3472 bool writes
= parent
== state
->parent
; /* Observe write marks */
3473 bool touched
= false; /* any changes made? */
3478 /* Propagate read liveness of registers... */
3479 BUILD_BUG_ON(BPF_REG_FP
+ 1 != MAX_BPF_REG
);
3480 /* We don't need to worry about FP liveness because it's read-only */
3481 for (i
= 0; i
< BPF_REG_FP
; i
++) {
3482 if (parent
->regs
[i
].live
& REG_LIVE_READ
)
3484 if (writes
&& (state
->regs
[i
].live
& REG_LIVE_WRITTEN
))
3486 if (state
->regs
[i
].live
& REG_LIVE_READ
) {
3487 parent
->regs
[i
].live
|= REG_LIVE_READ
;
3491 /* ... and stack slots */
3492 for (i
= 0; i
< MAX_BPF_STACK
/ BPF_REG_SIZE
; i
++) {
3493 if (parent
->stack_slot_type
[i
* BPF_REG_SIZE
] != STACK_SPILL
)
3495 if (state
->stack_slot_type
[i
* BPF_REG_SIZE
] != STACK_SPILL
)
3497 if (parent
->spilled_regs
[i
].live
& REG_LIVE_READ
)
3499 if (writes
&& (state
->spilled_regs
[i
].live
& REG_LIVE_WRITTEN
))
3501 if (state
->spilled_regs
[i
].live
& REG_LIVE_READ
) {
3502 parent
->spilled_regs
[i
].live
|= REG_LIVE_READ
;
3509 /* "parent" is "a state from which we reach the current state", but initially
3510 * it is not the state->parent (i.e. "the state whose straight-line code leads
3511 * to the current state"), instead it is the state that happened to arrive at
3512 * a (prunable) equivalent of the current state. See comment above
3513 * do_propagate_liveness() for consequences of this.
3514 * This function is just a more efficient way of calling mark_reg_read() or
3515 * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3516 * though it requires that parent != state->parent in the call arguments.
3518 static void propagate_liveness(const struct bpf_verifier_state *state,
3519 struct bpf_verifier_state *parent)
3521 while (do_propagate_liveness(state, parent)) {
3522 /* Something changed, so we need to feed those changes onward */
3524 parent = state->parent;
3528 static int is_state_visited(struct bpf_verifier_env
*env
, int insn_idx
)
3530 struct bpf_verifier_state_list
*new_sl
;
3531 struct bpf_verifier_state_list
*sl
;
3534 sl
= env
->explored_states
[insn_idx
];
3536 /* this 'insn_idx' instruction wasn't marked, so we will not
3537 * be doing state search here
3541 while (sl
!= STATE_LIST_MARK
) {
3542 if (states_equal(env
, &sl
->state
, &env
->cur_state
)) {
3543 /* reached equivalent register/stack state,
3545 * Registers read by the continuation are read by us.
3546 * If we have any write marks in env->cur_state, they
3547 * will prevent corresponding reads in the continuation
3548 * from reaching our parent (an explored_state). Our
3549 * own state will get the read marks recorded, but
3550 * they'll be immediately forgotten as we're pruning
3551 * this state and will pop a new one.
3553 propagate_liveness(&sl
->state
, &env
->cur_state
);
3559 /* there were no equivalent states, remember current one.
3560 * technically the current state is not proven to be safe yet,
3561 * but it will either reach bpf_exit (which means it's safe) or
3562 * it will be rejected. Since there are no loops, we won't be
3563 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3565 new_sl
= kmalloc(sizeof(struct bpf_verifier_state_list
), GFP_USER
);
3569 /* add new state to the head of linked list */
3570 memcpy(&new_sl
->state
, &env
->cur_state
, sizeof(env
->cur_state
));
3571 new_sl
->next
= env
->explored_states
[insn_idx
];
3572 env
->explored_states
[insn_idx
] = new_sl
;
3573 /* connect new state to parentage chain */
3574 env
->cur_state
.parent
= &new_sl
->state
;
3575 /* clear write marks in current state: the writes we did are not writes
3576 * our child did, so they don't screen off its reads from us.
3577 * (There are no read marks in current state, because reads always mark
3578 * their parent and current state never has children yet. Only
3579 * explored_states can get read marks.)
3581 for (i
= 0; i
< BPF_REG_FP
; i
++)
3582 env
->cur_state
.regs
[i
].live
= REG_LIVE_NONE
;
3583 for (i
= 0; i
< MAX_BPF_STACK
/ BPF_REG_SIZE
; i
++)
3584 if (env
->cur_state
.stack_slot_type
[i
* BPF_REG_SIZE
] == STACK_SPILL
)
3585 env
->cur_state
.spilled_regs
[i
].live
= REG_LIVE_NONE
;
3589 static int ext_analyzer_insn_hook(struct bpf_verifier_env
*env
,
3590 int insn_idx
, int prev_insn_idx
)
3592 if (!env
->analyzer_ops
|| !env
->analyzer_ops
->insn_hook
)
3595 return env
->analyzer_ops
->insn_hook(env
, insn_idx
, prev_insn_idx
);
3598 static int do_check(struct bpf_verifier_env
*env
)
3600 struct bpf_verifier_state
*state
= &env
->cur_state
;
3601 struct bpf_insn
*insns
= env
->prog
->insnsi
;
3602 struct bpf_reg_state
*regs
= state
->regs
;
3603 int insn_cnt
= env
->prog
->len
;
3604 int insn_idx
, prev_insn_idx
= 0;
3605 int insn_processed
= 0;
3606 bool do_print_state
= false;
3608 init_reg_state(regs
);
3609 state
->parent
= NULL
;
3612 struct bpf_insn
*insn
;
3616 if (insn_idx
>= insn_cnt
) {
3617 verbose("invalid insn idx %d insn_cnt %d\n",
3618 insn_idx
, insn_cnt
);
3622 insn
= &insns
[insn_idx
];
3623 class = BPF_CLASS(insn
->code
);
3625 if (++insn_processed
> BPF_COMPLEXITY_LIMIT_INSNS
) {
3626 verbose("BPF program is too large. Processed %d insn\n",
3631 err
= is_state_visited(env
, insn_idx
);
3635 /* found equivalent state, can prune the search */
3638 verbose("\nfrom %d to %d: safe\n",
3639 prev_insn_idx
, insn_idx
);
3641 verbose("%d: safe\n", insn_idx
);
3643 goto process_bpf_exit
;
3649 if (log_level
> 1 || (log_level
&& do_print_state
)) {
3651 verbose("%d:", insn_idx
);
3653 verbose("\nfrom %d to %d:",
3654 prev_insn_idx
, insn_idx
);
3655 print_verifier_state(&env
->cur_state
);
3656 do_print_state
= false;
3660 verbose("%d: ", insn_idx
);
3661 print_bpf_insn(env
, insn
);
3664 err
= ext_analyzer_insn_hook(env
, insn_idx
, prev_insn_idx
);
3668 if (class == BPF_ALU
|| class == BPF_ALU64
) {
3669 err
= check_alu_op(env
, insn
);
3673 } else if (class == BPF_LDX
) {
3674 enum bpf_reg_type
*prev_src_type
, src_reg_type
;
3676 /* check for reserved fields is already done */
3678 /* check src operand */
3679 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
3683 err
= check_reg_arg(env
, insn
->dst_reg
, DST_OP_NO_MARK
);
3687 src_reg_type
= regs
[insn
->src_reg
].type
;
3689 /* check that memory (src_reg + off) is readable,
3690 * the state of dst_reg will be updated by this func
3692 err
= check_mem_access(env
, insn_idx
, insn
->src_reg
, insn
->off
,
3693 BPF_SIZE(insn
->code
), BPF_READ
,
3698 prev_src_type
= &env
->insn_aux_data
[insn_idx
].ptr_type
;
3700 if (*prev_src_type
== NOT_INIT
) {
3702 * dst_reg = *(u32 *)(src_reg + off)
3703 * save type to validate intersecting paths
3705 *prev_src_type
= src_reg_type
;
3707 } else if (src_reg_type
!= *prev_src_type
&&
3708 (src_reg_type
== PTR_TO_CTX
||
3709 *prev_src_type
== PTR_TO_CTX
)) {
3710 /* Abuser program is trying to use the same insn
3711 * dst_reg = *(u32*) (src_reg + off)
3712 * with different pointer types:
3713 * src_reg == ctx in one branch and
3714 * src_reg == stack|map in some other branch.
3717 verbose("same insn cannot be used with different pointers\n");
3721 } else if (class == BPF_STX
) {
3722 enum bpf_reg_type
*prev_dst_type
, dst_reg_type
;
3724 if (BPF_MODE(insn
->code
) == BPF_XADD
) {
3725 err
= check_xadd(env
, insn_idx
, insn
);
3732 /* check src1 operand */
3733 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
3736 /* check src2 operand */
3737 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
3741 dst_reg_type
= regs
[insn
->dst_reg
].type
;
3743 /* check that memory (dst_reg + off) is writeable */
3744 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
3745 BPF_SIZE(insn
->code
), BPF_WRITE
,
3750 prev_dst_type
= &env
->insn_aux_data
[insn_idx
].ptr_type
;
3752 if (*prev_dst_type
== NOT_INIT
) {
3753 *prev_dst_type
= dst_reg_type
;
3754 } else if (dst_reg_type
!= *prev_dst_type
&&
3755 (dst_reg_type
== PTR_TO_CTX
||
3756 *prev_dst_type
== PTR_TO_CTX
)) {
3757 verbose("same insn cannot be used with different pointers\n");
3761 } else if (class == BPF_ST
) {
3762 if (BPF_MODE(insn
->code
) != BPF_MEM
||
3763 insn
->src_reg
!= BPF_REG_0
) {
3764 verbose("BPF_ST uses reserved fields\n");
3767 /* check src operand */
3768 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
3772 /* check that memory (dst_reg + off) is writeable */
3773 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
3774 BPF_SIZE(insn
->code
), BPF_WRITE
,
3779 } else if (class == BPF_JMP
) {
3780 u8 opcode
= BPF_OP(insn
->code
);
3782 if (opcode
== BPF_CALL
) {
3783 if (BPF_SRC(insn
->code
) != BPF_K
||
3785 insn
->src_reg
!= BPF_REG_0
||
3786 insn
->dst_reg
!= BPF_REG_0
) {
3787 verbose("BPF_CALL uses reserved fields\n");
3791 err
= check_call(env
, insn
->imm
, insn_idx
);
3795 } else if (opcode
== BPF_JA
) {
3796 if (BPF_SRC(insn
->code
) != BPF_K
||
3798 insn
->src_reg
!= BPF_REG_0
||
3799 insn
->dst_reg
!= BPF_REG_0
) {
3800 verbose("BPF_JA uses reserved fields\n");
3804 insn_idx
+= insn
->off
+ 1;
3807 } else if (opcode
== BPF_EXIT
) {
3808 if (BPF_SRC(insn
->code
) != BPF_K
||
3810 insn
->src_reg
!= BPF_REG_0
||
3811 insn
->dst_reg
!= BPF_REG_0
) {
3812 verbose("BPF_EXIT uses reserved fields\n");
3816 /* eBPF calling convention is such that R0 is used
3817 * to return the value from eBPF program.
3818 * Make sure that it's readable at this time
3819 * of bpf_exit, which means that program wrote
3820 * something into it earlier
3822 err
= check_reg_arg(env
, BPF_REG_0
, SRC_OP
);
3826 if (is_pointer_value(env
, BPF_REG_0
)) {
3827 verbose("R0 leaks addr as return value\n");
3832 insn_idx
= pop_stack(env
, &prev_insn_idx
);
3836 do_print_state
= true;
3840 err
= check_cond_jmp_op(env
, insn
, &insn_idx
);
3844 } else if (class == BPF_LD
) {
3845 u8 mode
= BPF_MODE(insn
->code
);
3847 if (mode
== BPF_ABS
|| mode
== BPF_IND
) {
3848 err
= check_ld_abs(env
, insn
);
3852 } else if (mode
== BPF_IMM
) {
3853 err
= check_ld_imm(env
, insn
);
3859 verbose("invalid BPF_LD mode\n");
3863 verbose("unknown insn class %d\n", class);
3870 verbose("processed %d insns, stack depth %d\n",
3871 insn_processed
, env
->prog
->aux
->stack_depth
);
3875 static int check_map_prealloc(struct bpf_map
*map
)
3877 return (map
->map_type
!= BPF_MAP_TYPE_HASH
&&
3878 map
->map_type
!= BPF_MAP_TYPE_PERCPU_HASH
&&
3879 map
->map_type
!= BPF_MAP_TYPE_HASH_OF_MAPS
) ||
3880 !(map
->map_flags
& BPF_F_NO_PREALLOC
);
3883 static int check_map_prog_compatibility(struct bpf_map
*map
,
3884 struct bpf_prog
*prog
)
3887 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
3888 * preallocated hash maps, since doing memory allocation
3889 * in overflow_handler can crash depending on where nmi got
3892 if (prog
->type
== BPF_PROG_TYPE_PERF_EVENT
) {
3893 if (!check_map_prealloc(map
)) {
3894 verbose("perf_event programs can only use preallocated hash map\n");
3897 if (map
->inner_map_meta
&&
3898 !check_map_prealloc(map
->inner_map_meta
)) {
3899 verbose("perf_event programs can only use preallocated inner hash map\n");
3906 /* look for pseudo eBPF instructions that access map FDs and
3907 * replace them with actual map pointers
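/* Sketch of the pattern being rewritten (the BPF_LD_MAP_FD macro name is
 * taken from the BPF samples/tools headers and shown only for illustration):
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 * expands to a two-insn BPF_LD | BPF_IMM | BPF_DW pair whose src_reg is
 * BPF_PSEUDO_MAP_FD and whose imm holds the user-supplied map fd; below,
 * the fd is resolved via fdget()/__bpf_map_get() and the 64-bit map pointer
 * is written back into the imm fields of the two instructions.
 */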
3909 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env
*env
)
3911 struct bpf_insn
*insn
= env
->prog
->insnsi
;
3912 int insn_cnt
= env
->prog
->len
;
3915 err
= bpf_prog_calc_tag(env
->prog
);
3919 for (i
= 0; i
< insn_cnt
; i
++, insn
++) {
3920 if (BPF_CLASS(insn
->code
) == BPF_LDX
&&
3921 (BPF_MODE(insn
->code
) != BPF_MEM
|| insn
->imm
!= 0)) {
3922 verbose("BPF_LDX uses reserved fields\n");
3926 if (BPF_CLASS(insn
->code
) == BPF_STX
&&
3927 ((BPF_MODE(insn
->code
) != BPF_MEM
&&
3928 BPF_MODE(insn
->code
) != BPF_XADD
) || insn
->imm
!= 0)) {
3929 verbose("BPF_STX uses reserved fields\n");
3933 if (insn
[0].code
== (BPF_LD
| BPF_IMM
| BPF_DW
)) {
3934 struct bpf_map
*map
;
3937 if (i
== insn_cnt
- 1 || insn
[1].code
!= 0 ||
3938 insn
[1].dst_reg
!= 0 || insn
[1].src_reg
!= 0 ||
3940 verbose("invalid bpf_ld_imm64 insn\n");
3944 if (insn
->src_reg
== 0)
3945 /* valid generic load 64-bit imm */
3948 if (insn
->src_reg
!= BPF_PSEUDO_MAP_FD
) {
3949 verbose("unrecognized bpf_ld_imm64 insn\n");
3953 f
= fdget(insn
->imm
);
3954 map
= __bpf_map_get(f
);
3956 verbose("fd %d is not pointing to valid bpf_map\n",
3958 return PTR_ERR(map
);
3961 err
= check_map_prog_compatibility(map
, env
->prog
);
3967 /* store map pointer inside BPF_LD_IMM64 instruction */
3968 insn
[0].imm
= (u32
) (unsigned long) map
;
3969 insn
[1].imm
= ((u64
) (unsigned long) map
) >> 32;
3971 /* check whether we recorded this map already */
3972 for (j
= 0; j
< env
->used_map_cnt
; j
++)
3973 if (env
->used_maps
[j
] == map
) {
3978 if (env
->used_map_cnt
>= MAX_USED_MAPS
) {
3983 /* hold the map. If the program is rejected by verifier,
3984 * the map will be released by release_maps() or it
3985 * will be used by the valid program until it's unloaded
3986 * and all maps are released in free_bpf_prog_info()
3988 map
= bpf_map_inc(map
, false);
3991 return PTR_ERR(map
);
3993 env
->used_maps
[env
->used_map_cnt
++] = map
;
4002 /* now all pseudo BPF_LD_IMM64 instructions load valid
4003 * 'struct bpf_map *' into a register instead of user map_fd.
4004 * These pointers will be used later by verifier to validate map access.
4009 /* drop refcnt of maps used by the rejected program */
4010 static void release_maps(struct bpf_verifier_env
*env
)
4014 for (i
= 0; i
< env
->used_map_cnt
; i
++)
4015 bpf_map_put(env
->used_maps
[i
]);
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}
/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
				u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;

	if (cnt == 1)
		return 0;
	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
	if (!new_data)
		return -ENOMEM;
	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	env->insn_aux_data = new_data;
	vfree(old_data);
	return 0;
}
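/*
 * Worked example (editor's note, not part of the original source): with
 * prog_len == 10 after patching, off == 4 and cnt == 3, the first memcpy()
 * keeps old aux entries [0..3] at the same indices, the second copies old
 * entries [4..7] to new indices [6..9], and new entries 4 and 5 (covering the
 * freshly inserted instructions) stay zeroed courtesy of vzalloc().
 */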
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (!new_prog)
		return NULL;
	if (adjust_insn_aux_data(env, new_prog->len, off, len))
		return NULL;
	return new_prog;
}
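/*
 * Usage note (editor's note, not part of the original source): callers patch
 * at index 'i + delta', then advance delta by 'cnt - 1' and re-point 'insn'
 * into new_prog->insnsi, as convert_ctx_accesses() and fixup_bpf_calls() below
 * do, so the loop keeps walking the original instruction positions while
 * skipping the instructions that were just inserted.
 */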
/* convert load instructions that access fields of 'struct __sk_buff'
 * into sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
	const struct bpf_verifier_ops *ops = env->prog->aux->ops;
	int i, cnt, size, ctx_field_size, delta = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16], *insn;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;
	u32 target_size;

	if (ops->gen_prologue) {
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		} else if (cnt) {
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			env->prog = new_prog;
			delta += cnt - 1;
		}
	}

	if (!ops->convert_ctx_access)
		return 0;

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
			type = BPF_WRITE;
		else
			continue;

		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
			continue;

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimize program type specific
		 * convert_ctx_access changes. If conversion is successful,
		 * we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		if (is_narrower_load) {
			u32 off = insn->off;
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose("bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(ctx_field_size - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
					      &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			if (ctx_field_size <= 4)
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			else
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
		}

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn      = new_prog->insnsi + i + delta;
	}

	return 0;
}
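/*
 * Illustrative example (editor's sketch, not part of the original source): a
 * one-byte read of a four-byte context field at an aligned offset, e.g.
 *
 *	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_1, 0x50)
 *
 * is first widened to BPF_LDX_MEM(BPF_W, ...) at offset 0x50 & ~3, run through
 * convert_ctx_access(), and finally masked with
 * BPF_ALU32_IMM(BPF_AND, BPF_REG_2, 0xff) so the program still observes only
 * the byte it asked for. The offset and registers are made up for the example.
 */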
/* fixup insn->imm field of bpf_call instructions
 * and inline eligible helpers as explicit sequence of BPF instructions
 *
 * this function is called after eBPF program passed verification
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * handlers are currently limited to 64 bit only.
		 */
		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_map_lookup_elem) {
			map_ptr = env->insn_aux_data[i + delta].map_ptr;
			if (map_ptr == BPF_MAP_PTR_POISON ||
			    !map_ptr->ops->map_gen_lookup)
				goto patch_call_imm;

			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose("bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;

			/* keep walking new program and skip insns we just inserted */
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->imm == BPF_FUNC_redirect_map) {
			/* Note, we cannot use prog directly as imm as subsequent
			 * rewrites would still change the prog pointer. The only
			 * stable address we can use is aux, which also works with
			 * prog clones during blinding.
			 */
			u64 addr = (unsigned long)prog->aux;
			struct bpf_insn r4_ld[] = {
				BPF_LD_IMM64(BPF_REG_4, addr),
				*insn,
			};
			cnt = ARRAY_SIZE(r4_ld);

			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
		}

patch_call_imm:
		fn = prog->aux->ops->get_func_proto(insn->imm);
		/* all functions that have prototype and verifier allowed
		 * programs to call them, must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose("kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}
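/*
 * Illustrative example (editor's note, not part of the original source): a
 * helper call arrives from the loader as
 *
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 *
 * i.e. with the small helper id in insn->imm. After patch_call_imm above,
 * insn->imm instead holds the helper's offset from __bpf_call_base, so the
 * interpreter can resolve the target as __bpf_call_base + insn->imm and JITs
 * can emit a direct call.
 */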
static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct bpf_verifier_env *env;
	int ret = -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
				     (*prog)->len);
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = *prog;

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto err_unlock;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto err_unlock;
	} else {
		log_level = 0;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
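/*
 * Illustrative example (editor's sketch, not part of the original source): a
 * loader that wants the verification trace passes roughly
 *
 *	char vlog[65536];
 *
 *	attr.log_level = 1;
 *	attr.log_buf   = (__u64)(unsigned long)vlog;
 *	attr.log_size  = sizeof(vlog);
 *
 * with the BPF_PROG_LOAD command. Per the sanity checks above, log_size must
 * be at least 128 and at most UINT_MAX >> 8, and log_level must be non-zero
 * whenever a buffer is supplied; bpf_check() returns -ENOSPC when the trace
 * did not fit into the buffer.
 */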
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
		 void *priv)
{
	struct bpf_verifier_env *env;
	int ret;

	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
				     prog->len);
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = prog;
	env->analyzer_ops = ops;
	env->analyzer_priv = priv;

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	log_level = 0;

	env->strict_alignment = false;
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_KERNEL);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}

EXPORT_SYMBOL_GPL(bpf_analyzer);