1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23
24 /* bpf_check() is a static code analyzer that walks eBPF program
25 * instruction by instruction and updates register/stack state.
26 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
27 *
28 * The first pass is depth-first-search to check that the program is a DAG.
29 * It rejects the following programs:
30 * - larger than BPF_MAXINSNS insns
31 * - if loop is present (detected via back-edge)
32 * - unreachable insns exist (shouldn't be a forest. program = one function)
33 * - out of bounds or malformed jumps
34 * The second pass is all possible path descent from the 1st insn.
35 * Since it's analyzing all paths through the program, the length of the
36 * analysis is limited to 128k insn, which may be hit even if the total number of
37 * insn is less than 4K, but there are too many branches that change stack/regs.
38 * Number of 'branches to be analyzed' is limited to 1k
39 *
40 * On entry to each instruction, each register has a type, and the instruction
41 * changes the types of the registers depending on instruction semantics.
42 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
43 * copied to R1.
44 *
45 * All registers are 64-bit.
46 * R0 - return register
47 * R1-R5 argument passing registers
48 * R6-R9 callee saved registers
49 * R10 - frame pointer read-only
50 *
51 * At the start of BPF program the register R1 contains a pointer to bpf_context
52 * and has type PTR_TO_CTX.
53 *
54 * Verifier tracks arithmetic operations on pointers in case:
55 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
56 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
57 * 1st insn copies R10 (which has FRAME_PTR) type into R1
58 * and 2nd arithmetic instruction is pattern matched to recognize
59 * that it wants to construct a pointer to some element within stack.
60 * So after 2nd insn, the register R1 has type PTR_TO_STACK
61 * (and -20 constant is saved for further stack bounds checking).
62 * Meaning that this reg is a pointer to stack plus known immediate constant.
63 *
64 * Most of the time the registers have SCALAR_VALUE type, which
65 * means the register has some value, but it's not a valid pointer.
66 * (like pointer plus pointer becomes SCALAR_VALUE type)
67 *
68 * When verifier sees load or store instructions the type of base register
69 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
70 * types recognized by check_mem_access() function.
71 *
72 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
73 * and the range of [ptr, ptr + map's value_size) is accessible.
74 *
75 * registers used to pass values to function calls are checked against
76 * function argument constraints.
77 *
78 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
79 * It means that the register type passed to this function must be
80 * PTR_TO_STACK and it will be used inside the function as
81 * 'pointer to map element key'
82 *
83 * For example the argument constraints for bpf_map_lookup_elem():
84 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
85 * .arg1_type = ARG_CONST_MAP_PTR,
86 * .arg2_type = ARG_PTR_TO_MAP_KEY,
87 *
88 * ret_type says that this function returns 'pointer to map elem value or null'.
89 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
90 * and the 2nd argument to be a pointer to the stack, which will be used inside
91 * the helper function as a pointer to map element key.
92 *
93 * On the kernel side the helper function looks like:
94 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
95 * {
96 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
97 * void *key = (void *) (unsigned long) r2;
98 * void *value;
99 *
100 * here kernel can access 'key' and 'map' pointers safely, knowing that
101 * [key, key + map->key_size) bytes are valid and were initialized on
102 * the stack of eBPF program.
103 * }
104 *
105 * Corresponding eBPF program may look like:
106 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
107 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
108 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
109 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
110 * here verifier looks at prototype of map_lookup_elem() and sees:
111 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
112 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
113 *
114 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
115 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
116 * and were initialized prior to this call.
117 * If it's ok, then verifier allows this BPF_CALL insn and looks at
118 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
119 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
120 * returns either a pointer to map value or NULL.
121 *
122 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
123 * insn, the register holding that pointer in the true branch changes state to
124 * PTR_TO_MAP_VALUE and the same register changes state to a known-zero SCALAR_VALUE in the false
125 * branch. See check_cond_jmp_op().
126 *
127 * After the call R0 is set to return type of the function and registers R1-R5
128 * are set to NOT_INIT to indicate that they are no longer readable.
129 */
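
/* For illustration (a sketch, not taken from a real program), the NULL check
 * described above typically looks like this in eBPF:
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // if lookup failed, skip the store
 *   BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // R0 is PTR_TO_MAP_VALUE here
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 * In the fall-through (non-NULL) branch R0 is retyped to PTR_TO_MAP_VALUE,
 * so the store passes check_mem_access(); in the taken branch R0 is known
 * to be zero and any dereference through it would be rejected.
 */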
130
131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
132 struct bpf_verifier_stack_elem {
133 /* verifier state is 'st'
134 * before processing instruction 'insn_idx'
135 * and after processing instruction 'prev_insn_idx'
136 */
137 struct bpf_verifier_state st;
138 int insn_idx;
139 int prev_insn_idx;
140 struct bpf_verifier_stack_elem *next;
141 };
142
143 #define BPF_COMPLEXITY_LIMIT_INSNS 131072
144 #define BPF_COMPLEXITY_LIMIT_STACK 1024
145
146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
147
148 struct bpf_call_arg_meta {
149 struct bpf_map *map_ptr;
150 bool raw_mode;
151 bool pkt_access;
152 int regno;
153 int access_size;
154 };
155
156 /* verbose verifier prints what it's seeing
157 * bpf_check() is called under lock, so no race to access these global vars
158 */
159 static u32 log_level, log_size, log_len;
160 static char *log_buf;
161
162 static DEFINE_MUTEX(bpf_verifier_lock);
163
164 /* log_level controls verbosity level of eBPF verifier.
165 * verbose() is used to dump the verification trace to the log, so the user
166 * can figure out what's wrong with the program
167 */
168 static __printf(1, 2) void verbose(const char *fmt, ...)
169 {
170 va_list args;
171
172 if (log_level == 0 || log_len >= log_size - 1)
173 return;
174
175 va_start(args, fmt);
176 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
177 va_end(args);
178 }
179
180 static bool type_is_pkt_pointer(enum bpf_reg_type type)
181 {
182 return type == PTR_TO_PACKET ||
183 type == PTR_TO_PACKET_META;
184 }
185
186 /* string representation of 'enum bpf_reg_type' */
187 static const char * const reg_type_str[] = {
188 [NOT_INIT] = "?",
189 [SCALAR_VALUE] = "inv",
190 [PTR_TO_CTX] = "ctx",
191 [CONST_PTR_TO_MAP] = "map_ptr",
192 [PTR_TO_MAP_VALUE] = "map_value",
193 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
194 [PTR_TO_STACK] = "fp",
195 [PTR_TO_PACKET] = "pkt",
196 [PTR_TO_PACKET_META] = "pkt_meta",
197 [PTR_TO_PACKET_END] = "pkt_end",
198 };
199
200 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
201 static const char * const func_id_str[] = {
202 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
203 };
204 #undef __BPF_FUNC_STR_FN
205
206 static const char *func_id_name(int id)
207 {
208 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
209
210 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
211 return func_id_str[id];
212 else
213 return "unknown";
214 }
215
216 static void print_verifier_state(struct bpf_verifier_state *state)
217 {
218 struct bpf_reg_state *reg;
219 enum bpf_reg_type t;
220 int i;
221
222 for (i = 0; i < MAX_BPF_REG; i++) {
223 reg = &state->regs[i];
224 t = reg->type;
225 if (t == NOT_INIT)
226 continue;
227 verbose(" R%d=%s", i, reg_type_str[t]);
228 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
229 tnum_is_const(reg->var_off)) {
230 /* reg->off should be 0 for SCALAR_VALUE */
231 verbose("%lld", reg->var_off.value + reg->off);
232 } else {
233 verbose("(id=%d", reg->id);
234 if (t != SCALAR_VALUE)
235 verbose(",off=%d", reg->off);
236 if (type_is_pkt_pointer(t))
237 verbose(",r=%d", reg->range);
238 else if (t == CONST_PTR_TO_MAP ||
239 t == PTR_TO_MAP_VALUE ||
240 t == PTR_TO_MAP_VALUE_OR_NULL)
241 verbose(",ks=%d,vs=%d",
242 reg->map_ptr->key_size,
243 reg->map_ptr->value_size);
244 if (tnum_is_const(reg->var_off)) {
245 /* Typically an immediate SCALAR_VALUE, but
246 * could be a pointer whose offset is too big
247 * for reg->off
248 */
249 verbose(",imm=%llx", reg->var_off.value);
250 } else {
251 if (reg->smin_value != reg->umin_value &&
252 reg->smin_value != S64_MIN)
253 verbose(",smin_value=%lld",
254 (long long)reg->smin_value);
255 if (reg->smax_value != reg->umax_value &&
256 reg->smax_value != S64_MAX)
257 verbose(",smax_value=%lld",
258 (long long)reg->smax_value);
259 if (reg->umin_value != 0)
260 verbose(",umin_value=%llu",
261 (unsigned long long)reg->umin_value);
262 if (reg->umax_value != U64_MAX)
263 verbose(",umax_value=%llu",
264 (unsigned long long)reg->umax_value);
265 if (!tnum_is_unknown(reg->var_off)) {
266 char tn_buf[48];
267
268 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
269 verbose(",var_off=%s", tn_buf);
270 }
271 }
272 verbose(")");
273 }
274 }
275 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
276 if (state->stack_slot_type[i] == STACK_SPILL)
277 verbose(" fp%d=%s", -MAX_BPF_STACK + i,
278 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
279 }
280 verbose("\n");
281 }
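
/* As a rough illustration (hypothetical key/value sizes), the dump produced
 * above for the state at program entry looks like:
 *   R1=ctx(id=0,off=0,imm=0) R10=fp0
 * and after a successful map lookup plus NULL check something like:
 *   R0=map_value(id=0,off=0,ks=4,vs=8,imm=0) R1=ctx(id=0,off=0,imm=0) R10=fp0
 */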
282
283 static const char *const bpf_class_string[] = {
284 [BPF_LD] = "ld",
285 [BPF_LDX] = "ldx",
286 [BPF_ST] = "st",
287 [BPF_STX] = "stx",
288 [BPF_ALU] = "alu",
289 [BPF_JMP] = "jmp",
290 [BPF_RET] = "BUG",
291 [BPF_ALU64] = "alu64",
292 };
293
294 static const char *const bpf_alu_string[16] = {
295 [BPF_ADD >> 4] = "+=",
296 [BPF_SUB >> 4] = "-=",
297 [BPF_MUL >> 4] = "*=",
298 [BPF_DIV >> 4] = "/=",
299 [BPF_OR >> 4] = "|=",
300 [BPF_AND >> 4] = "&=",
301 [BPF_LSH >> 4] = "<<=",
302 [BPF_RSH >> 4] = ">>=",
303 [BPF_NEG >> 4] = "neg",
304 [BPF_MOD >> 4] = "%=",
305 [BPF_XOR >> 4] = "^=",
306 [BPF_MOV >> 4] = "=",
307 [BPF_ARSH >> 4] = "s>>=",
308 [BPF_END >> 4] = "endian",
309 };
310
311 static const char *const bpf_ldst_string[] = {
312 [BPF_W >> 3] = "u32",
313 [BPF_H >> 3] = "u16",
314 [BPF_B >> 3] = "u8",
315 [BPF_DW >> 3] = "u64",
316 };
317
318 static const char *const bpf_jmp_string[16] = {
319 [BPF_JA >> 4] = "jmp",
320 [BPF_JEQ >> 4] = "==",
321 [BPF_JGT >> 4] = ">",
322 [BPF_JLT >> 4] = "<",
323 [BPF_JGE >> 4] = ">=",
324 [BPF_JLE >> 4] = "<=",
325 [BPF_JSET >> 4] = "&",
326 [BPF_JNE >> 4] = "!=",
327 [BPF_JSGT >> 4] = "s>",
328 [BPF_JSLT >> 4] = "s<",
329 [BPF_JSGE >> 4] = "s>=",
330 [BPF_JSLE >> 4] = "s<=",
331 [BPF_CALL >> 4] = "call",
332 [BPF_EXIT >> 4] = "exit",
333 };
334
335 static void print_bpf_end_insn(const struct bpf_verifier_env *env,
336 const struct bpf_insn *insn)
337 {
338 verbose("(%02x) r%d = %s%d r%d\n", insn->code, insn->dst_reg,
339 BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
340 insn->imm, insn->dst_reg);
341 }
342
343 static void print_bpf_insn(const struct bpf_verifier_env *env,
344 const struct bpf_insn *insn)
345 {
346 u8 class = BPF_CLASS(insn->code);
347
348 if (class == BPF_ALU || class == BPF_ALU64) {
349 if (BPF_OP(insn->code) == BPF_END) {
350 if (class == BPF_ALU64)
351 verbose("BUG_alu64_%02x\n", insn->code);
352 else
353 print_bpf_end_insn(env, insn);
354 } else if (BPF_OP(insn->code) == BPF_NEG) {
355 verbose("(%02x) r%d = %s-r%d\n",
356 insn->code, insn->dst_reg,
357 class == BPF_ALU ? "(u32) " : "",
358 insn->dst_reg);
359 } else if (BPF_SRC(insn->code) == BPF_X) {
360 verbose("(%02x) %sr%d %s %sr%d\n",
361 insn->code, class == BPF_ALU ? "(u32) " : "",
362 insn->dst_reg,
363 bpf_alu_string[BPF_OP(insn->code) >> 4],
364 class == BPF_ALU ? "(u32) " : "",
365 insn->src_reg);
366 } else {
367 verbose("(%02x) %sr%d %s %s%d\n",
368 insn->code, class == BPF_ALU ? "(u32) " : "",
369 insn->dst_reg,
370 bpf_alu_string[BPF_OP(insn->code) >> 4],
371 class == BPF_ALU ? "(u32) " : "",
372 insn->imm);
373 }
374 } else if (class == BPF_STX) {
375 if (BPF_MODE(insn->code) == BPF_MEM)
376 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
377 insn->code,
378 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
379 insn->dst_reg,
380 insn->off, insn->src_reg);
381 else if (BPF_MODE(insn->code) == BPF_XADD)
382 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
383 insn->code,
384 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
385 insn->dst_reg, insn->off,
386 insn->src_reg);
387 else
388 verbose("BUG_%02x\n", insn->code);
389 } else if (class == BPF_ST) {
390 if (BPF_MODE(insn->code) != BPF_MEM) {
391 verbose("BUG_st_%02x\n", insn->code);
392 return;
393 }
394 verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
395 insn->code,
396 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
397 insn->dst_reg,
398 insn->off, insn->imm);
399 } else if (class == BPF_LDX) {
400 if (BPF_MODE(insn->code) != BPF_MEM) {
401 verbose("BUG_ldx_%02x\n", insn->code);
402 return;
403 }
404 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
405 insn->code, insn->dst_reg,
406 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
407 insn->src_reg, insn->off);
408 } else if (class == BPF_LD) {
409 if (BPF_MODE(insn->code) == BPF_ABS) {
410 verbose("(%02x) r0 = *(%s *)skb[%d]\n",
411 insn->code,
412 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
413 insn->imm);
414 } else if (BPF_MODE(insn->code) == BPF_IND) {
415 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
416 insn->code,
417 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
418 insn->src_reg, insn->imm);
419 } else if (BPF_MODE(insn->code) == BPF_IMM &&
420 BPF_SIZE(insn->code) == BPF_DW) {
421 /* At this point, we already made sure that the second
422 * part of the ldimm64 insn is accessible.
423 */
424 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
425 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
426
427 if (map_ptr && !env->allow_ptr_leaks)
428 imm = 0;
429
430 verbose("(%02x) r%d = 0x%llx\n", insn->code,
431 insn->dst_reg, (unsigned long long)imm);
432 } else {
433 verbose("BUG_ld_%02x\n", insn->code);
434 return;
435 }
436 } else if (class == BPF_JMP) {
437 u8 opcode = BPF_OP(insn->code);
438
439 if (opcode == BPF_CALL) {
440 verbose("(%02x) call %s#%d\n", insn->code,
441 func_id_name(insn->imm), insn->imm);
442 } else if (insn->code == (BPF_JMP | BPF_JA)) {
443 verbose("(%02x) goto pc%+d\n",
444 insn->code, insn->off);
445 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
446 verbose("(%02x) exit\n", insn->code);
447 } else if (BPF_SRC(insn->code) == BPF_X) {
448 verbose("(%02x) if r%d %s r%d goto pc%+d\n",
449 insn->code, insn->dst_reg,
450 bpf_jmp_string[BPF_OP(insn->code) >> 4],
451 insn->src_reg, insn->off);
452 } else {
453 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
454 insn->code, insn->dst_reg,
455 bpf_jmp_string[BPF_OP(insn->code) >> 4],
456 insn->imm, insn->off);
457 }
458 } else {
459 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
460 }
461 }
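
/* A few examples of the disassembly format produced above (the leading hex
 * byte is the insn opcode; register numbers and offsets are illustrative):
 *   (b7) r2 = 0                     // BPF_MOV64_IMM(BPF_REG_2, 0)
 *   (63) *(u32 *)(r10 -4) = r2      // BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4)
 *   (85) call bpf_map_lookup_elem#1
 *   (15) if r0 == 0x0 goto pc+2
 */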
462
463 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
464 {
465 struct bpf_verifier_stack_elem *elem;
466 int insn_idx;
467
468 if (env->head == NULL)
469 return -1;
470
471 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
472 insn_idx = env->head->insn_idx;
473 if (prev_insn_idx)
474 *prev_insn_idx = env->head->prev_insn_idx;
475 elem = env->head->next;
476 kfree(env->head);
477 env->head = elem;
478 env->stack_size--;
479 return insn_idx;
480 }
481
482 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
483 int insn_idx, int prev_insn_idx)
484 {
485 struct bpf_verifier_stack_elem *elem;
486
487 elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
488 if (!elem)
489 goto err;
490
491 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
492 elem->insn_idx = insn_idx;
493 elem->prev_insn_idx = prev_insn_idx;
494 elem->next = env->head;
495 env->head = elem;
496 env->stack_size++;
497 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
498 verbose("BPF program is too complex\n");
499 goto err;
500 }
501 return &elem->st;
502 err:
503 /* pop all elements and return */
504 while (pop_stack(env, NULL) >= 0);
505 return NULL;
506 }
507
508 #define CALLER_SAVED_REGS 6
509 static const int caller_saved[CALLER_SAVED_REGS] = {
510 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
511 };
512
513 static void __mark_reg_not_init(struct bpf_reg_state *reg);
514
515 /* Mark the unknown part of a register (variable offset or scalar value) as
516 * known to have the value @imm.
517 */
518 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
519 {
520 reg->id = 0;
521 reg->var_off = tnum_const(imm);
522 reg->smin_value = (s64)imm;
523 reg->smax_value = (s64)imm;
524 reg->umin_value = imm;
525 reg->umax_value = imm;
526 }
527
528 /* Mark the 'variable offset' part of a register as zero. This should be
529 * used only on registers holding a pointer type.
530 */
531 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
532 {
533 __mark_reg_known(reg, 0);
534 }
535
536 static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno)
537 {
538 if (WARN_ON(regno >= MAX_BPF_REG)) {
539 verbose("mark_reg_known_zero(regs, %u)\n", regno);
540 /* Something bad happened, let's kill all regs */
541 for (regno = 0; regno < MAX_BPF_REG; regno++)
542 __mark_reg_not_init(regs + regno);
543 return;
544 }
545 __mark_reg_known_zero(regs + regno);
546 }
547
548 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
549 {
550 return type_is_pkt_pointer(reg->type);
551 }
552
553 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
554 {
555 return reg_is_pkt_pointer(reg) ||
556 reg->type == PTR_TO_PACKET_END;
557 }
558
559 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
560 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
561 enum bpf_reg_type which)
562 {
563 /* The register can already have a range from prior markings.
564 * This is fine as long as it hasn't been advanced from its
565 * origin.
566 */
567 return reg->type == which &&
568 reg->id == 0 &&
569 reg->off == 0 &&
570 tnum_equals_const(reg->var_off, 0);
571 }
572
573 /* Attempts to improve min/max values based on var_off information */
574 static void __update_reg_bounds(struct bpf_reg_state *reg)
575 {
576 /* min signed is max(sign bit) | min(other bits) */
577 reg->smin_value = max_t(s64, reg->smin_value,
578 reg->var_off.value | (reg->var_off.mask & S64_MIN));
579 /* max signed is min(sign bit) | max(other bits) */
580 reg->smax_value = min_t(s64, reg->smax_value,
581 reg->var_off.value | (reg->var_off.mask & S64_MAX));
582 reg->umin_value = max(reg->umin_value, reg->var_off.value);
583 reg->umax_value = min(reg->umax_value,
584 reg->var_off.value | reg->var_off.mask);
585 }
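
/* Worked example (illustrative numbers): with var_off = {value = 0x4,
 * mask = 0x3} the register is known to be binary 1xx, i.e. one of 4..7.
 * The code above then raises umin_value to 4 and lowers umax_value to
 * (0x4 | 0x3) = 7; the signed bounds tighten to the same 4..7 because the
 * sign bit is known to be clear.
 */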
586
587 /* Uses signed min/max values to inform unsigned, and vice-versa */
588 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
589 {
590 /* Learn sign from signed bounds.
591 * If we cannot cross the sign boundary, then signed and unsigned bounds
592 * are the same, so combine. This works even in the negative case, e.g.
593 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
594 */
595 if (reg->smin_value >= 0 || reg->smax_value < 0) {
596 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
597 reg->umin_value);
598 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
599 reg->umax_value);
600 return;
601 }
602 /* Learn sign from unsigned bounds. Signed bounds cross the sign
603 * boundary, so we must be careful.
604 */
605 if ((s64)reg->umax_value >= 0) {
606 /* Positive. We can't learn anything from the smin, but smax
607 * is positive, hence safe.
608 */
609 reg->smin_value = reg->umin_value;
610 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
611 reg->umax_value);
612 } else if ((s64)reg->umin_value < 0) {
613 /* Negative. We can't learn anything from the smax, but smin
614 * is negative, hence safe.
615 */
616 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
617 reg->umin_value);
618 reg->smax_value = reg->umax_value;
619 }
620 }
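
/* Worked example (illustrative): with -3 s<= x s<= -1 the first branch above
 * applies (smax_value < 0) and the unsigned bounds become
 * 0xfffffffffffffffd u<= x u<= 0xffffffffffffffff, the bit patterns of -3..-1.
 * Conversely, if only 0 u<= x u<= 100 is known, umax_value is positive as an
 * s64, so the 'positive' branch sets smin_value = 0 and smax_value = 100.
 */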
621
622 /* Attempts to improve var_off based on unsigned min/max information */
623 static void __reg_bound_offset(struct bpf_reg_state *reg)
624 {
625 reg->var_off = tnum_intersect(reg->var_off,
626 tnum_range(reg->umin_value,
627 reg->umax_value));
628 }
629
630 /* Reset the min/max bounds of a register */
631 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
632 {
633 reg->smin_value = S64_MIN;
634 reg->smax_value = S64_MAX;
635 reg->umin_value = 0;
636 reg->umax_value = U64_MAX;
637 }
638
639 /* Mark a register as having a completely unknown (scalar) value. */
640 static void __mark_reg_unknown(struct bpf_reg_state *reg)
641 {
642 reg->type = SCALAR_VALUE;
643 reg->id = 0;
644 reg->off = 0;
645 reg->var_off = tnum_unknown;
646 __mark_reg_unbounded(reg);
647 }
648
649 static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno)
650 {
651 if (WARN_ON(regno >= MAX_BPF_REG)) {
652 verbose("mark_reg_unknown(regs, %u)\n", regno);
653 /* Something bad happened, let's kill all regs */
654 for (regno = 0; regno < MAX_BPF_REG; regno++)
655 __mark_reg_not_init(regs + regno);
656 return;
657 }
658 __mark_reg_unknown(regs + regno);
659 }
660
661 static void __mark_reg_not_init(struct bpf_reg_state *reg)
662 {
663 __mark_reg_unknown(reg);
664 reg->type = NOT_INIT;
665 }
666
667 static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
668 {
669 if (WARN_ON(regno >= MAX_BPF_REG)) {
670 verbose("mark_reg_not_init(regs, %u)\n", regno);
671 /* Something bad happened, let's kill all regs */
672 for (regno = 0; regno < MAX_BPF_REG; regno++)
673 __mark_reg_not_init(regs + regno);
674 return;
675 }
676 __mark_reg_not_init(regs + regno);
677 }
678
679 static void init_reg_state(struct bpf_reg_state *regs)
680 {
681 int i;
682
683 for (i = 0; i < MAX_BPF_REG; i++) {
684 mark_reg_not_init(regs, i);
685 regs[i].live = REG_LIVE_NONE;
686 }
687
688 /* frame pointer */
689 regs[BPF_REG_FP].type = PTR_TO_STACK;
690 mark_reg_known_zero(regs, BPF_REG_FP);
691
692 /* 1st arg to a function */
693 regs[BPF_REG_1].type = PTR_TO_CTX;
694 mark_reg_known_zero(regs, BPF_REG_1);
695 }
696
697 enum reg_arg_type {
698 SRC_OP, /* register is used as source operand */
699 DST_OP, /* register is used as destination operand */
700 DST_OP_NO_MARK /* same as above, check only, don't mark */
701 };
702
703 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
704 {
705 struct bpf_verifier_state *parent = state->parent;
706
707 while (parent) {
708 /* if read wasn't screened by an earlier write ... */
709 if (state->regs[regno].live & REG_LIVE_WRITTEN)
710 break;
711 /* ... then we depend on parent's value */
712 parent->regs[regno].live |= REG_LIVE_READ;
713 state = parent;
714 parent = state->parent;
715 }
716 }
717
718 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
719 enum reg_arg_type t)
720 {
721 struct bpf_reg_state *regs = env->cur_state.regs;
722
723 if (regno >= MAX_BPF_REG) {
724 verbose("R%d is invalid\n", regno);
725 return -EINVAL;
726 }
727
728 if (t == SRC_OP) {
729 /* check whether register used as source operand can be read */
730 if (regs[regno].type == NOT_INIT) {
731 verbose("R%d !read_ok\n", regno);
732 return -EACCES;
733 }
734 mark_reg_read(&env->cur_state, regno);
735 } else {
736 /* check whether register used as dest operand can be written to */
737 if (regno == BPF_REG_FP) {
738 verbose("frame pointer is read only\n");
739 return -EACCES;
740 }
741 regs[regno].live |= REG_LIVE_WRITTEN;
742 if (t == DST_OP)
743 mark_reg_unknown(regs, regno);
744 }
745 return 0;
746 }
747
748 static bool is_spillable_regtype(enum bpf_reg_type type)
749 {
750 switch (type) {
751 case PTR_TO_MAP_VALUE:
752 case PTR_TO_MAP_VALUE_OR_NULL:
753 case PTR_TO_STACK:
754 case PTR_TO_CTX:
755 case PTR_TO_PACKET:
756 case PTR_TO_PACKET_META:
757 case PTR_TO_PACKET_END:
758 case CONST_PTR_TO_MAP:
759 return true;
760 default:
761 return false;
762 }
763 }
764
765 /* check_stack_read/write functions track spill/fill of registers;
766 * stack boundary and alignment are checked in check_mem_access()
767 */
768 static int check_stack_write(struct bpf_verifier_state *state, int off,
769 int size, int value_regno)
770 {
771 int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
772 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
773 * so it's aligned access and [off, off + size) are within stack limits
774 */
775
776 if (value_regno >= 0 &&
777 is_spillable_regtype(state->regs[value_regno].type)) {
778
779 /* register containing pointer is being spilled into stack */
780 if (size != BPF_REG_SIZE) {
781 verbose("invalid size of register spill\n");
782 return -EACCES;
783 }
784
785 /* save register state */
786 state->spilled_regs[spi] = state->regs[value_regno];
787 state->spilled_regs[spi].live |= REG_LIVE_WRITTEN;
788
789 for (i = 0; i < BPF_REG_SIZE; i++)
790 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
791 } else {
792 /* regular write of data into stack */
793 state->spilled_regs[spi] = (struct bpf_reg_state) {};
794
795 for (i = 0; i < size; i++)
796 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
797 }
798 return 0;
799 }
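
/* Example (a sketch): spilling and later refilling a context pointer
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill R1 (PTR_TO_CTX)
 *   ...
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // fill it back
 * The 8-byte store marks slots fp-8..fp-1 as STACK_SPILL and records the full
 * register state in spilled_regs[], so the later load restores PTR_TO_CTX
 * rather than an unknown scalar. Spilling a pointer with a store narrower
 * than BPF_REG_SIZE is rejected above.
 */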
800
801 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
802 {
803 struct bpf_verifier_state *parent = state->parent;
804
805 while (parent) {
806 /* if read wasn't screened by an earlier write ... */
807 if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN)
808 break;
809 /* ... then we depend on parent's value */
810 parent->spilled_regs[slot].live |= REG_LIVE_READ;
811 state = parent;
812 parent = state->parent;
813 }
814 }
815
816 static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
817 int value_regno)
818 {
819 u8 *slot_type;
820 int i, spi;
821
822 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
823
824 if (slot_type[0] == STACK_SPILL) {
825 if (size != BPF_REG_SIZE) {
826 verbose("invalid size of register spill\n");
827 return -EACCES;
828 }
829 for (i = 1; i < BPF_REG_SIZE; i++) {
830 if (slot_type[i] != STACK_SPILL) {
831 verbose("corrupted spill memory\n");
832 return -EACCES;
833 }
834 }
835
836 spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
837
838 if (value_regno >= 0) {
839 /* restore register state from stack */
840 state->regs[value_regno] = state->spilled_regs[spi];
841 mark_stack_slot_read(state, spi);
842 }
843 return 0;
844 } else {
845 for (i = 0; i < size; i++) {
846 if (slot_type[i] != STACK_MISC) {
847 verbose("invalid read from stack off %d+%d size %d\n",
848 off, i, size);
849 return -EACCES;
850 }
851 }
852 if (value_regno >= 0)
853 /* have read misc data from the stack */
854 mark_reg_unknown(state->regs, value_regno);
855 return 0;
856 }
857 }
858
859 /* check read/write into map element returned by bpf_map_lookup_elem() */
860 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
861 int size)
862 {
863 struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
864
865 if (off < 0 || size <= 0 || off + size > map->value_size) {
866 verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
867 map->value_size, off, size);
868 return -EACCES;
869 }
870 return 0;
871 }
872
873 /* check read/write into a map element with possible variable offset */
874 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
875 int off, int size)
876 {
877 struct bpf_verifier_state *state = &env->cur_state;
878 struct bpf_reg_state *reg = &state->regs[regno];
879 int err;
880
881 /* We may have adjusted the register to this map value, so we
882 * need to try adding each of min_value and max_value to off
883 * to make sure our theoretical access will be safe.
884 */
885 if (log_level)
886 print_verifier_state(state);
887 /* The minimum value is only important with signed
888 * comparisons where we can't assume the floor of a
889 * value is 0. If we are using signed variables for our
890 * index'es we need to make sure that whatever we use
891 * will have a set floor within our range.
892 */
893 if (reg->smin_value < 0) {
894 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
895 regno);
896 return -EACCES;
897 }
898 err = __check_map_access(env, regno, reg->smin_value + off, size);
899 if (err) {
900 verbose("R%d min value is outside of the array range\n", regno);
901 return err;
902 }
903
904 /* If we haven't set a max value then we need to bail since we can't be
905 * sure we won't do bad things.
906 * If reg->umax_value + off could overflow, treat that as unbounded too.
907 */
908 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
909 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
910 regno);
911 return -EACCES;
912 }
913 err = __check_map_access(env, regno, reg->umax_value + off, size);
914 if (err)
915 verbose("R%d max value is outside of the array range\n", regno);
916 return err;
917 }
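
/* From the program author's side (an illustrative C-level sketch; 'map',
 * 'key', 'idx' and ARRAY_LEN are made-up names), this is what makes the
 * usual pattern verifiable:
 *   u32 *val = bpf_map_lookup_elem(&map, &key);
 *   if (val && idx < ARRAY_LEN)
 *           return val[idx];
 * The 'idx < ARRAY_LEN' test gives the index register a known umax_value,
 * from which the verifier also deduces smin_value >= 0, so both
 * __check_map_access() probes above stay inside value_size.
 */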
918
919 #define MAX_PACKET_OFF 0xffff
920
921 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
922 const struct bpf_call_arg_meta *meta,
923 enum bpf_access_type t)
924 {
925 switch (env->prog->type) {
926 case BPF_PROG_TYPE_LWT_IN:
927 case BPF_PROG_TYPE_LWT_OUT:
928 /* dst_input() and dst_output() can't write for now */
929 if (t == BPF_WRITE)
930 return false;
931 /* fallthrough */
932 case BPF_PROG_TYPE_SCHED_CLS:
933 case BPF_PROG_TYPE_SCHED_ACT:
934 case BPF_PROG_TYPE_XDP:
935 case BPF_PROG_TYPE_LWT_XMIT:
936 case BPF_PROG_TYPE_SK_SKB:
937 if (meta)
938 return meta->pkt_access;
939
940 env->seen_direct_write = true;
941 return true;
942 default:
943 return false;
944 }
945 }
946
947 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
948 int off, int size)
949 {
950 struct bpf_reg_state *regs = env->cur_state.regs;
951 struct bpf_reg_state *reg = &regs[regno];
952
953 if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
954 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
955 off, size, regno, reg->id, reg->off, reg->range);
956 return -EACCES;
957 }
958 return 0;
959 }
960
961 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
962 int size)
963 {
964 struct bpf_reg_state *regs = env->cur_state.regs;
965 struct bpf_reg_state *reg = &regs[regno];
966 int err;
967
968 /* We may have added a variable offset to the packet pointer; but any
969 * reg->range we have comes after that. We are only checking the fixed
970 * offset.
971 */
972
973 /* We don't allow negative numbers, because we aren't tracking enough
974 * detail to prove they're safe.
975 */
976 if (reg->smin_value < 0) {
977 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
978 regno);
979 return -EACCES;
980 }
981 err = __check_packet_access(env, regno, off, size);
982 if (err) {
983 verbose("R%d offset is outside of the packet\n", regno);
984 return err;
985 }
986 return err;
987 }
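
/* Typical direct packet access pattern this check supports (an illustrative
 * C-level sketch for a SCHED_CLS/XDP style program):
 *   void *data     = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   struct ethhdr *eth = data;
 *   if (data + sizeof(*eth) > data_end)
 *           return TC_ACT_OK;
 *   ... eth->h_proto ...
 * The comparison against data_end is what establishes reg->range on the
 * PTR_TO_PACKET register, and __check_packet_access() only has to verify
 * the fixed offset and size against that range.
 */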
988
989 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
990 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
991 enum bpf_access_type t, enum bpf_reg_type *reg_type)
992 {
993 struct bpf_insn_access_aux info = {
994 .reg_type = *reg_type,
995 };
996
997 /* for analyzer ctx accesses are already validated and converted */
998 if (env->analyzer_ops)
999 return 0;
1000
1001 if (env->prog->aux->ops->is_valid_access &&
1002 env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
1003 /* A non zero info.ctx_field_size indicates that this field is a
1004 * candidate for later verifier transformation to load the whole
1005 * field and then apply a mask when accessed with a narrower
1006 * access than actual ctx access size. A zero info.ctx_field_size
1007 * will only allow for whole field access and rejects any other
1008 * type of narrower access.
1009 */
1010 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1011 *reg_type = info.reg_type;
1012
1013 /* remember the offset of last byte accessed in ctx */
1014 if (env->prog->aux->max_ctx_offset < off + size)
1015 env->prog->aux->max_ctx_offset = off + size;
1016 return 0;
1017 }
1018
1019 verbose("invalid bpf_context access off=%d size=%d\n", off, size);
1020 return -EACCES;
1021 }
1022
1023 static bool __is_pointer_value(bool allow_ptr_leaks,
1024 const struct bpf_reg_state *reg)
1025 {
1026 if (allow_ptr_leaks)
1027 return false;
1028
1029 return reg->type != SCALAR_VALUE;
1030 }
1031
1032 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1033 {
1034 return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
1035 }
1036
1037 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
1038 int off, int size, bool strict)
1039 {
1040 struct tnum reg_off;
1041 int ip_align;
1042
1043 /* Byte size accesses are always allowed. */
1044 if (!strict || size == 1)
1045 return 0;
1046
1047 /* For platforms that do not have a Kconfig enabling
1048 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1049 * NET_IP_ALIGN is universally set to '2'. And on platforms
1050 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1051 * to this code only in strict mode where we want to emulate
1052 * the NET_IP_ALIGN==2 checking. Therefore use an
1053 * unconditional IP align value of '2'.
1054 */
1055 ip_align = 2;
1056
1057 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1058 if (!tnum_is_aligned(reg_off, size)) {
1059 char tn_buf[48];
1060
1061 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1062 verbose("misaligned packet access off %d+%s+%d+%d size %d\n",
1063 ip_align, tn_buf, reg->off, off, size);
1064 return -EACCES;
1065 }
1066
1067 return 0;
1068 }
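
/* Worked example (illustrative): with ip_align == 2 and no variable offset,
 * a u16 load at packet offset 12 (the ethernet h_proto field) checks
 * 2 + 0 + 12 = 14, which is 2-byte aligned and passes; a u32 load at offset
 * 14 checks 16 and also passes, while a u32 load at offset 15 (total 17)
 * is rejected in strict mode.
 */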
1069
1070 static int check_generic_ptr_alignment(const struct bpf_reg_state *reg,
1071 const char *pointer_desc,
1072 int off, int size, bool strict)
1073 {
1074 struct tnum reg_off;
1075
1076 /* Byte size accesses are always allowed. */
1077 if (!strict || size == 1)
1078 return 0;
1079
1080 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1081 if (!tnum_is_aligned(reg_off, size)) {
1082 char tn_buf[48];
1083
1084 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1085 verbose("misaligned %saccess off %s+%d+%d size %d\n",
1086 pointer_desc, tn_buf, reg->off, off, size);
1087 return -EACCES;
1088 }
1089
1090 return 0;
1091 }
1092
1093 static int check_ptr_alignment(struct bpf_verifier_env *env,
1094 const struct bpf_reg_state *reg,
1095 int off, int size)
1096 {
1097 bool strict = env->strict_alignment;
1098 const char *pointer_desc = "";
1099
1100 switch (reg->type) {
1101 case PTR_TO_PACKET:
1102 case PTR_TO_PACKET_META:
1103 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1104 * right in front, treat it the very same way.
1105 */
1106 return check_pkt_ptr_alignment(reg, off, size, strict);
1107 case PTR_TO_MAP_VALUE:
1108 pointer_desc = "value ";
1109 break;
1110 case PTR_TO_CTX:
1111 pointer_desc = "context ";
1112 break;
1113 case PTR_TO_STACK:
1114 pointer_desc = "stack ";
1115 break;
1116 default:
1117 break;
1118 }
1119 return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
1120 }
1121
1122 /* check whether memory at (regno + off) is accessible for t = (read | write)
1123 * if t==write, value_regno is a register whose value is stored into memory
1124 * if t==read, value_regno is a register which will receive the value from memory
1125 * if t==write && value_regno==-1, some unknown value is stored into memory
1126 * if t==read && value_regno==-1, don't care what we read from memory
1127 */
1128 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
1129 int bpf_size, enum bpf_access_type t,
1130 int value_regno)
1131 {
1132 struct bpf_verifier_state *state = &env->cur_state;
1133 struct bpf_reg_state *reg = &state->regs[regno];
1134 int size, err = 0;
1135
1136 size = bpf_size_to_bytes(bpf_size);
1137 if (size < 0)
1138 return size;
1139
1140 /* alignment checks will add in reg->off themselves */
1141 err = check_ptr_alignment(env, reg, off, size);
1142 if (err)
1143 return err;
1144
1145 /* for access checks, reg->off is just part of off */
1146 off += reg->off;
1147
1148 if (reg->type == PTR_TO_MAP_VALUE) {
1149 if (t == BPF_WRITE && value_regno >= 0 &&
1150 is_pointer_value(env, value_regno)) {
1151 verbose("R%d leaks addr into map\n", value_regno);
1152 return -EACCES;
1153 }
1154
1155 err = check_map_access(env, regno, off, size);
1156 if (!err && t == BPF_READ && value_regno >= 0)
1157 mark_reg_unknown(state->regs, value_regno);
1158
1159 } else if (reg->type == PTR_TO_CTX) {
1160 enum bpf_reg_type reg_type = SCALAR_VALUE;
1161
1162 if (t == BPF_WRITE && value_regno >= 0 &&
1163 is_pointer_value(env, value_regno)) {
1164 verbose("R%d leaks addr into ctx\n", value_regno);
1165 return -EACCES;
1166 }
1167 /* ctx accesses must be at a fixed offset, so that we can
1168 * determine what type of data were returned.
1169 */
1170 if (!tnum_is_const(reg->var_off)) {
1171 char tn_buf[48];
1172
1173 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1174 verbose("variable ctx access var_off=%s off=%d size=%d",
1175 tn_buf, off, size);
1176 return -EACCES;
1177 }
1178 off += reg->var_off.value;
1179 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1180 if (!err && t == BPF_READ && value_regno >= 0) {
1181 /* ctx access returns either a scalar, or a
1182 * PTR_TO_PACKET[_META,_END]. In the latter
1183 * case, we know the offset is zero.
1184 */
1185 if (reg_type == SCALAR_VALUE)
1186 mark_reg_unknown(state->regs, value_regno);
1187 else
1188 mark_reg_known_zero(state->regs, value_regno);
1189 state->regs[value_regno].id = 0;
1190 state->regs[value_regno].off = 0;
1191 state->regs[value_regno].range = 0;
1192 state->regs[value_regno].type = reg_type;
1193 }
1194
1195 } else if (reg->type == PTR_TO_STACK) {
1196 /* stack accesses must be at a fixed offset, so that we can
1197 * determine what type of data were returned.
1198 * See check_stack_read().
1199 */
1200 if (!tnum_is_const(reg->var_off)) {
1201 char tn_buf[48];
1202
1203 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1204 verbose("variable stack access var_off=%s off=%d size=%d",
1205 tn_buf, off, size);
1206 return -EACCES;
1207 }
1208 off += reg->var_off.value;
1209 if (off >= 0 || off < -MAX_BPF_STACK) {
1210 verbose("invalid stack off=%d size=%d\n", off, size);
1211 return -EACCES;
1212 }
1213
1214 if (env->prog->aux->stack_depth < -off)
1215 env->prog->aux->stack_depth = -off;
1216
1217 if (t == BPF_WRITE) {
1218 if (!env->allow_ptr_leaks &&
1219 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
1220 size != BPF_REG_SIZE) {
1221 verbose("attempt to corrupt spilled pointer on stack\n");
1222 return -EACCES;
1223 }
1224 err = check_stack_write(state, off, size, value_regno);
1225 } else {
1226 err = check_stack_read(state, off, size, value_regno);
1227 }
1228 } else if (reg_is_pkt_pointer(reg)) {
1229 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1230 verbose("cannot write into packet\n");
1231 return -EACCES;
1232 }
1233 if (t == BPF_WRITE && value_regno >= 0 &&
1234 is_pointer_value(env, value_regno)) {
1235 verbose("R%d leaks addr into packet\n", value_regno);
1236 return -EACCES;
1237 }
1238 err = check_packet_access(env, regno, off, size);
1239 if (!err && t == BPF_READ && value_regno >= 0)
1240 mark_reg_unknown(state->regs, value_regno);
1241 } else {
1242 verbose("R%d invalid mem access '%s'\n",
1243 regno, reg_type_str[reg->type]);
1244 return -EACCES;
1245 }
1246
1247 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1248 state->regs[value_regno].type == SCALAR_VALUE) {
1249 /* b/h/w load zero-extends, mark upper bits as known 0 */
1250 state->regs[value_regno].var_off = tnum_cast(
1251 state->regs[value_regno].var_off, size);
1252 __update_reg_bounds(&state->regs[value_regno]);
1253 }
1254 return err;
1255 }
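
/* Worked example (illustrative): after r1 = *(u8 *)(r2 + 0) from a valid
 * pointer, the tnum_cast() above leaves r1 with var_off = {value = 0,
 * mask = 0xff}, and __update_reg_bounds() then yields 0 <= r1 <= 255 in both
 * the signed and unsigned domains, reflecting that narrow loads zero-extend.
 */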
1256
1257 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1258 {
1259 int err;
1260
1261 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1262 insn->imm != 0) {
1263 verbose("BPF_XADD uses reserved fields\n");
1264 return -EINVAL;
1265 }
1266
1267 /* check src1 operand */
1268 err = check_reg_arg(env, insn->src_reg, SRC_OP);
1269 if (err)
1270 return err;
1271
1272 /* check src2 operand */
1273 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1274 if (err)
1275 return err;
1276
1277 if (is_pointer_value(env, insn->src_reg)) {
1278 verbose("R%d leaks addr into mem\n", insn->src_reg);
1279 return -EACCES;
1280 }
1281
1282 /* check whether atomic_add can read the memory */
1283 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1284 BPF_SIZE(insn->code), BPF_READ, -1);
1285 if (err)
1286 return err;
1287
1288 /* check whether atomic_add can write into the same memory */
1289 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1290 BPF_SIZE(insn->code), BPF_WRITE, -1);
1291 }
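
/* Example XADD use (a sketch, assuming the BPF_STX_XADD() helper macro from
 * linux/filter.h): a simple counter bump such as
 *   BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), // *(u64 *)(r0 + 0) += r1
 * is verified here as both a read and a write of the target memory, and the
 * value being added (r1) must not be a pointer.
 */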
1292
1293 /* Does this register contain a constant zero? */
1294 static bool register_is_null(struct bpf_reg_state reg)
1295 {
1296 return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
1297 }
1298
1299 /* when register 'regno' is passed into function that will read 'access_size'
1300 * bytes from that pointer, make sure that it's within stack boundary
1301 * and all elements of stack are initialized.
1302 * Unlike most pointer bounds-checking functions, this one doesn't take an
1303 * 'off' argument, so it has to add in reg->off itself.
1304 */
1305 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1306 int access_size, bool zero_size_allowed,
1307 struct bpf_call_arg_meta *meta)
1308 {
1309 struct bpf_verifier_state *state = &env->cur_state;
1310 struct bpf_reg_state *regs = state->regs;
1311 int off, i;
1312
1313 if (regs[regno].type != PTR_TO_STACK) {
1314 /* Allow zero-byte read from NULL, regardless of pointer type */
1315 if (zero_size_allowed && access_size == 0 &&
1316 register_is_null(regs[regno]))
1317 return 0;
1318
1319 verbose("R%d type=%s expected=%s\n", regno,
1320 reg_type_str[regs[regno].type],
1321 reg_type_str[PTR_TO_STACK]);
1322 return -EACCES;
1323 }
1324
1325 /* Only allow fixed-offset stack reads */
1326 if (!tnum_is_const(regs[regno].var_off)) {
1327 char tn_buf[48];
1328
1329 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1330 verbose("invalid variable stack read R%d var_off=%s\n",
1331 regno, tn_buf);
return -EACCES;
1332 }
1333 off = regs[regno].off + regs[regno].var_off.value;
1334 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1335 access_size <= 0) {
1336 verbose("invalid stack type R%d off=%d access_size=%d\n",
1337 regno, off, access_size);
1338 return -EACCES;
1339 }
1340
1341 if (env->prog->aux->stack_depth < -off)
1342 env->prog->aux->stack_depth = -off;
1343
1344 if (meta && meta->raw_mode) {
1345 meta->access_size = access_size;
1346 meta->regno = regno;
1347 return 0;
1348 }
1349
1350 for (i = 0; i < access_size; i++) {
1351 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
1352 verbose("invalid indirect read from stack off %d+%d size %d\n",
1353 off, i, access_size);
1354 return -EACCES;
1355 }
1356 }
1357 return 0;
1358 }
1359
1360 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1361 int access_size, bool zero_size_allowed,
1362 struct bpf_call_arg_meta *meta)
1363 {
1364 struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
1365
1366 switch (reg->type) {
1367 case PTR_TO_PACKET:
1368 case PTR_TO_PACKET_META:
1369 return check_packet_access(env, regno, reg->off, access_size);
1370 case PTR_TO_MAP_VALUE:
1371 return check_map_access(env, regno, reg->off, access_size);
1372 default: /* scalar_value|ptr_to_stack or invalid ptr */
1373 return check_stack_boundary(env, regno, access_size,
1374 zero_size_allowed, meta);
1375 }
1376 }
1377
1378 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1379 enum bpf_arg_type arg_type,
1380 struct bpf_call_arg_meta *meta)
1381 {
1382 struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
1383 enum bpf_reg_type expected_type, type = reg->type;
1384 int err = 0;
1385
1386 if (arg_type == ARG_DONTCARE)
1387 return 0;
1388
1389 err = check_reg_arg(env, regno, SRC_OP);
1390 if (err)
1391 return err;
1392
1393 if (arg_type == ARG_ANYTHING) {
1394 if (is_pointer_value(env, regno)) {
1395 verbose("R%d leaks addr into helper function\n", regno);
1396 return -EACCES;
1397 }
1398 return 0;
1399 }
1400
1401 if (type_is_pkt_pointer(type) &&
1402 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1403 verbose("helper access to the packet is not allowed\n");
1404 return -EACCES;
1405 }
1406
1407 if (arg_type == ARG_PTR_TO_MAP_KEY ||
1408 arg_type == ARG_PTR_TO_MAP_VALUE) {
1409 expected_type = PTR_TO_STACK;
1410 if (!type_is_pkt_pointer(type) &&
1411 type != expected_type)
1412 goto err_type;
1413 } else if (arg_type == ARG_CONST_SIZE ||
1414 arg_type == ARG_CONST_SIZE_OR_ZERO) {
1415 expected_type = SCALAR_VALUE;
1416 if (type != expected_type)
1417 goto err_type;
1418 } else if (arg_type == ARG_CONST_MAP_PTR) {
1419 expected_type = CONST_PTR_TO_MAP;
1420 if (type != expected_type)
1421 goto err_type;
1422 } else if (arg_type == ARG_PTR_TO_CTX) {
1423 expected_type = PTR_TO_CTX;
1424 if (type != expected_type)
1425 goto err_type;
1426 } else if (arg_type == ARG_PTR_TO_MEM ||
1427 arg_type == ARG_PTR_TO_UNINIT_MEM) {
1428 expected_type = PTR_TO_STACK;
1429 /* One exception here. In case function allows for NULL to be
1430 * passed in as argument, it's a SCALAR_VALUE type. Final test
1431 * happens during stack boundary checking.
1432 */
1433 if (register_is_null(*reg))
1434 /* final test in check_stack_boundary() */;
1435 else if (!type_is_pkt_pointer(type) &&
1436 type != PTR_TO_MAP_VALUE &&
1437 type != expected_type)
1438 goto err_type;
1439 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1440 } else {
1441 verbose("unsupported arg_type %d\n", arg_type);
1442 return -EFAULT;
1443 }
1444
1445 if (arg_type == ARG_CONST_MAP_PTR) {
1446 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1447 meta->map_ptr = reg->map_ptr;
1448 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1449 /* bpf_map_xxx(..., map_ptr, ..., key) call:
1450 * check that [key, key + map->key_size) are within
1451 * stack limits and initialized
1452 */
1453 if (!meta->map_ptr) {
1454 /* in function declaration map_ptr must come before
1455 * map_key, so that it's verified and known before
1456 * we have to check map_key here. Otherwise it means
1457 * that the kernel subsystem misconfigured the verifier
1458 */
1459 verbose("invalid map_ptr to access map->key\n");
1460 return -EACCES;
1461 }
1462 if (type_is_pkt_pointer(type))
1463 err = check_packet_access(env, regno, reg->off,
1464 meta->map_ptr->key_size);
1465 else
1466 err = check_stack_boundary(env, regno,
1467 meta->map_ptr->key_size,
1468 false, NULL);
1469 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1470 /* bpf_map_xxx(..., map_ptr, ..., value) call:
1471 * check [value, value + map->value_size) validity
1472 */
1473 if (!meta->map_ptr) {
1474 /* kernel subsystem misconfigured verifier */
1475 verbose("invalid map_ptr to access map->value\n");
1476 return -EACCES;
1477 }
1478 if (type_is_pkt_pointer(type))
1479 err = check_packet_access(env, regno, reg->off,
1480 meta->map_ptr->value_size);
1481 else
1482 err = check_stack_boundary(env, regno,
1483 meta->map_ptr->value_size,
1484 false, NULL);
1485 } else if (arg_type == ARG_CONST_SIZE ||
1486 arg_type == ARG_CONST_SIZE_OR_ZERO) {
1487 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1488
1489 /* bpf_xxx(..., buf, len) call will access 'len' bytes
1490 * from stack pointer 'buf'. Check it
1491 * note: regno == len, regno - 1 == buf
1492 */
1493 if (regno == 0) {
1494 /* kernel subsystem misconfigured verifier */
1495 verbose("ARG_CONST_SIZE cannot be first argument\n");
1496 return -EACCES;
1497 }
1498
1499 /* The register is SCALAR_VALUE; the access check
1500 * happens using its boundaries.
1501 */
1502
1503 if (!tnum_is_const(reg->var_off))
1504 /* For unprivileged variable accesses, disable raw
1505 * mode so that the program is required to
1506 * initialize all the memory that the helper could
1507 * just partially fill up.
1508 */
1509 meta = NULL;
1510
1511 if (reg->smin_value < 0) {
1512 verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
1513 regno);
1514 return -EACCES;
1515 }
1516
1517 if (reg->umin_value == 0) {
1518 err = check_helper_mem_access(env, regno - 1, 0,
1519 zero_size_allowed,
1520 meta);
1521 if (err)
1522 return err;
1523 }
1524
1525 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
1526 verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1527 regno);
1528 return -EACCES;
1529 }
1530 err = check_helper_mem_access(env, regno - 1,
1531 reg->umax_value,
1532 zero_size_allowed, meta);
1533 }
1534
1535 return err;
1536 err_type:
1537 verbose("R%d type=%s expected=%s\n", regno,
1538 reg_type_str[type], reg_type_str[expected_type]);
1539 return -EACCES;
1540 }
1541
1542 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1543 {
1544 if (!map)
1545 return 0;
1546
1547 /* We need a two way check, first is from map perspective ... */
1548 switch (map->map_type) {
1549 case BPF_MAP_TYPE_PROG_ARRAY:
1550 if (func_id != BPF_FUNC_tail_call)
1551 goto error;
1552 break;
1553 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1554 if (func_id != BPF_FUNC_perf_event_read &&
1555 func_id != BPF_FUNC_perf_event_output)
1556 goto error;
1557 break;
1558 case BPF_MAP_TYPE_STACK_TRACE:
1559 if (func_id != BPF_FUNC_get_stackid)
1560 goto error;
1561 break;
1562 case BPF_MAP_TYPE_CGROUP_ARRAY:
1563 if (func_id != BPF_FUNC_skb_under_cgroup &&
1564 func_id != BPF_FUNC_current_task_under_cgroup)
1565 goto error;
1566 break;
1567 /* devmap returns a pointer to a live net_device ifindex that we cannot
1568 * allow to be modified from the bpf side. So do not allow lookup of elements
1569 * for now.
1570 */
1571 case BPF_MAP_TYPE_DEVMAP:
1572 if (func_id != BPF_FUNC_redirect_map)
1573 goto error;
1574 break;
1575 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1576 case BPF_MAP_TYPE_HASH_OF_MAPS:
1577 if (func_id != BPF_FUNC_map_lookup_elem)
1578 goto error;
1579 break;
1580 case BPF_MAP_TYPE_SOCKMAP:
1581 if (func_id != BPF_FUNC_sk_redirect_map &&
1582 func_id != BPF_FUNC_sock_map_update &&
1583 func_id != BPF_FUNC_map_delete_elem)
1584 goto error;
1585 break;
1586 default:
1587 break;
1588 }
1589
1590 /* ... and second from the function itself. */
1591 switch (func_id) {
1592 case BPF_FUNC_tail_call:
1593 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1594 goto error;
1595 break;
1596 case BPF_FUNC_perf_event_read:
1597 case BPF_FUNC_perf_event_output:
1598 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1599 goto error;
1600 break;
1601 case BPF_FUNC_get_stackid:
1602 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1603 goto error;
1604 break;
1605 case BPF_FUNC_current_task_under_cgroup:
1606 case BPF_FUNC_skb_under_cgroup:
1607 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1608 goto error;
1609 break;
1610 case BPF_FUNC_redirect_map:
1611 if (map->map_type != BPF_MAP_TYPE_DEVMAP)
1612 goto error;
1613 break;
1614 case BPF_FUNC_sk_redirect_map:
1615 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1616 goto error;
1617 break;
1618 case BPF_FUNC_sock_map_update:
1619 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1620 goto error;
1621 break;
1622 default:
1623 break;
1624 }
1625
1626 return 0;
1627 error:
1628 verbose("cannot pass map_type %d into func %s#%d\n",
1629 map->map_type, func_id_name(func_id), func_id);
1630 return -EINVAL;
1631 }
1632
1633 static int check_raw_mode(const struct bpf_func_proto *fn)
1634 {
1635 int count = 0;
1636
1637 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1638 count++;
1639 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1640 count++;
1641 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1642 count++;
1643 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1644 count++;
1645 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1646 count++;
1647
1648 return count > 1 ? -EINVAL : 0;
1649 }
1650
1651 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
1652 * are now invalid, so turn them into unknown SCALAR_VALUE.
1653 */
1654 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1655 {
1656 struct bpf_verifier_state *state = &env->cur_state;
1657 struct bpf_reg_state *regs = state->regs, *reg;
1658 int i;
1659
1660 for (i = 0; i < MAX_BPF_REG; i++)
1661 if (reg_is_pkt_pointer_any(&regs[i]))
1662 mark_reg_unknown(regs, i);
1663
1664 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1665 if (state->stack_slot_type[i] != STACK_SPILL)
1666 continue;
1667 reg = &state->spilled_regs[i / BPF_REG_SIZE];
1668 if (reg_is_pkt_pointer_any(reg))
1669 __mark_reg_unknown(reg);
1670 }
1671 }
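
/* Illustrative consequence: after a helper that may change packet data
 * (e.g. bpf_skb_pull_data() or bpf_xdp_adjust_head()), previously validated
 * data/data_end pointers held in registers or spilled to the stack become
 * unknown scalars here, so the program has to re-load them from the context
 * and redo its bounds checks before touching packet bytes again.
 */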
1672
1673 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1674 {
1675 struct bpf_verifier_state *state = &env->cur_state;
1676 const struct bpf_func_proto *fn = NULL;
1677 struct bpf_reg_state *regs = state->regs;
1678 struct bpf_call_arg_meta meta;
1679 bool changes_data;
1680 int i, err;
1681
1682 /* find function prototype */
1683 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1684 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id);
1685 return -EINVAL;
1686 }
1687
1688 if (env->prog->aux->ops->get_func_proto)
1689 fn = env->prog->aux->ops->get_func_proto(func_id);
1690
1691 if (!fn) {
1692 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id);
1693 return -EINVAL;
1694 }
1695
1696 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1697 if (!env->prog->gpl_compatible && fn->gpl_only) {
1698 verbose("cannot call GPL only function from proprietary program\n");
1699 return -EINVAL;
1700 }
1701
1702 changes_data = bpf_helper_changes_pkt_data(fn->func);
1703
1704 memset(&meta, 0, sizeof(meta));
1705 meta.pkt_access = fn->pkt_access;
1706
1707 /* We only support one arg being in raw mode at the moment, which
1708 * is sufficient for the helper functions we have right now.
1709 */
1710 err = check_raw_mode(fn);
1711 if (err) {
1712 verbose("kernel subsystem misconfigured func %s#%d\n",
1713 func_id_name(func_id), func_id);
1714 return err;
1715 }
1716
1717 /* check args */
1718 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1719 if (err)
1720 return err;
1721 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1722 if (err)
1723 return err;
1724 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1725 if (err)
1726 return err;
1727 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1728 if (err)
1729 return err;
1730 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1731 if (err)
1732 return err;
1733
1734 /* Mark slots with STACK_MISC in case of raw mode; the stack offset
1735 * is inferred from register state.
1736 */
1737 for (i = 0; i < meta.access_size; i++) {
1738 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1739 if (err)
1740 return err;
1741 }
1742
1743 /* reset caller saved regs */
1744 for (i = 0; i < CALLER_SAVED_REGS; i++) {
1745 mark_reg_not_init(regs, caller_saved[i]);
1746 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
1747 }
1748
1749 /* update return register (already marked as written above) */
1750 if (fn->ret_type == RET_INTEGER) {
1751 /* sets type to SCALAR_VALUE */
1752 mark_reg_unknown(regs, BPF_REG_0);
1753 } else if (fn->ret_type == RET_VOID) {
1754 regs[BPF_REG_0].type = NOT_INIT;
1755 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1756 struct bpf_insn_aux_data *insn_aux;
1757
1758 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1759 /* There is no offset yet applied, variable or fixed */
1760 mark_reg_known_zero(regs, BPF_REG_0);
1761 regs[BPF_REG_0].off = 0;
1762 /* remember map_ptr, so that check_map_access()
1763 * can check 'value_size' boundary of memory access
1764 * to map element returned from bpf_map_lookup_elem()
1765 */
1766 if (meta.map_ptr == NULL) {
1767 verbose("kernel subsystem misconfigured verifier\n");
1768 return -EINVAL;
1769 }
1770 regs[BPF_REG_0].map_ptr = meta.map_ptr;
1771 regs[BPF_REG_0].id = ++env->id_gen;
1772 insn_aux = &env->insn_aux_data[insn_idx];
1773 if (!insn_aux->map_ptr)
1774 insn_aux->map_ptr = meta.map_ptr;
1775 else if (insn_aux->map_ptr != meta.map_ptr)
1776 insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1777 } else {
1778 verbose("unknown return type %d of func %s#%d\n",
1779 fn->ret_type, func_id_name(func_id), func_id);
1780 return -EINVAL;
1781 }
1782
1783 err = check_map_func_compatibility(meta.map_ptr, func_id);
1784 if (err)
1785 return err;
1786
1787 if (changes_data)
1788 clear_all_pkt_pointers(env);
1789 return 0;
1790 }
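/* Illustrative note (assumed behaviour of the later rewrite pass): if two
 * different maps can reach the same lookup call site, e.g. two branches
 * each loading a different map into R1 before one bpf_map_lookup_elem()
 * call, insn_aux->map_ptr is set to BPF_MAP_PTR_POISON above, which keeps
 * the call from later being specialized for a single map.
 */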
1791
1792 static void coerce_reg_to_32(struct bpf_reg_state *reg)
1793 {
1794 /* clear high 32 bits */
1795 reg->var_off = tnum_cast(reg->var_off, 4);
1796 /* Update bounds */
1797 __update_reg_bounds(reg);
1798 }
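/* Worked example: for a fully unknown 64-bit scalar, tnum_cast(var_off, 4)
 * leaves the low 32 bits unknown and makes the high 32 bits known zero, so
 * __update_reg_bounds() tightens umax_value from U64_MAX to 0xffffffff.
 */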
1799
1800 static bool signed_add_overflows(s64 a, s64 b)
1801 {
1802 /* Do the add in u64, where overflow is well-defined */
1803 s64 res = (s64)((u64)a + (u64)b);
1804
1805 if (b < 0)
1806 return res > a;
1807 return res < a;
1808 }
1809
1810 static bool signed_sub_overflows(s64 a, s64 b)
1811 {
1812 /* Do the sub in u64, where overflow is well-defined */
1813 s64 res = (s64)((u64)a - (u64)b);
1814
1815 if (b < 0)
1816 return res < a;
1817 return res > a;
1818 }
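/* Worked example: signed_add_overflows(S64_MAX, 1) adds in u64 (where
 * wraparound is well-defined), giving S64_MIN; since b > 0 and res < a,
 * overflow is reported.  Doing the addition directly in s64 would be
 * undefined behaviour in C.
 */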
1819
1820 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1821 * Caller should also handle BPF_MOV case separately.
1822 * If we return -EACCES, caller may want to try again treating pointer as a
1823 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
1824 */
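/* Illustrative BPF sequences (pseudo-assembly):
 *
 *	r1 = r10	// PTR_TO_STACK, off = 0
 *	r1 += -8	// known constant: folded into the fixed off (-8)
 *
 *	r2 = <map value>	// PTR_TO_MAP_VALUE
 *	r2 += r3		// unknown scalar: tracked via var_off and the
 *				// smin/smax/umin/umax bounds of r2 instead
 */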
1825 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1826 struct bpf_insn *insn,
1827 const struct bpf_reg_state *ptr_reg,
1828 const struct bpf_reg_state *off_reg)
1829 {
1830 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1831 bool known = tnum_is_const(off_reg->var_off);
1832 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
1833 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1834 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1835 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1836 u8 opcode = BPF_OP(insn->code);
1837 u32 dst = insn->dst_reg;
1838
1839 dst_reg = &regs[dst];
1840
1841 if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1842 print_verifier_state(&env->cur_state);
1843 verbose("verifier internal error: known but bad sbounds\n");
1844 return -EINVAL;
1845 }
1846 if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1847 print_verifier_state(&env->cur_state);
1848 verbose("verifier internal error: known but bad ubounds\n");
1849 return -EINVAL;
1850 }
1851
1852 if (BPF_CLASS(insn->code) != BPF_ALU64) {
1853 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
1854 if (!env->allow_ptr_leaks)
1855 verbose("R%d 32-bit pointer arithmetic prohibited\n",
1856 dst);
1857 return -EACCES;
1858 }
1859
1860 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1861 if (!env->allow_ptr_leaks)
1862 verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1863 dst);
1864 return -EACCES;
1865 }
1866 if (ptr_reg->type == CONST_PTR_TO_MAP) {
1867 if (!env->allow_ptr_leaks)
1868 verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1869 dst);
1870 return -EACCES;
1871 }
1872 if (ptr_reg->type == PTR_TO_PACKET_END) {
1873 if (!env->allow_ptr_leaks)
1874 verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1875 dst);
1876 return -EACCES;
1877 }
1878
1879 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1880 * The id may be overwritten later if we create a new variable offset.
1881 */
1882 dst_reg->type = ptr_reg->type;
1883 dst_reg->id = ptr_reg->id;
1884
1885 switch (opcode) {
1886 case BPF_ADD:
1887 /* We can take a fixed offset as long as it doesn't overflow
1888 * the s32 'off' field
1889 */
1890 if (known && (ptr_reg->off + smin_val ==
1891 (s64)(s32)(ptr_reg->off + smin_val))) {
1892 /* pointer += K. Accumulate it into fixed offset */
1893 dst_reg->smin_value = smin_ptr;
1894 dst_reg->smax_value = smax_ptr;
1895 dst_reg->umin_value = umin_ptr;
1896 dst_reg->umax_value = umax_ptr;
1897 dst_reg->var_off = ptr_reg->var_off;
1898 dst_reg->off = ptr_reg->off + smin_val;
1899 dst_reg->range = ptr_reg->range;
1900 break;
1901 }
1902 /* A new variable offset is created. Note that off_reg->off
1903 * == 0, since it's a scalar.
1904 * dst_reg gets the pointer type and, since some (possibly
1905 * variable) value was added to the pointer, give it a new 'id'
1906 * if it's a PTR_TO_PACKET.
1907 * This creates a new 'base' pointer: off_reg (the variable part) gets
1908 * added into the variable offset, and we copy the fixed offset
1909 * from ptr_reg.
1910 */
1911 if (signed_add_overflows(smin_ptr, smin_val) ||
1912 signed_add_overflows(smax_ptr, smax_val)) {
1913 dst_reg->smin_value = S64_MIN;
1914 dst_reg->smax_value = S64_MAX;
1915 } else {
1916 dst_reg->smin_value = smin_ptr + smin_val;
1917 dst_reg->smax_value = smax_ptr + smax_val;
1918 }
1919 if (umin_ptr + umin_val < umin_ptr ||
1920 umax_ptr + umax_val < umax_ptr) {
1921 dst_reg->umin_value = 0;
1922 dst_reg->umax_value = U64_MAX;
1923 } else {
1924 dst_reg->umin_value = umin_ptr + umin_val;
1925 dst_reg->umax_value = umax_ptr + umax_val;
1926 }
1927 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1928 dst_reg->off = ptr_reg->off;
1929 if (reg_is_pkt_pointer(ptr_reg)) {
1930 dst_reg->id = ++env->id_gen;
1931 /* something was added to pkt_ptr, set range to zero */
1932 dst_reg->range = 0;
1933 }
1934 break;
1935 case BPF_SUB:
1936 if (dst_reg == off_reg) {
1937 /* scalar -= pointer. Creates an unknown scalar */
1938 if (!env->allow_ptr_leaks)
1939 verbose("R%d tried to subtract pointer from scalar\n",
1940 dst);
1941 return -EACCES;
1942 }
1943 /* We don't allow subtraction from FP, because (according to
1944 * the test_verifier.c test "invalid fp arithmetic") JITs might not
1945 * be able to deal with it.
1946 */
1947 if (ptr_reg->type == PTR_TO_STACK) {
1948 if (!env->allow_ptr_leaks)
1949 verbose("R%d subtraction from stack pointer prohibited\n",
1950 dst);
1951 return -EACCES;
1952 }
1953 if (known && (ptr_reg->off - smin_val ==
1954 (s64)(s32)(ptr_reg->off - smin_val))) {
1955 /* pointer -= K. Subtract it from fixed offset */
1956 dst_reg->smin_value = smin_ptr;
1957 dst_reg->smax_value = smax_ptr;
1958 dst_reg->umin_value = umin_ptr;
1959 dst_reg->umax_value = umax_ptr;
1960 dst_reg->var_off = ptr_reg->var_off;
1961 dst_reg->id = ptr_reg->id;
1962 dst_reg->off = ptr_reg->off - smin_val;
1963 dst_reg->range = ptr_reg->range;
1964 break;
1965 }
1966 /* A new variable offset is created. If the subtrahend is known
1967 * nonnegative, then any reg->range we had before is still good.
1968 */
1969 if (signed_sub_overflows(smin_ptr, smax_val) ||
1970 signed_sub_overflows(smax_ptr, smin_val)) {
1971 /* Overflow possible, we know nothing */
1972 dst_reg->smin_value = S64_MIN;
1973 dst_reg->smax_value = S64_MAX;
1974 } else {
1975 dst_reg->smin_value = smin_ptr - smax_val;
1976 dst_reg->smax_value = smax_ptr - smin_val;
1977 }
1978 if (umin_ptr < umax_val) {
1979 /* Overflow possible, we know nothing */
1980 dst_reg->umin_value = 0;
1981 dst_reg->umax_value = U64_MAX;
1982 } else {
1983 /* Cannot overflow (as long as bounds are consistent) */
1984 dst_reg->umin_value = umin_ptr - umax_val;
1985 dst_reg->umax_value = umax_ptr - umin_val;
1986 }
1987 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
1988 dst_reg->off = ptr_reg->off;
1989 if (reg_is_pkt_pointer(ptr_reg)) {
1990 dst_reg->id = ++env->id_gen;
1991 /* if the subtracted value might be negative, pkt_ptr may have advanced, so clear the range */
1992 if (smin_val < 0)
1993 dst_reg->range = 0;
1994 }
1995 break;
1996 case BPF_AND:
1997 case BPF_OR:
1998 case BPF_XOR:
1999 /* bitwise ops on pointers are troublesome, prohibit for now.
2000 * (However, in principle we could allow some cases, e.g.
2001 * ptr &= ~3 which would reduce min_value by 3.)
2002 */
2003 if (!env->allow_ptr_leaks)
2004 verbose("R%d bitwise operator %s on pointer prohibited\n",
2005 dst, bpf_alu_string[opcode >> 4]);
2006 return -EACCES;
2007 default:
2008 /* other operators (e.g. MUL, LSH) produce non-pointer results */
2009 if (!env->allow_ptr_leaks)
2010 verbose("R%d pointer arithmetic with %s operator prohibited\n",
2011 dst, bpf_alu_string[opcode >> 4]);
2012 return -EACCES;
2013 }
2014
2015 __update_reg_bounds(dst_reg);
2016 __reg_deduce_bounds(dst_reg);
2017 __reg_bound_offset(dst_reg);
2018 return 0;
2019 }
2020
2021 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2022 struct bpf_insn *insn,
2023 struct bpf_reg_state *dst_reg,
2024 struct bpf_reg_state src_reg)
2025 {
2026 struct bpf_reg_state *regs = env->cur_state.regs;
2027 u8 opcode = BPF_OP(insn->code);
2028 bool src_known, dst_known;
2029 s64 smin_val, smax_val;
2030 u64 umin_val, umax_val;
2031
2032 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2033 /* 32-bit ALU ops are (32,32)->64 */
2034 coerce_reg_to_32(dst_reg);
2035 coerce_reg_to_32(&src_reg);
2036 }
2037 smin_val = src_reg.smin_value;
2038 smax_val = src_reg.smax_value;
2039 umin_val = src_reg.umin_value;
2040 umax_val = src_reg.umax_value;
2041 src_known = tnum_is_const(src_reg.var_off);
2042 dst_known = tnum_is_const(dst_reg->var_off);
2043
2044 switch (opcode) {
2045 case BPF_ADD:
2046 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2047 signed_add_overflows(dst_reg->smax_value, smax_val)) {
2048 dst_reg->smin_value = S64_MIN;
2049 dst_reg->smax_value = S64_MAX;
2050 } else {
2051 dst_reg->smin_value += smin_val;
2052 dst_reg->smax_value += smax_val;
2053 }
2054 if (dst_reg->umin_value + umin_val < umin_val ||
2055 dst_reg->umax_value + umax_val < umax_val) {
2056 dst_reg->umin_value = 0;
2057 dst_reg->umax_value = U64_MAX;
2058 } else {
2059 dst_reg->umin_value += umin_val;
2060 dst_reg->umax_value += umax_val;
2061 }
2062 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2063 break;
2064 case BPF_SUB:
2065 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2066 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2067 /* Overflow possible, we know nothing */
2068 dst_reg->smin_value = S64_MIN;
2069 dst_reg->smax_value = S64_MAX;
2070 } else {
2071 dst_reg->smin_value -= smax_val;
2072 dst_reg->smax_value -= smin_val;
2073 }
2074 if (dst_reg->umin_value < umax_val) {
2075 /* Overflow possible, we know nothing */
2076 dst_reg->umin_value = 0;
2077 dst_reg->umax_value = U64_MAX;
2078 } else {
2079 /* Cannot overflow (as long as bounds are consistent) */
2080 dst_reg->umin_value -= umax_val;
2081 dst_reg->umax_value -= umin_val;
2082 }
2083 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2084 break;
2085 case BPF_MUL:
2086 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2087 if (smin_val < 0 || dst_reg->smin_value < 0) {
2088 /* Ain't nobody got time to multiply that sign */
2089 __mark_reg_unbounded(dst_reg);
2090 __update_reg_bounds(dst_reg);
2091 break;
2092 }
2093 /* Both values are positive, so we can work with unsigned and
2094 * copy the result to signed (unless it exceeds S64_MAX).
2095 */
2096 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2097 /* Potential overflow, we know nothing */
2098 __mark_reg_unbounded(dst_reg);
2099 /* (except what we can learn from the var_off) */
2100 __update_reg_bounds(dst_reg);
2101 break;
2102 }
2103 dst_reg->umin_value *= umin_val;
2104 dst_reg->umax_value *= umax_val;
2105 if (dst_reg->umax_value > S64_MAX) {
2106 /* Overflow possible, we know nothing */
2107 dst_reg->smin_value = S64_MIN;
2108 dst_reg->smax_value = S64_MAX;
2109 } else {
2110 dst_reg->smin_value = dst_reg->umin_value;
2111 dst_reg->smax_value = dst_reg->umax_value;
2112 }
2113 break;
2114 case BPF_AND:
2115 if (src_known && dst_known) {
2116 __mark_reg_known(dst_reg, dst_reg->var_off.value &
2117 src_reg.var_off.value);
2118 break;
2119 }
2120 /* We get our minimum from the var_off, since that's inherently
2121 * bitwise. Our maximum is the minimum of the operands' maxima.
2122 */
2123 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2124 dst_reg->umin_value = dst_reg->var_off.value;
2125 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2126 if (dst_reg->smin_value < 0 || smin_val < 0) {
2127 /* Lose signed bounds when ANDing negative numbers,
2128 * ain't nobody got time for that.
2129 */
2130 dst_reg->smin_value = S64_MIN;
2131 dst_reg->smax_value = S64_MAX;
2132 } else {
2133 /* ANDing two positives gives a positive, so safe to
2134 * cast result into s64.
2135 */
2136 dst_reg->smin_value = dst_reg->umin_value;
2137 dst_reg->smax_value = dst_reg->umax_value;
2138 }
2139 /* We may learn something more from the var_off */
2140 __update_reg_bounds(dst_reg);
2141 break;
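/* Worked example (unsigned view): ANDing a fully unknown dst with a src
 * known to be 0x0f yields var_off (value 0x0, mask 0x0f), so umin_value
 * becomes 0 and umax_value becomes min(U64_MAX, 0x0f) = 0x0f.
 */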
2142 case BPF_OR:
2143 if (src_known && dst_known) {
2144 __mark_reg_known(dst_reg, dst_reg->var_off.value |
2145 src_reg.var_off.value);
2146 break;
2147 }
2148 /* We get our maximum from the var_off, and our minimum is the
2149 * maximum of the operands' minima
2150 */
2151 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2152 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2153 dst_reg->umax_value = dst_reg->var_off.value |
2154 dst_reg->var_off.mask;
2155 if (dst_reg->smin_value < 0 || smin_val < 0) {
2156 /* Lose signed bounds when ORing negative numbers,
2157 * ain't nobody got time for that.
2158 */
2159 dst_reg->smin_value = S64_MIN;
2160 dst_reg->smax_value = S64_MAX;
2161 } else {
2162 /* ORing two positives gives a positive, so safe to
2163 * cast result into s64.
2164 */
2165 dst_reg->smin_value = dst_reg->umin_value;
2166 dst_reg->smax_value = dst_reg->umax_value;
2167 }
2168 /* We may learn something more from the var_off */
2169 __update_reg_bounds(dst_reg);
2170 break;
2171 case BPF_LSH:
2172 if (umax_val > 63) {
2173 /* Shifts greater than 63 are undefined. This includes
2174 * shifts by a negative number.
2175 */
2176 mark_reg_unknown(regs, insn->dst_reg);
2177 break;
2178 }
2179 /* We lose all sign bit information (except what we can pick
2180 * up from var_off)
2181 */
2182 dst_reg->smin_value = S64_MIN;
2183 dst_reg->smax_value = S64_MAX;
2184 /* If we might shift our top bit out, then we know nothing */
2185 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2186 dst_reg->umin_value = 0;
2187 dst_reg->umax_value = U64_MAX;
2188 } else {
2189 dst_reg->umin_value <<= umin_val;
2190 dst_reg->umax_value <<= umax_val;
2191 }
2192 if (src_known)
2193 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2194 else
2195 dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2196 /* We may learn something more from the var_off */
2197 __update_reg_bounds(dst_reg);
2198 break;
2199 case BPF_RSH:
2200 if (umax_val > 63) {
2201 /* Shifts greater than 63 are undefined. This includes
2202 * shifts by a negative number.
2203 */
2204 mark_reg_unknown(regs, insn->dst_reg);
2205 break;
2206 }
2207 /* BPF_RSH is an unsigned shift, so make the appropriate casts */
2208 if (dst_reg->smin_value < 0) {
2209 if (umin_val) {
2210 /* Sign bit will be cleared */
2211 dst_reg->smin_value = 0;
2212 } else {
2213 /* Lost sign bit information */
2214 dst_reg->smin_value = S64_MIN;
2215 dst_reg->smax_value = S64_MAX;
2216 }
2217 } else {
2218 dst_reg->smin_value =
2219 (u64)(dst_reg->smin_value) >> umax_val;
2220 }
2221 if (src_known)
2222 dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2223 umin_val);
2224 else
2225 dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2226 dst_reg->umin_value >>= umax_val;
2227 dst_reg->umax_value >>= umin_val;
2228 /* We may learn something more from the var_off */
2229 __update_reg_bounds(dst_reg);
2230 break;
2231 default:
2232 mark_reg_unknown(regs, insn->dst_reg);
2233 break;
2234 }
2235
2236 __reg_deduce_bounds(dst_reg);
2237 __reg_bound_offset(dst_reg);
2238 return 0;
2239 }
2240
2241 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2242 * and var_off.
2243 */
2244 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2245 struct bpf_insn *insn)
2246 {
2247 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg;
2248 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2249 u8 opcode = BPF_OP(insn->code);
2250 int rc;
2251
2252 dst_reg = &regs[insn->dst_reg];
2253 src_reg = NULL;
2254 if (dst_reg->type != SCALAR_VALUE)
2255 ptr_reg = dst_reg;
2256 if (BPF_SRC(insn->code) == BPF_X) {
2257 src_reg = &regs[insn->src_reg];
2258 if (src_reg->type != SCALAR_VALUE) {
2259 if (dst_reg->type != SCALAR_VALUE) {
2260 /* Combining two pointers by any ALU op yields
2261 * an arbitrary scalar.
2262 */
2263 if (!env->allow_ptr_leaks) {
2264 verbose("R%d pointer %s pointer prohibited\n",
2265 insn->dst_reg,
2266 bpf_alu_string[opcode >> 4]);
2267 return -EACCES;
2268 }
2269 mark_reg_unknown(regs, insn->dst_reg);
2270 return 0;
2271 } else {
2272 /* scalar += pointer
2273 * This is legal, but we have to reverse our
2274 * src/dest handling in computing the range
2275 */
2276 rc = adjust_ptr_min_max_vals(env, insn,
2277 src_reg, dst_reg);
2278 if (rc == -EACCES && env->allow_ptr_leaks) {
2279 /* scalar += unknown scalar */
2280 __mark_reg_unknown(&off_reg);
2281 return adjust_scalar_min_max_vals(
2282 env, insn,
2283 dst_reg, off_reg);
2284 }
2285 return rc;
2286 }
2287 } else if (ptr_reg) {
2288 /* pointer += scalar */
2289 rc = adjust_ptr_min_max_vals(env, insn,
2290 dst_reg, src_reg);
2291 if (rc == -EACCES && env->allow_ptr_leaks) {
2292 /* unknown scalar += scalar */
2293 __mark_reg_unknown(dst_reg);
2294 return adjust_scalar_min_max_vals(
2295 env, insn, dst_reg, *src_reg);
2296 }
2297 return rc;
2298 }
2299 } else {
2300 /* Pretend the src is a reg with a known value, since we only
2301 * need to be able to read from this state.
2302 */
2303 off_reg.type = SCALAR_VALUE;
2304 __mark_reg_known(&off_reg, insn->imm);
2305 src_reg = &off_reg;
2306 if (ptr_reg) { /* pointer += K */
2307 rc = adjust_ptr_min_max_vals(env, insn,
2308 ptr_reg, src_reg);
2309 if (rc == -EACCES && env->allow_ptr_leaks) {
2310 /* unknown scalar += K */
2311 __mark_reg_unknown(dst_reg);
2312 return adjust_scalar_min_max_vals(
2313 env, insn, dst_reg, off_reg);
2314 }
2315 return rc;
2316 }
2317 }
2318
2319 /* Got here implies adding two SCALAR_VALUEs */
2320 if (WARN_ON_ONCE(ptr_reg)) {
2321 print_verifier_state(&env->cur_state);
2322 verbose("verifier internal error: unexpected ptr_reg\n");
2323 return -EINVAL;
2324 }
2325 if (WARN_ON(!src_reg)) {
2326 print_verifier_state(&env->cur_state);
2327 verbose("verifier internal error: no src_reg\n");
2328 return -EINVAL;
2329 }
2330 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2331 }
2332
2333 /* check validity of 32-bit and 64-bit arithmetic operations */
2334 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2335 {
2336 struct bpf_reg_state *regs = env->cur_state.regs;
2337 u8 opcode = BPF_OP(insn->code);
2338 int err;
2339
2340 if (opcode == BPF_END || opcode == BPF_NEG) {
2341 if (opcode == BPF_NEG) {
2342 if (BPF_SRC(insn->code) != 0 ||
2343 insn->src_reg != BPF_REG_0 ||
2344 insn->off != 0 || insn->imm != 0) {
2345 verbose("BPF_NEG uses reserved fields\n");
2346 return -EINVAL;
2347 }
2348 } else {
2349 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2350 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
2351 BPF_CLASS(insn->code) == BPF_ALU64) {
2352 verbose("BPF_END uses reserved fields\n");
2353 return -EINVAL;
2354 }
2355 }
2356
2357 /* check src operand */
2358 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2359 if (err)
2360 return err;
2361
2362 if (is_pointer_value(env, insn->dst_reg)) {
2363 verbose("R%d pointer arithmetic prohibited\n",
2364 insn->dst_reg);
2365 return -EACCES;
2366 }
2367
2368 /* check dest operand */
2369 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2370 if (err)
2371 return err;
2372
2373 } else if (opcode == BPF_MOV) {
2374
2375 if (BPF_SRC(insn->code) == BPF_X) {
2376 if (insn->imm != 0 || insn->off != 0) {
2377 verbose("BPF_MOV uses reserved fields\n");
2378 return -EINVAL;
2379 }
2380
2381 /* check src operand */
2382 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2383 if (err)
2384 return err;
2385 } else {
2386 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2387 verbose("BPF_MOV uses reserved fields\n");
2388 return -EINVAL;
2389 }
2390 }
2391
2392 /* check dest operand */
2393 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2394 if (err)
2395 return err;
2396
2397 if (BPF_SRC(insn->code) == BPF_X) {
2398 if (BPF_CLASS(insn->code) == BPF_ALU64) {
2399 /* case: R1 = R2
2400 * copy register state to dest reg
2401 */
2402 regs[insn->dst_reg] = regs[insn->src_reg];
2403 } else {
2404 /* R1 = (u32) R2 */
2405 if (is_pointer_value(env, insn->src_reg)) {
2406 verbose("R%d partial copy of pointer\n",
2407 insn->src_reg);
2408 return -EACCES;
2409 }
2410 mark_reg_unknown(regs, insn->dst_reg);
2411 /* high 32 bits are known zero. */
2412 regs[insn->dst_reg].var_off = tnum_cast(
2413 regs[insn->dst_reg].var_off, 4);
2414 __update_reg_bounds(&regs[insn->dst_reg]);
2415 }
2416 } else {
2417 /* case: R = imm
2418 * remember the value we stored into this reg
2419 */
2420 regs[insn->dst_reg].type = SCALAR_VALUE;
2421 __mark_reg_known(regs + insn->dst_reg, insn->imm);
2422 }
2423
2424 } else if (opcode > BPF_END) {
2425 verbose("invalid BPF_ALU opcode %x\n", opcode);
2426 return -EINVAL;
2427
2428 } else { /* all other ALU ops: and, sub, xor, add, ... */
2429
2430 if (BPF_SRC(insn->code) == BPF_X) {
2431 if (insn->imm != 0 || insn->off != 0) {
2432 verbose("BPF_ALU uses reserved fields\n");
2433 return -EINVAL;
2434 }
2435 /* check src1 operand */
2436 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2437 if (err)
2438 return err;
2439 } else {
2440 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2441 verbose("BPF_ALU uses reserved fields\n");
2442 return -EINVAL;
2443 }
2444 }
2445
2446 /* check src2 operand */
2447 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2448 if (err)
2449 return err;
2450
2451 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2452 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2453 verbose("div by zero\n");
2454 return -EINVAL;
2455 }
2456
2457 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2458 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2459 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2460
2461 if (insn->imm < 0 || insn->imm >= size) {
2462 verbose("invalid shift %d\n", insn->imm);
2463 return -EINVAL;
2464 }
2465 }
2466
2467 /* check dest operand */
2468 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2469 if (err)
2470 return err;
2471
2472 return adjust_reg_min_max_vals(env, insn);
2473 }
2474
2475 return 0;
2476 }
2477
2478 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2479 struct bpf_reg_state *dst_reg,
2480 enum bpf_reg_type type)
2481 {
2482 struct bpf_reg_state *regs = state->regs, *reg;
2483 int i;
2484
2485 if (dst_reg->off < 0)
2486 /* This doesn't give us any range */
2487 return;
2488
2489 if (dst_reg->umax_value > MAX_PACKET_OFF ||
2490 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2491 /* Risk of overflow. For instance, ptr + (1<<63) may be less
2492 * than pkt_end, but that's because it's also less than pkt.
2493 */
2494 return;
2495
2496 /* LLVM can generate four kinds of checks:
2497 *
2498 * Type 1/2:
2499 *
2500 * r2 = r3;
2501 * r2 += 8;
2502 * if (r2 > pkt_end) goto <handle exception>
2503 * <access okay>
2504 *
2505 * r2 = r3;
2506 * r2 += 8;
2507 * if (r2 < pkt_end) goto <access okay>
2508 * <handle exception>
2509 *
2510 * Where:
2511 * r2 == dst_reg, pkt_end == src_reg
2512 * r2=pkt(id=n,off=8,r=0)
2513 * r3=pkt(id=n,off=0,r=0)
2514 *
2515 * Type 3/4:
2516 *
2517 * r2 = r3;
2518 * r2 += 8;
2519 * if (pkt_end >= r2) goto <access okay>
2520 * <handle exception>
2521 *
2522 * r2 = r3;
2523 * r2 += 8;
2524 * if (pkt_end <= r2) goto <handle exception>
2525 * <access okay>
2526 *
2527 * Where:
2528 * pkt_end == dst_reg, r2 == src_reg
2529 * r2=pkt(id=n,off=8,r=0)
2530 * r3=pkt(id=n,off=0,r=0)
2531 *
2532 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2533 * so that range of bytes [r3, r3 + 8) is safe to access.
2534 */
2535
2536 /* If our ids match, then we must have the same max_value. And we
2537 * don't care about the other reg's fixed offset, since if it's too big
2538 * the range won't allow anything.
2539 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2540 */
2541 for (i = 0; i < MAX_BPF_REG; i++)
2542 if (regs[i].type == type && regs[i].id == dst_reg->id)
2543 /* keep the maximum range already checked */
2544 regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
2545
2546 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2547 if (state->stack_slot_type[i] != STACK_SPILL)
2548 continue;
2549 reg = &state->spilled_regs[i / BPF_REG_SIZE];
2550 if (reg->type == type && reg->id == dst_reg->id)
2551 reg->range = max_t(u16, reg->range, dst_reg->off);
2552 }
2553 }
2554
2555 /* Adjusts the register min/max values in the case that the dst_reg is the
2556 * variable register that we are working on, and src_reg is a constant or we're
2557 * simply doing a BPF_K check.
2558 * In JEQ/JNE cases we also adjust the var_off values.
2559 */
2560 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2561 struct bpf_reg_state *false_reg, u64 val,
2562 u8 opcode)
2563 {
2564 /* If the dst_reg is a pointer, we can't learn anything about its
2565 * variable offset from the compare (unless src_reg were a pointer into
2566 * the same object, but we don't bother with that).
2567 * Since false_reg and true_reg have the same type by construction, we
2568 * only need to check one of them for pointerness.
2569 */
2570 if (__is_pointer_value(false, false_reg))
2571 return;
2572
2573 switch (opcode) {
2574 case BPF_JEQ:
2575 /* If this is false then we know nothing Jon Snow, but if it is
2576 * true then we know for sure.
2577 */
2578 __mark_reg_known(true_reg, val);
2579 break;
2580 case BPF_JNE:
2581 /* If this is true we know nothing Jon Snow, but if it is false
2582 * we know the value for sure.
2583 */
2584 __mark_reg_known(false_reg, val);
2585 break;
2586 case BPF_JGT:
2587 false_reg->umax_value = min(false_reg->umax_value, val);
2588 true_reg->umin_value = max(true_reg->umin_value, val + 1);
2589 break;
2590 case BPF_JSGT:
2591 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2592 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2593 break;
2594 case BPF_JLT:
2595 false_reg->umin_value = max(false_reg->umin_value, val);
2596 true_reg->umax_value = min(true_reg->umax_value, val - 1);
2597 break;
2598 case BPF_JSLT:
2599 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2600 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2601 break;
2602 case BPF_JGE:
2603 false_reg->umax_value = min(false_reg->umax_value, val - 1);
2604 true_reg->umin_value = max(true_reg->umin_value, val);
2605 break;
2606 case BPF_JSGE:
2607 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2608 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2609 break;
2610 case BPF_JLE:
2611 false_reg->umin_value = max(false_reg->umin_value, val + 1);
2612 true_reg->umax_value = min(true_reg->umax_value, val);
2613 break;
2614 case BPF_JSLE:
2615 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2616 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2617 break;
2618 default:
2619 break;
2620 }
2621
2622 __reg_deduce_bounds(false_reg);
2623 __reg_deduce_bounds(true_reg);
2624 /* We might have learned some bits from the bounds. */
2625 __reg_bound_offset(false_reg);
2626 __reg_bound_offset(true_reg);
2627 /* Intersecting with the old var_off might have improved our bounds
2628 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2629 * then new var_off is (0; 0x7f...fc) which improves our umax.
2630 */
2631 __update_reg_bounds(false_reg);
2632 __update_reg_bounds(true_reg);
2633 }
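/* Worked example: for 'if r1 > 10 goto ...' with r1 an unknown scalar,
 * the BPF_JGT case above gives the branch-taken copy umin_value = 11 and
 * the fall-through copy umax_value = 10.
 */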
2634
2635 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2636 * the variable reg.
2637 */
2638 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2639 struct bpf_reg_state *false_reg, u64 val,
2640 u8 opcode)
2641 {
2642 if (__is_pointer_value(false, false_reg))
2643 return;
2644
2645 switch (opcode) {
2646 case BPF_JEQ:
2647 /* If this is false then we know nothing Jon Snow, but if it is
2648 * true then we know for sure.
2649 */
2650 __mark_reg_known(true_reg, val);
2651 break;
2652 case BPF_JNE:
2653 /* If this is true we know nothing Jon Snow, but if it is false
2654 * we know the value for sure.
2655 */
2656 __mark_reg_known(false_reg, val);
2657 break;
2658 case BPF_JGT:
2659 true_reg->umax_value = min(true_reg->umax_value, val - 1);
2660 false_reg->umin_value = max(false_reg->umin_value, val);
2661 break;
2662 case BPF_JSGT:
2663 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2664 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2665 break;
2666 case BPF_JLT:
2667 true_reg->umin_value = max(true_reg->umin_value, val + 1);
2668 false_reg->umax_value = min(false_reg->umax_value, val);
2669 break;
2670 case BPF_JSLT:
2671 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2672 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2673 break;
2674 case BPF_JGE:
2675 true_reg->umax_value = min(true_reg->umax_value, val);
2676 false_reg->umin_value = max(false_reg->umin_value, val + 1);
2677 break;
2678 case BPF_JSGE:
2679 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2680 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2681 break;
2682 case BPF_JLE:
2683 true_reg->umin_value = max(true_reg->umin_value, val);
2684 false_reg->umax_value = min(false_reg->umax_value, val - 1);
2685 break;
2686 case BPF_JSLE:
2687 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2688 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2689 break;
2690 default:
2691 break;
2692 }
2693
2694 __reg_deduce_bounds(false_reg);
2695 __reg_deduce_bounds(true_reg);
2696 /* We might have learned some bits from the bounds. */
2697 __reg_bound_offset(false_reg);
2698 __reg_bound_offset(true_reg);
2699 /* Intersecting with the old var_off might have improved our bounds
2700 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2701 * then new var_off is (0; 0x7f...fc) which improves our umax.
2702 */
2703 __update_reg_bounds(false_reg);
2704 __update_reg_bounds(true_reg);
2705 }
2706
2707 /* Regs are known to be equal, so intersect their min/max/var_off */
2708 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2709 struct bpf_reg_state *dst_reg)
2710 {
2711 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2712 dst_reg->umin_value);
2713 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2714 dst_reg->umax_value);
2715 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2716 dst_reg->smin_value);
2717 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2718 dst_reg->smax_value);
2719 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2720 dst_reg->var_off);
2721 /* We might have learned new bounds from the var_off. */
2722 __update_reg_bounds(src_reg);
2723 __update_reg_bounds(dst_reg);
2724 /* We might have learned something about the sign bit. */
2725 __reg_deduce_bounds(src_reg);
2726 __reg_deduce_bounds(dst_reg);
2727 /* We might have learned some bits from the bounds. */
2728 __reg_bound_offset(src_reg);
2729 __reg_bound_offset(dst_reg);
2730 /* Intersecting with the old var_off might have improved our bounds
2731 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2732 * then new var_off is (0; 0x7f...fc) which improves our umax.
2733 */
2734 __update_reg_bounds(src_reg);
2735 __update_reg_bounds(dst_reg);
2736 }
2737
2738 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2739 struct bpf_reg_state *true_dst,
2740 struct bpf_reg_state *false_src,
2741 struct bpf_reg_state *false_dst,
2742 u8 opcode)
2743 {
2744 switch (opcode) {
2745 case BPF_JEQ:
2746 __reg_combine_min_max(true_src, true_dst);
2747 break;
2748 case BPF_JNE:
2749 __reg_combine_min_max(false_src, false_dst);
2750 break;
2751 }
2752 }
2753
2754 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2755 bool is_null)
2756 {
2757 struct bpf_reg_state *reg = &regs[regno];
2758
2759 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2760 /* Old offset (both fixed and variable parts) should
2761 * have been known-zero, because we don't allow pointer
2762 * arithmetic on pointers that might be NULL.
2763 */
2764 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2765 !tnum_equals_const(reg->var_off, 0) ||
2766 reg->off)) {
2767 __mark_reg_known_zero(reg);
2768 reg->off = 0;
2769 }
2770 if (is_null) {
2771 reg->type = SCALAR_VALUE;
2772 } else if (reg->map_ptr->inner_map_meta) {
2773 reg->type = CONST_PTR_TO_MAP;
2774 reg->map_ptr = reg->map_ptr->inner_map_meta;
2775 } else {
2776 reg->type = PTR_TO_MAP_VALUE;
2777 }
2778 /* We don't need id from this point onwards anymore, thus we
2779 * should better reset it, so that state pruning has chances
2780 * to take effect.
2781 */
2782 reg->id = 0;
2783 }
2784 }
2785
2786 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2787 * be folded together at some point.
2788 */
2789 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2790 bool is_null)
2791 {
2792 struct bpf_reg_state *regs = state->regs;
2793 u32 id = regs[regno].id;
2794 int i;
2795
2796 for (i = 0; i < MAX_BPF_REG; i++)
2797 mark_map_reg(regs, i, id, is_null);
2798
2799 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2800 if (state->stack_slot_type[i] != STACK_SPILL)
2801 continue;
2802 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null);
2803 }
2804 }
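/* Illustrative flow: after r0 = bpf_map_lookup_elem(...), r0 has type
 * PTR_TO_MAP_VALUE_OR_NULL with a fresh id.  On 'if r0 == 0 goto X',
 * check_cond_jmp_op() below calls mark_map_regs() so that in the
 * fall-through branch r0 (and every register sharing its id) becomes
 * PTR_TO_MAP_VALUE, while in the branch taken at X it becomes a scalar.
 */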
2805
2806 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2807 struct bpf_insn *insn, int *insn_idx)
2808 {
2809 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
2810 struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2811 u8 opcode = BPF_OP(insn->code);
2812 int err;
2813
2814 if (opcode > BPF_JSLE) {
2815 verbose("invalid BPF_JMP opcode %x\n", opcode);
2816 return -EINVAL;
2817 }
2818
2819 if (BPF_SRC(insn->code) == BPF_X) {
2820 if (insn->imm != 0) {
2821 verbose("BPF_JMP uses reserved fields\n");
2822 return -EINVAL;
2823 }
2824
2825 /* check src1 operand */
2826 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2827 if (err)
2828 return err;
2829
2830 if (is_pointer_value(env, insn->src_reg)) {
2831 verbose("R%d pointer comparison prohibited\n",
2832 insn->src_reg);
2833 return -EACCES;
2834 }
2835 } else {
2836 if (insn->src_reg != BPF_REG_0) {
2837 verbose("BPF_JMP uses reserved fields\n");
2838 return -EINVAL;
2839 }
2840 }
2841
2842 /* check src2 operand */
2843 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2844 if (err)
2845 return err;
2846
2847 dst_reg = &regs[insn->dst_reg];
2848
2849 /* detect if R == 0 where R was initialized to zero earlier */
2850 if (BPF_SRC(insn->code) == BPF_K &&
2851 (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2852 dst_reg->type == SCALAR_VALUE &&
2853 tnum_equals_const(dst_reg->var_off, insn->imm)) {
2854 if (opcode == BPF_JEQ) {
2855 /* if (imm == imm) goto pc+off;
2856 * only follow the goto, ignore fall-through
2857 */
2858 *insn_idx += insn->off;
2859 return 0;
2860 } else {
2861 /* if (imm != imm) goto pc+off;
2862 * only follow fall-through branch, since
2863 * that's where the program will go
2864 */
2865 return 0;
2866 }
2867 }
2868
2869 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
2870 if (!other_branch)
2871 return -EFAULT;
2872
2873 /* detect if we are comparing against a constant value so we can adjust
2874 * our min/max values for our dst register.
2875 * This is only legit if both are scalars (or pointers to the same
2876 * object, I suppose, but we don't support that right now), because
2877 * otherwise the different base pointers mean the offsets aren't
2878 * comparable.
2879 */
2880 if (BPF_SRC(insn->code) == BPF_X) {
2881 if (dst_reg->type == SCALAR_VALUE &&
2882 regs[insn->src_reg].type == SCALAR_VALUE) {
2883 if (tnum_is_const(regs[insn->src_reg].var_off))
2884 reg_set_min_max(&other_branch->regs[insn->dst_reg],
2885 dst_reg, regs[insn->src_reg].var_off.value,
2886 opcode);
2887 else if (tnum_is_const(dst_reg->var_off))
2888 reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
2889 &regs[insn->src_reg],
2890 dst_reg->var_off.value, opcode);
2891 else if (opcode == BPF_JEQ || opcode == BPF_JNE)
2892 /* Comparing for equality, we can combine knowledge */
2893 reg_combine_min_max(&other_branch->regs[insn->src_reg],
2894 &other_branch->regs[insn->dst_reg],
2895 &regs[insn->src_reg],
2896 &regs[insn->dst_reg], opcode);
2897 }
2898 } else if (dst_reg->type == SCALAR_VALUE) {
2899 reg_set_min_max(&other_branch->regs[insn->dst_reg],
2900 dst_reg, insn->imm, opcode);
2901 }
2902
2903 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2904 if (BPF_SRC(insn->code) == BPF_K &&
2905 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2906 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2907 /* Mark all identical map registers in each branch as either
2908 * safe or unknown, depending on whether R == 0 or R != 0 holds in that branch.
2909 */
2910 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
2911 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
2912 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2913 dst_reg->type == PTR_TO_PACKET &&
2914 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2915 find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET);
2916 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2917 dst_reg->type == PTR_TO_PACKET &&
2918 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2919 find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET);
2920 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2921 dst_reg->type == PTR_TO_PACKET_END &&
2922 regs[insn->src_reg].type == PTR_TO_PACKET) {
2923 find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
2924 PTR_TO_PACKET);
2925 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2926 dst_reg->type == PTR_TO_PACKET_END &&
2927 regs[insn->src_reg].type == PTR_TO_PACKET) {
2928 find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
2929 PTR_TO_PACKET);
2930 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2931 dst_reg->type == PTR_TO_PACKET_META &&
2932 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
2933 find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET_META);
2934 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2935 dst_reg->type == PTR_TO_PACKET_META &&
2936 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
2937 find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET_META);
2938 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2939 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2940 regs[insn->src_reg].type == PTR_TO_PACKET_META) {
2941 find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
2942 PTR_TO_PACKET_META);
2943 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2944 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2945 regs[insn->src_reg].type == PTR_TO_PACKET_META) {
2946 find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
2947 PTR_TO_PACKET_META);
2948 } else if (is_pointer_value(env, insn->dst_reg)) {
2949 verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
2950 return -EACCES;
2951 }
2952 if (log_level)
2953 print_verifier_state(this_branch);
2954 return 0;
2955 }
2956
2957 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
2958 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
2959 {
2960 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
2961
2962 return (struct bpf_map *) (unsigned long) imm64;
2963 }
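/* Illustrative layout: BPF_LD_IMM64 spans two instruction slots, with the
 * low 32 bits of the 64-bit immediate in insn[0].imm and the high 32 bits
 * in insn[1].imm.  A (hypothetical) map pointer 0xffff880012345678 patched
 * in by replace_map_fd_with_map_ptr() would thus appear as
 * insn[0].imm = 0x12345678 and insn[1].imm = 0xffff8800.
 */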
2964
2965 /* verify BPF_LD_IMM64 instruction */
2966 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
2967 {
2968 struct bpf_reg_state *regs = env->cur_state.regs;
2969 int err;
2970
2971 if (BPF_SIZE(insn->code) != BPF_DW) {
2972 verbose("invalid BPF_LD_IMM insn\n");
2973 return -EINVAL;
2974 }
2975 if (insn->off != 0) {
2976 verbose("BPF_LD_IMM64 uses reserved fields\n");
2977 return -EINVAL;
2978 }
2979
2980 err = check_reg_arg(env, insn->dst_reg, DST_OP);
2981 if (err)
2982 return err;
2983
2984 if (insn->src_reg == 0) {
2985 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
2986
2987 regs[insn->dst_reg].type = SCALAR_VALUE;
2988 __mark_reg_known(&regs[insn->dst_reg], imm);
2989 return 0;
2990 }
2991
2992 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
2993 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
2994
2995 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
2996 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
2997 return 0;
2998 }
2999
3000 static bool may_access_skb(enum bpf_prog_type type)
3001 {
3002 switch (type) {
3003 case BPF_PROG_TYPE_SOCKET_FILTER:
3004 case BPF_PROG_TYPE_SCHED_CLS:
3005 case BPF_PROG_TYPE_SCHED_ACT:
3006 return true;
3007 default:
3008 return false;
3009 }
3010 }
3011
3012 /* verify safety of LD_ABS|LD_IND instructions:
3013 * - they can only appear in the programs where ctx == skb
3014 * - since they are wrappers of function calls, they scratch R1-R5 registers,
3015 * preserve R6-R9, and store return value into R0
3016 *
3017 * Implicit input:
3018 * ctx == skb == R6 == CTX
3019 *
3020 * Explicit input:
3021 * SRC == any register
3022 * IMM == 32-bit immediate
3023 *
3024 * Output:
3025 * R0 - 8/16/32-bit skb data converted to cpu endianness
3026 */
3027 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
3028 {
3029 struct bpf_reg_state *regs = env->cur_state.regs;
3030 u8 mode = BPF_MODE(insn->code);
3031 int i, err;
3032
3033 if (!may_access_skb(env->prog->type)) {
3034 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3035 return -EINVAL;
3036 }
3037
3038 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3039 BPF_SIZE(insn->code) == BPF_DW ||
3040 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3041 verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
3042 return -EINVAL;
3043 }
3044
3045 /* check whether implicit source operand (register R6) is readable */
3046 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3047 if (err)
3048 return err;
3049
3050 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3051 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3052 return -EINVAL;
3053 }
3054
3055 if (mode == BPF_IND) {
3056 /* check explicit source operand */
3057 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3058 if (err)
3059 return err;
3060 }
3061
3062 /* reset caller saved regs to unreadable */
3063 for (i = 0; i < CALLER_SAVED_REGS; i++) {
3064 mark_reg_not_init(regs, caller_saved[i]);
3065 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3066 }
3067
3068 /* mark destination R0 register as readable, since it contains
3069 * the value fetched from the packet.
3070 * Already marked as written above.
3071 */
3072 mark_reg_unknown(regs, BPF_REG_0);
3073 return 0;
3074 }
3075
3076 /* non-recursive DFS pseudo code
3077 * 1 procedure DFS-iterative(G,v):
3078 * 2 label v as discovered
3079 * 3 let S be a stack
3080 * 4 S.push(v)
3081 * 5 while S is not empty
3082 * 6 t <- S.pop()
3083 * 7 if t is what we're looking for:
3084 * 8 return t
3085 * 9 for all edges e in G.adjacentEdges(t) do
3086 * 10 if edge e is already labelled
3087 * 11 continue with the next edge
3088 * 12 w <- G.adjacentVertex(t,e)
3089 * 13 if vertex w is not discovered and not explored
3090 * 14 label e as tree-edge
3091 * 15 label w as discovered
3092 * 16 S.push(w)
3093 * 17 continue at 5
3094 * 18 else if vertex w is discovered
3095 * 19 label e as back-edge
3096 * 20 else
3097 * 21 // vertex w is explored
3098 * 22 label e as forward- or cross-edge
3099 * 23 label t as explored
3100 * 24 S.pop()
3101 *
3102 * convention:
3103 * 0x10 - discovered
3104 * 0x11 - discovered and fall-through edge labelled
3105 * 0x12 - discovered and fall-through and branch edges labelled
3106 * 0x20 - explored
3107 */
3108
3109 enum {
3110 DISCOVERED = 0x10,
3111 EXPLORED = 0x20,
3112 FALLTHROUGH = 1,
3113 BRANCH = 2,
3114 };
3115
3116 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3117
3118 static int *insn_stack; /* stack of insns to process */
3119 static int cur_stack; /* current stack index */
3120 static int *insn_state;
3121
3122 /* t, w, e - match pseudo-code above:
3123 * t - index of current instruction
3124 * w - next instruction
3125 * e - edge
3126 */
3127 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
3128 {
3129 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
3130 return 0;
3131
3132 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
3133 return 0;
3134
3135 if (w < 0 || w >= env->prog->len) {
3136 verbose("jump out of range from insn %d to %d\n", t, w);
3137 return -EINVAL;
3138 }
3139
3140 if (e == BRANCH)
3141 /* mark branch target for state pruning */
3142 env->explored_states[w] = STATE_LIST_MARK;
3143
3144 if (insn_state[w] == 0) {
3145 /* tree-edge */
3146 insn_state[t] = DISCOVERED | e;
3147 insn_state[w] = DISCOVERED;
3148 if (cur_stack >= env->prog->len)
3149 return -E2BIG;
3150 insn_stack[cur_stack++] = w;
3151 return 1;
3152 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
3153 verbose("back-edge from insn %d to %d\n", t, w);
3154 return -EINVAL;
3155 } else if (insn_state[w] == EXPLORED) {
3156 /* forward- or cross-edge */
3157 insn_state[t] = DISCOVERED | e;
3158 } else {
3159 verbose("insn state internal bug\n");
3160 return -EFAULT;
3161 }
3162 return 0;
3163 }
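/* Illustrative example: for a conditional jump at insn t such as
 * 'if r1 > 5 goto +3', check_cfg() below pushes the fall-through edge to
 * t + 1 and the branch edge to t + 3 + 1.  Reaching an instruction that is
 * still DISCOVERED (not yet EXPLORED) via another edge is reported as a
 * back-edge, i.e. a loop, and rejected.
 */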
3164
3165 /* non-recursive depth-first-search to detect loops in BPF program
3166 * loop == back-edge in directed graph
3167 */
3168 static int check_cfg(struct bpf_verifier_env *env)
3169 {
3170 struct bpf_insn *insns = env->prog->insnsi;
3171 int insn_cnt = env->prog->len;
3172 int ret = 0;
3173 int i, t;
3174
3175 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3176 if (!insn_state)
3177 return -ENOMEM;
3178
3179 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3180 if (!insn_stack) {
3181 kfree(insn_state);
3182 return -ENOMEM;
3183 }
3184
3185 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
3186 insn_stack[0] = 0; /* 0 is the first instruction */
3187 cur_stack = 1;
3188
3189 peek_stack:
3190 if (cur_stack == 0)
3191 goto check_state;
3192 t = insn_stack[cur_stack - 1];
3193
3194 if (BPF_CLASS(insns[t].code) == BPF_JMP) {
3195 u8 opcode = BPF_OP(insns[t].code);
3196
3197 if (opcode == BPF_EXIT) {
3198 goto mark_explored;
3199 } else if (opcode == BPF_CALL) {
3200 ret = push_insn(t, t + 1, FALLTHROUGH, env);
3201 if (ret == 1)
3202 goto peek_stack;
3203 else if (ret < 0)
3204 goto err_free;
3205 if (t + 1 < insn_cnt)
3206 env->explored_states[t + 1] = STATE_LIST_MARK;
3207 } else if (opcode == BPF_JA) {
3208 if (BPF_SRC(insns[t].code) != BPF_K) {
3209 ret = -EINVAL;
3210 goto err_free;
3211 }
3212 /* unconditional jump with single edge */
3213 ret = push_insn(t, t + insns[t].off + 1,
3214 FALLTHROUGH, env);
3215 if (ret == 1)
3216 goto peek_stack;
3217 else if (ret < 0)
3218 goto err_free;
3219 /* tell verifier to check for equivalent states
3220 * after every call and jump
3221 */
3222 if (t + 1 < insn_cnt)
3223 env->explored_states[t + 1] = STATE_LIST_MARK;
3224 } else {
3225 /* conditional jump with two edges */
3226 env->explored_states[t] = STATE_LIST_MARK;
3227 ret = push_insn(t, t + 1, FALLTHROUGH, env);
3228 if (ret == 1)
3229 goto peek_stack;
3230 else if (ret < 0)
3231 goto err_free;
3232
3233 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
3234 if (ret == 1)
3235 goto peek_stack;
3236 else if (ret < 0)
3237 goto err_free;
3238 }
3239 } else {
3240 /* all other non-branch instructions with single
3241 * fall-through edge
3242 */
3243 ret = push_insn(t, t + 1, FALLTHROUGH, env);
3244 if (ret == 1)
3245 goto peek_stack;
3246 else if (ret < 0)
3247 goto err_free;
3248 }
3249
3250 mark_explored:
3251 insn_state[t] = EXPLORED;
3252 if (cur_stack-- <= 0) {
3253 verbose("pop stack internal bug\n");
3254 ret = -EFAULT;
3255 goto err_free;
3256 }
3257 goto peek_stack;
3258
3259 check_state:
3260 for (i = 0; i < insn_cnt; i++) {
3261 if (insn_state[i] != EXPLORED) {
3262 verbose("unreachable insn %d\n", i);
3263 ret = -EINVAL;
3264 goto err_free;
3265 }
3266 }
3267 ret = 0; /* cfg looks good */
3268
3269 err_free:
3270 kfree(insn_state);
3271 kfree(insn_stack);
3272 return ret;
3273 }
3274
3275 /* check %cur's range satisfies %old's */
3276 static bool range_within(struct bpf_reg_state *old,
3277 struct bpf_reg_state *cur)
3278 {
3279 return old->umin_value <= cur->umin_value &&
3280 old->umax_value >= cur->umax_value &&
3281 old->smin_value <= cur->smin_value &&
3282 old->smax_value >= cur->smax_value;
3283 }
3284
3285 /* Maximum number of register states that can exist at once */
3286 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3287 struct idpair {
3288 u32 old;
3289 u32 cur;
3290 };
3291
3292 /* If in the old state two registers had the same id, then they need to have
3293 * the same id in the new state as well. But that id could be different from
3294 * the old state, so we need to track the mapping from old to new ids.
3295 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3296 * regs with old id 5 must also have new id 9 for the new state to be safe. But
3297 * regs with a different old id could still have new id 9, we don't care about
3298 * that.
3299 * So we look through our idmap to see if this old id has been seen before. If
3300 * so, we require the new id to match; otherwise, we add the id pair to the map.
3301 */
3302 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3303 {
3304 unsigned int i;
3305
3306 for (i = 0; i < ID_MAP_SIZE; i++) {
3307 if (!idmap[i].old) {
3308 /* Reached an empty slot; haven't seen this id before */
3309 idmap[i].old = old_id;
3310 idmap[i].cur = cur_id;
3311 return true;
3312 }
3313 if (idmap[i].old == old_id)
3314 return idmap[i].cur == cur_id;
3315 }
3316 /* We ran out of idmap slots, which should be impossible */
3317 WARN_ON_ONCE(1);
3318 return false;
3319 }
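/* Illustrative example: if the old state had r1.id == r2.id == 5, the
 * current state is only accepted if r1 and r2 still share a single id,
 * e.g. both 9.  Should r1 map 5 -> 9 but r2 then present id 7, the second
 * check_ids() call returns false and the states are not equivalent.
 */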
3320
3321 /* Returns true if (rold safe implies rcur safe) */
3322 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3323 struct idpair *idmap)
3324 {
3325 if (!(rold->live & REG_LIVE_READ))
3326 /* explored state didn't use this */
3327 return true;
3328
3329 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
3330 return true;
3331
3332 if (rold->type == NOT_INIT)
3333 /* explored state can't have used this */
3334 return true;
3335 if (rcur->type == NOT_INIT)
3336 return false;
3337 switch (rold->type) {
3338 case SCALAR_VALUE:
3339 if (rcur->type == SCALAR_VALUE) {
3340 /* new val must satisfy old val knowledge */
3341 return range_within(rold, rcur) &&
3342 tnum_in(rold->var_off, rcur->var_off);
3343 } else {
3344 /* if we knew anything about the old value, we're not
3345 * equal, because we can't know anything about the
3346 * scalar value of the pointer in the new value.
3347 */
3348 return rold->umin_value == 0 &&
3349 rold->umax_value == U64_MAX &&
3350 rold->smin_value == S64_MIN &&
3351 rold->smax_value == S64_MAX &&
3352 tnum_is_unknown(rold->var_off);
3353 }
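/* Worked example for the SCALAR_VALUE case above: an old range of
 * [0, 10] accepts a current range of [2, 5] but rejects [0, 20];
 * every value the current state might hold must already have been
 * proven safe when the old state was verified.
 */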
3354 case PTR_TO_MAP_VALUE:
3355 /* If the new min/max/var_off satisfy the old ones and
3356 * everything else matches, we are OK.
3357 * We don't care about the 'id' value, because nothing
3358 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3359 */
3360 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3361 range_within(rold, rcur) &&
3362 tnum_in(rold->var_off, rcur->var_off);
3363 case PTR_TO_MAP_VALUE_OR_NULL:
3364 /* a PTR_TO_MAP_VALUE could be safe to use as a
3365 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3366 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3367 * checked, doing so could have affected others with the same
3368 * id, and we can't check for that because we lost the id when
3369 * we converted to a PTR_TO_MAP_VALUE.
3370 */
3371 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
3372 return false;
3373 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
3374 return false;
3375 /* Check our ids match any regs they're supposed to */
3376 return check_ids(rold->id, rcur->id, idmap);
3377 case PTR_TO_PACKET_META:
3378 case PTR_TO_PACKET:
3379 if (rcur->type != rold->type)
3380 return false;
3381 /* We must have at least as much range as the old ptr
3382 * did, so that any accesses which were safe before are
3383 * still safe. This is true even if old range < old off,
3384 * since someone could have accessed through (ptr - k), or
3385 * even done ptr -= k in a register, to get a safe access.
3386 */
3387 if (rold->range > rcur->range)
3388 return false;
3389 /* If the offsets don't match, we can't trust our alignment;
3390 * nor can we be sure that we won't fall out of range.
3391 */
3392 if (rold->off != rcur->off)
3393 return false;
3394 /* id relations must be preserved */
3395 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
3396 return false;
3397 /* new val must satisfy old val knowledge */
3398 return range_within(rold, rcur) &&
3399 tnum_in(rold->var_off, rcur->var_off);
3400 case PTR_TO_CTX:
3401 case CONST_PTR_TO_MAP:
3402 case PTR_TO_STACK:
3403 case PTR_TO_PACKET_END:
3404 /* Only valid matches are exact, which memcmp() above
3405 * would have accepted
3406 */
3407 default:
3408 /* Don't know what's going on, just say it's not safe */
3409 return false;
3410 }
3411
3412 /* Shouldn't get here; if we do, say it's not safe */
3413 WARN_ON_ONCE(1);
3414 return false;
3415 }
3416
3417 /* compare two verifier states
3418 *
3419 * all states stored in state_list are known to be valid, since
3420 * verifier reached 'bpf_exit' instruction through them
3421 *
3422 * this function is called when the verifier explores different branches of
3423 * execution popped from the state stack. If it sees an old state that has
3424 * more strict register state and more strict stack state, then this execution
3425 * branch doesn't need to be explored further, since verifier already
3426 * concluded that more strict state leads to valid finish.
3427 *
3428 * Therefore two states are equivalent if register state is more conservative
3429 * and explored stack state is more conservative than the current one.
3430 * Example:
3431 * explored current
3432 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3433 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3434 *
3435 * In other words, if the current stack state (the one being explored) has more
3436 * valid slots than the old one that already passed validation, it means
3437 * the verifier can stop exploring and conclude that the current state is valid too.
3438 *
3439 * Similarly with registers. If the explored state has a register marked
3440 * invalid whereas the same register in the current state is meaningful, it
3441 * means that the current state will reach the 'bpf_exit' instruction safely.
3442 */
3443 static bool states_equal(struct bpf_verifier_env *env,
3444 struct bpf_verifier_state *old,
3445 struct bpf_verifier_state *cur)
3446 {
3447 struct idpair *idmap;
3448 bool ret = false;
3449 int i;
3450
3451 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3452 /* If we failed to allocate the idmap, just say it's not safe */
3453 if (!idmap)
3454 return false;
3455
3456 for (i = 0; i < MAX_BPF_REG; i++) {
3457 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3458 goto out_free;
3459 }
3460
3461 for (i = 0; i < MAX_BPF_STACK; i++) {
3462 if (old->stack_slot_type[i] == STACK_INVALID)
3463 continue;
3464 if (old->stack_slot_type[i] != cur->stack_slot_type[i])
3465 /* Ex: old explored (safe) state has STACK_SPILL in
3466 * this stack slot, but current has STACK_MISC ->
3467 * these verifier states are not equivalent,
3468 * return false to continue verification of this path
3469 */
3470 goto out_free;
3471 if (i % BPF_REG_SIZE)
3472 continue;
3473 if (old->stack_slot_type[i] != STACK_SPILL)
3474 continue;
3475 if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE],
3476 &cur->spilled_regs[i / BPF_REG_SIZE],
3477 idmap))
3478 /* when explored and current stack slots are both storing
3479 * spilled registers, check that the stored pointer types
3480 * are the same as well.
3481 * Ex: explored safe path could have stored
3482 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3483 * but current path has stored:
3484 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3485 * such verifier states are not equivalent.
3486 * return false to continue verification of this path
3487 */
3488 goto out_free;
3489 else
3490 continue;
3491 }
3492 ret = true;
3493 out_free:
3494 kfree(idmap);
3495 return ret;
3496 }
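/* Indexing sketch for the stack comparison above (values hypothetical, not
 * taken from this file): stack_slot_type[] tracks one byte per entry while
 * spilled_regs[] tracks one 8-byte slot per entry, so a register spilled to
 * fp-8 is recorded roughly as
 *
 *	stack_slot_type[MAX_BPF_STACK - 8 .. MAX_BPF_STACK - 1] = STACK_SPILL;
 *	spilled_regs[(MAX_BPF_STACK - 8) / BPF_REG_SIZE] = <saved reg state>;
 *
 * which is why states_equal() only consults spilled_regs[] on 8-byte aligned
 * offsets (i % BPF_REG_SIZE == 0) whose slot type is STACK_SPILL.
 */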
3497
3498 /* A write screens off any subsequent reads; but write marks come from the
3499 * straight-line code between a state and its parent. When we arrive at a
3500 * jump target (in the first iteration of the propagate_liveness() loop),
3501 * we didn't arrive by the straight-line code, so read marks in state must
3502 * propagate to parent regardless of state's write marks.
3503 */
3504 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3505 struct bpf_verifier_state *parent)
3506 {
3507 bool writes = parent == state->parent; /* Observe write marks */
3508 bool touched = false; /* any changes made? */
3509 int i;
3510
3511 if (!parent)
3512 return touched;
3513 /* Propagate read liveness of registers... */
3514 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
3515 /* We don't need to worry about FP liveness because it's read-only */
3516 for (i = 0; i < BPF_REG_FP; i++) {
3517 if (parent->regs[i].live & REG_LIVE_READ)
3518 continue;
3519 if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3520 continue;
3521 if (state->regs[i].live & REG_LIVE_READ) {
3522 parent->regs[i].live |= REG_LIVE_READ;
3523 touched = true;
3524 }
3525 }
3526 /* ... and stack slots */
3527 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) {
3528 if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
3529 continue;
3530 if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
3531 continue;
3532 if (parent->spilled_regs[i].live & REG_LIVE_READ)
3533 continue;
3534 if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN))
3535 continue;
3536 if (state->spilled_regs[i].live & REG_LIVE_READ) {
3537 parent->spilled_regs[i].live |= REG_LIVE_READ;
3538 touched = true;
3539 }
3540 }
3541 return touched;
3542 }
3543
3544 /* "parent" is "a state from which we reach the current state", but initially
3545 * it is not the state->parent (i.e. "the state whose straight-line code leads
3546 * to the current state"), instead it is the state that happened to arrive at
3547 * a (prunable) equivalent of the current state. See comment above
3548 * do_propagate_liveness() for consequences of this.
3549 * This function is just a more efficient way of calling mark_reg_read() or
3550 * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3551 * though it requires that parent != state->parent in the call arguments.
3552 */
3553 static void propagate_liveness(const struct bpf_verifier_state *state,
3554 struct bpf_verifier_state *parent)
3555 {
3556 while (do_propagate_liveness(state, parent)) {
3557 /* Something changed, so we need to feed those changes onward */
3558 state = parent;
3559 parent = state->parent;
3560 }
3561 }
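/* Hypothetical walk-through of the loop above (scenario invented for
 * illustration): is_state_visited() calls propagate_liveness(explored, cur)
 * when 'cur' is pruned against an already explored state.
 *
 *	1st call: state = explored, parent = cur.  'cur' is not explored's
 *	          straight-line parent, so writes == false and every
 *	          REG_LIVE_READ mark in 'explored' is copied to 'cur', even
 *	          for registers that 'explored' also wrote.
 *	2nd call: state = cur, parent = cur->parent.  Now writes == true, so
 *	          a read of, say, r6 only propagates upward if 'cur' did not
 *	          itself write r6 (REG_LIVE_WRITTEN screens it off).
 *
 * The loop stops as soon as a pass marks nothing new.
 */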
3562
3563 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3564 {
3565 struct bpf_verifier_state_list *new_sl;
3566 struct bpf_verifier_state_list *sl;
3567 int i;
3568
3569 sl = env->explored_states[insn_idx];
3570 if (!sl)
3571 /* this 'insn_idx' instruction wasn't marked, so we will not
3572 * be doing state search here
3573 */
3574 return 0;
3575
3576 while (sl != STATE_LIST_MARK) {
3577 if (states_equal(env, &sl->state, &env->cur_state)) {
3578 /* reached equivalent register/stack state,
3579 * prune the search.
3580 * Registers read by the continuation are read by us.
3581 * If we have any write marks in env->cur_state, they
3582 * will prevent corresponding reads in the continuation
3583 * from reaching our parent (an explored_state). Our
3584 * own state will get the read marks recorded, but
3585 * they'll be immediately forgotten as we're pruning
3586 * this state and will pop a new one.
3587 */
3588 propagate_liveness(&sl->state, &env->cur_state);
3589 return 1;
3590 }
3591 sl = sl->next;
3592 }
3593
3594 /* there were no equivalent states, remember current one.
3595 * technically the current state is not proven to be safe yet,
3596 * but it will either reach bpf_exit (which means it's safe) or
3597 * it will be rejected. Since there are no loops, we won't be
3598 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3599 */
3600 new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
3601 if (!new_sl)
3602 return -ENOMEM;
3603
3604 /* add new state to the head of linked list */
3605 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
3606 new_sl->next = env->explored_states[insn_idx];
3607 env->explored_states[insn_idx] = new_sl;
3608 /* connect new state to parentage chain */
3609 env->cur_state.parent = &new_sl->state;
3610 /* clear write marks in current state: the writes we did are not writes
3611 * our child did, so they don't screen off its reads from us.
3612 * (There are no read marks in current state, because reads always mark
3613 * their parent and current state never has children yet. Only
3614 * explored_states can get read marks.)
3615 */
3616 for (i = 0; i < BPF_REG_FP; i++)
3617 env->cur_state.regs[i].live = REG_LIVE_NONE;
3618 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
3619 if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL)
3620 env->cur_state.spilled_regs[i].live = REG_LIVE_NONE;
3621 return 0;
3622 }
3623
3624 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3625 int insn_idx, int prev_insn_idx)
3626 {
3627 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
3628 return 0;
3629
3630 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
3631 }
3632
3633 static int do_check(struct bpf_verifier_env *env)
3634 {
3635 struct bpf_verifier_state *state = &env->cur_state;
3636 struct bpf_insn *insns = env->prog->insnsi;
3637 struct bpf_reg_state *regs = state->regs;
3638 int insn_cnt = env->prog->len;
3639 int insn_idx, prev_insn_idx = 0;
3640 int insn_processed = 0;
3641 bool do_print_state = false;
3642
3643 init_reg_state(regs);
3644 state->parent = NULL;
3645 insn_idx = 0;
3646 for (;;) {
3647 struct bpf_insn *insn;
3648 u8 class;
3649 int err;
3650
3651 if (insn_idx >= insn_cnt) {
3652 verbose("invalid insn idx %d insn_cnt %d\n",
3653 insn_idx, insn_cnt);
3654 return -EFAULT;
3655 }
3656
3657 insn = &insns[insn_idx];
3658 class = BPF_CLASS(insn->code);
3659
3660 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3661 verbose("BPF program is too large. Processed %d insn\n",
3662 insn_processed);
3663 return -E2BIG;
3664 }
3665
3666 err = is_state_visited(env, insn_idx);
3667 if (err < 0)
3668 return err;
3669 if (err == 1) {
3670 /* found equivalent state, can prune the search */
3671 if (log_level) {
3672 if (do_print_state)
3673 verbose("\nfrom %d to %d: safe\n",
3674 prev_insn_idx, insn_idx);
3675 else
3676 verbose("%d: safe\n", insn_idx);
3677 }
3678 goto process_bpf_exit;
3679 }
3680
3681 if (need_resched())
3682 cond_resched();
3683
3684 if (log_level > 1 || (log_level && do_print_state)) {
3685 if (log_level > 1)
3686 verbose("%d:", insn_idx);
3687 else
3688 verbose("\nfrom %d to %d:",
3689 prev_insn_idx, insn_idx);
3690 print_verifier_state(&env->cur_state);
3691 do_print_state = false;
3692 }
3693
3694 if (log_level) {
3695 verbose("%d: ", insn_idx);
3696 print_bpf_insn(env, insn);
3697 }
3698
3699 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3700 if (err)
3701 return err;
3702
3703 if (class == BPF_ALU || class == BPF_ALU64) {
3704 err = check_alu_op(env, insn);
3705 if (err)
3706 return err;
3707
3708 } else if (class == BPF_LDX) {
3709 enum bpf_reg_type *prev_src_type, src_reg_type;
3710
3711 /* check for reserved fields is already done */
3712
3713 /* check src operand */
3714 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3715 if (err)
3716 return err;
3717
3718 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3719 if (err)
3720 return err;
3721
3722 src_reg_type = regs[insn->src_reg].type;
3723
3724 /* check that memory (src_reg + off) is readable,
3725 * the state of dst_reg will be updated by this func
3726 */
3727 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3728 BPF_SIZE(insn->code), BPF_READ,
3729 insn->dst_reg);
3730 if (err)
3731 return err;
3732
3733 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3734
3735 if (*prev_src_type == NOT_INIT) {
3736 /* saw a valid insn
3737 * dst_reg = *(u32 *)(src_reg + off)
3738 * save type to validate intersecting paths
3739 */
3740 *prev_src_type = src_reg_type;
3741
3742 } else if (src_reg_type != *prev_src_type &&
3743 (src_reg_type == PTR_TO_CTX ||
3744 *prev_src_type == PTR_TO_CTX)) {
3745 /* An abusive program is trying to use the same insn
3746 * dst_reg = *(u32*) (src_reg + off)
3747 * with different pointer types:
3748 * src_reg == ctx in one branch and
3749 * src_reg == stack|map in some other branch.
3750 * Reject it.
3751 */
3752 verbose("same insn cannot be used with different pointers\n");
3753 return -EINVAL;
3754 }
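/* Hypothetical program shape (not from this file) that the check above
 * rejects: both branches reach the same load insn, but with different
 * pointer types in the source register.
 *
 *	if (cond) {
 *		r2 = r1;	// PTR_TO_CTX on this path
 *	} else {
 *		r2 = r10;
 *		r2 += -8;	// PTR_TO_STACK on this path
 *	}
 *	r0 = *(u32 *)(r2 + 0);	// same insn, two different pointer types
 *
 * ctx loads are later rewritten per-offset by convert_ctx_accesses(), so a
 * single insn cannot serve both a ctx pointer and a non-ctx pointer.
 */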
3755
3756 } else if (class == BPF_STX) {
3757 enum bpf_reg_type *prev_dst_type, dst_reg_type;
3758
3759 if (BPF_MODE(insn->code) == BPF_XADD) {
3760 err = check_xadd(env, insn_idx, insn);
3761 if (err)
3762 return err;
3763 insn_idx++;
3764 continue;
3765 }
3766
3767 /* check src1 operand */
3768 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3769 if (err)
3770 return err;
3771 /* check src2 operand */
3772 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3773 if (err)
3774 return err;
3775
3776 dst_reg_type = regs[insn->dst_reg].type;
3777
3778 /* check that memory (dst_reg + off) is writeable */
3779 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3780 BPF_SIZE(insn->code), BPF_WRITE,
3781 insn->src_reg);
3782 if (err)
3783 return err;
3784
3785 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3786
3787 if (*prev_dst_type == NOT_INIT) {
3788 *prev_dst_type = dst_reg_type;
3789 } else if (dst_reg_type != *prev_dst_type &&
3790 (dst_reg_type == PTR_TO_CTX ||
3791 *prev_dst_type == PTR_TO_CTX)) {
3792 verbose("same insn cannot be used with different pointers\n");
3793 return -EINVAL;
3794 }
3795
3796 } else if (class == BPF_ST) {
3797 if (BPF_MODE(insn->code) != BPF_MEM ||
3798 insn->src_reg != BPF_REG_0) {
3799 verbose("BPF_ST uses reserved fields\n");
3800 return -EINVAL;
3801 }
3802 /* check src operand */
3803 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3804 if (err)
3805 return err;
3806
3807 /* check that memory (dst_reg + off) is writeable */
3808 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3809 BPF_SIZE(insn->code), BPF_WRITE,
3810 -1);
3811 if (err)
3812 return err;
3813
3814 } else if (class == BPF_JMP) {
3815 u8 opcode = BPF_OP(insn->code);
3816
3817 if (opcode == BPF_CALL) {
3818 if (BPF_SRC(insn->code) != BPF_K ||
3819 insn->off != 0 ||
3820 insn->src_reg != BPF_REG_0 ||
3821 insn->dst_reg != BPF_REG_0) {
3822 verbose("BPF_CALL uses reserved fields\n");
3823 return -EINVAL;
3824 }
3825
3826 err = check_call(env, insn->imm, insn_idx);
3827 if (err)
3828 return err;
3829
3830 } else if (opcode == BPF_JA) {
3831 if (BPF_SRC(insn->code) != BPF_K ||
3832 insn->imm != 0 ||
3833 insn->src_reg != BPF_REG_0 ||
3834 insn->dst_reg != BPF_REG_0) {
3835 verbose("BPF_JA uses reserved fields\n");
3836 return -EINVAL;
3837 }
3838
3839 insn_idx += insn->off + 1;
3840 continue;
3841
3842 } else if (opcode == BPF_EXIT) {
3843 if (BPF_SRC(insn->code) != BPF_K ||
3844 insn->imm != 0 ||
3845 insn->src_reg != BPF_REG_0 ||
3846 insn->dst_reg != BPF_REG_0) {
3847 verbose("BPF_EXIT uses reserved fields\n");
3848 return -EINVAL;
3849 }
3850
3851 /* eBPF calling convention is such that R0 is used
3852 * to return the value from eBPF program.
3853 * Make sure that it's readable at this time
3854 * of bpf_exit, which means that program wrote
3855 * something into it earlier
3856 */
3857 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
3858 if (err)
3859 return err;
3860
3861 if (is_pointer_value(env, BPF_REG_0)) {
3862 verbose("R0 leaks addr as return value\n");
3863 return -EACCES;
3864 }
3865
3866 process_bpf_exit:
3867 insn_idx = pop_stack(env, &prev_insn_idx);
3868 if (insn_idx < 0) {
3869 break;
3870 } else {
3871 do_print_state = true;
3872 continue;
3873 }
3874 } else {
3875 err = check_cond_jmp_op(env, insn, &insn_idx);
3876 if (err)
3877 return err;
3878 }
3879 } else if (class == BPF_LD) {
3880 u8 mode = BPF_MODE(insn->code);
3881
3882 if (mode == BPF_ABS || mode == BPF_IND) {
3883 err = check_ld_abs(env, insn);
3884 if (err)
3885 return err;
3886
3887 } else if (mode == BPF_IMM) {
3888 err = check_ld_imm(env, insn);
3889 if (err)
3890 return err;
3891
3892 insn_idx++;
3893 } else {
3894 verbose("invalid BPF_LD mode\n");
3895 return -EINVAL;
3896 }
3897 } else {
3898 verbose("unknown insn class %d\n", class);
3899 return -EINVAL;
3900 }
3901
3902 insn_idx++;
3903 }
3904
3905 verbose("processed %d insns, stack depth %d\n",
3906 insn_processed, env->prog->aux->stack_depth);
3907 return 0;
3908 }
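/* Sketch of the output do_check() produces when log_level is set, for a
 * hypothetical two-insn program (exact formatting may differ between kernel
 * versions):
 *
 *	0: (b7) r0 = 0
 *	1: (95) exit
 *	processed 2 insns, stack depth 0
 *
 * With log_level > 1 each instruction line is additionally preceded by a
 * print_verifier_state() dump of the current registers and stack slots.
 */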
3909
3910 static int check_map_prealloc(struct bpf_map *map)
3911 {
3912 return (map->map_type != BPF_MAP_TYPE_HASH &&
3913 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
3914 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
3915 !(map->map_flags & BPF_F_NO_PREALLOC);
3916 }
3917
3918 static int check_map_prog_compatibility(struct bpf_map *map,
3919 struct bpf_prog *prog)
3920
3921 {
3922 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
3923 * preallocated hash maps, since doing memory allocation
3924 * in overflow_handler can crash depending on where nmi got
3925 * triggered.
3926 */
3927 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
3928 if (!check_map_prealloc(map)) {
3929 verbose("perf_event programs can only use preallocated hash map\n");
3930 return -EINVAL;
3931 }
3932 if (map->inner_map_meta &&
3933 !check_map_prealloc(map->inner_map_meta)) {
3934 verbose("perf_event programs can only use preallocated inner hash map\n");
3935 return -EINVAL;
3936 }
3937 }
3938 return 0;
3939 }
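/* Hypothetical userspace setup (not part of this file) that the check above
 * rejects: a hash map created with BPF_F_NO_PREALLOC cannot be used from a
 * BPF_PROG_TYPE_PERF_EVENT program, since its elements would otherwise be
 * allocated from NMI-like context.
 *
 *	union bpf_attr map_attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *		.map_flags   = BPF_F_NO_PREALLOC,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &map_attr,
 *			     sizeof(map_attr));
 *
 * Loading a perf_event program that references map_fd then fails with
 * "perf_event programs can only use preallocated hash map".
 */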
3940
3941 /* look for pseudo eBPF instructions that access map FDs and
3942 * replace them with actual map pointers
3943 */
3944 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
3945 {
3946 struct bpf_insn *insn = env->prog->insnsi;
3947 int insn_cnt = env->prog->len;
3948 int i, j, err;
3949
3950 err = bpf_prog_calc_tag(env->prog);
3951 if (err)
3952 return err;
3953
3954 for (i = 0; i < insn_cnt; i++, insn++) {
3955 if (BPF_CLASS(insn->code) == BPF_LDX &&
3956 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
3957 verbose("BPF_LDX uses reserved fields\n");
3958 return -EINVAL;
3959 }
3960
3961 if (BPF_CLASS(insn->code) == BPF_STX &&
3962 ((BPF_MODE(insn->code) != BPF_MEM &&
3963 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
3964 verbose("BPF_STX uses reserved fields\n");
3965 return -EINVAL;
3966 }
3967
3968 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
3969 struct bpf_map *map;
3970 struct fd f;
3971
3972 if (i == insn_cnt - 1 || insn[1].code != 0 ||
3973 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
3974 insn[1].off != 0) {
3975 verbose("invalid bpf_ld_imm64 insn\n");
3976 return -EINVAL;
3977 }
3978
3979 if (insn->src_reg == 0)
3980 /* valid generic load 64-bit imm */
3981 goto next_insn;
3982
3983 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
3984 verbose("unrecognized bpf_ld_imm64 insn\n");
3985 return -EINVAL;
3986 }
3987
3988 f = fdget(insn->imm);
3989 map = __bpf_map_get(f);
3990 if (IS_ERR(map)) {
3991 verbose("fd %d is not pointing to valid bpf_map\n",
3992 insn->imm);
3993 return PTR_ERR(map);
3994 }
3995
3996 err = check_map_prog_compatibility(map, env->prog);
3997 if (err) {
3998 fdput(f);
3999 return err;
4000 }
4001
4002 /* store map pointer inside BPF_LD_IMM64 instruction */
4003 insn[0].imm = (u32) (unsigned long) map;
4004 insn[1].imm = ((u64) (unsigned long) map) >> 32;
4005
4006 /* check whether we recorded this map already */
4007 for (j = 0; j < env->used_map_cnt; j++)
4008 if (env->used_maps[j] == map) {
4009 fdput(f);
4010 goto next_insn;
4011 }
4012
4013 if (env->used_map_cnt >= MAX_USED_MAPS) {
4014 fdput(f);
4015 return -E2BIG;
4016 }
4017
4018 /* hold the map. If the program is rejected by verifier,
4019 * the map will be released by release_maps() or it
4020 * will be used by the valid program until it's unloaded
4021 * and all maps are released in free_bpf_prog_info()
4022 */
4023 map = bpf_map_inc(map, false);
4024 if (IS_ERR(map)) {
4025 fdput(f);
4026 return PTR_ERR(map);
4027 }
4028 env->used_maps[env->used_map_cnt++] = map;
4029
4030 fdput(f);
4031 next_insn:
4032 insn++;
4033 i++;
4034 }
4035 }
4036
4037 /* now all pseudo BPF_LD_IMM64 instructions load valid
4038 * 'struct bpf_map *' into a register instead of user map_fd.
4039 * These pointers will be used later by verifier to validate map access.
4040 */
4041 return 0;
4042 }
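/* Illustrative sketch (field values hypothetical) of the pseudo instruction
 * pair this pass rewrites.  Userspace loaders emit a two-slot
 * BPF_LD | BPF_IMM | BPF_DW with the map fd in the immediate:
 *
 *	insn[0] = (struct bpf_insn) {
 *		.code    = BPF_LD | BPF_IMM | BPF_DW,
 *		.dst_reg = BPF_REG_1,
 *		.src_reg = BPF_PSEUDO_MAP_FD,
 *		.imm     = map_fd,
 *	};
 *	insn[1] = (struct bpf_insn) { .imm = 0 };	/* upper 32 bits */
 *
 * After replace_map_fd_with_map_ptr(), insn[0].imm and insn[1].imm hold the
 * low and high 32 bits of the in-kernel 'struct bpf_map *' and the map's
 * refcount has been taken via bpf_map_inc().
 */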
4043
4044 /* drop refcnt of maps used by the rejected program */
4045 static void release_maps(struct bpf_verifier_env *env)
4046 {
4047 int i;
4048
4049 for (i = 0; i < env->used_map_cnt; i++)
4050 bpf_map_put(env->used_maps[i]);
4051 }
4052
4053 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
4054 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
4055 {
4056 struct bpf_insn *insn = env->prog->insnsi;
4057 int insn_cnt = env->prog->len;
4058 int i;
4059
4060 for (i = 0; i < insn_cnt; i++, insn++)
4061 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
4062 insn->src_reg = 0;
4063 }
4064
4065 /* single env->prog->insnsi[off] instruction was replaced with the range
4066 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
4067 * [0, off) and [off, end) to new locations, so the patched range stays zeroed
4068 */
4069 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4070 u32 off, u32 cnt)
4071 {
4072 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
4073
4074 if (cnt == 1)
4075 return 0;
4076 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
4077 if (!new_data)
4078 return -ENOMEM;
4079 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4080 memcpy(new_data + off + cnt - 1, old_data + off,
4081 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4082 env->insn_aux_data = new_data;
4083 vfree(old_data);
4084 return 0;
4085 }
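/* Worked example with hypothetical numbers: patching the single insn at
 * off = 5 with a 3-insn sequence (cnt = 3), growing the program to
 * prog_len = 12, results in
 *
 *	new_data[0..4]  = old_data[0..4]	// first memcpy
 *	new_data[5..6]  = all-zero		// the newly inserted insns
 *	new_data[7..11] = old_data[5..9]	// second memcpy
 *
 * i.e. the aux data that described the patched insn now describes the last
 * insn of its replacement, and the inserted insns start out with zeroed aux
 * data (ptr_type == NOT_INIT).
 */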
4086
4087 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
4088 const struct bpf_insn *patch, u32 len)
4089 {
4090 struct bpf_prog *new_prog;
4091
4092 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4093 if (!new_prog)
4094 return NULL;
4095 if (adjust_insn_aux_data(env, new_prog->len, off, len))
4096 return NULL;
4097 return new_prog;
4098 }
4099
4100 /* convert load instructions that access fields of 'struct __sk_buff'
4101 * into sequence of instructions that access fields of 'struct sk_buff'
4102 */
4103 static int convert_ctx_accesses(struct bpf_verifier_env *env)
4104 {
4105 const struct bpf_verifier_ops *ops = env->prog->aux->ops;
4106 int i, cnt, size, ctx_field_size, delta = 0;
4107 const int insn_cnt = env->prog->len;
4108 struct bpf_insn insn_buf[16], *insn;
4109 struct bpf_prog *new_prog;
4110 enum bpf_access_type type;
4111 bool is_narrower_load;
4112 u32 target_size;
4113
4114 if (ops->gen_prologue) {
4115 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
4116 env->prog);
4117 if (cnt >= ARRAY_SIZE(insn_buf)) {
4118 verbose("bpf verifier is misconfigured\n");
4119 return -EINVAL;
4120 } else if (cnt) {
4121 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
4122 if (!new_prog)
4123 return -ENOMEM;
4124
4125 env->prog = new_prog;
4126 delta += cnt - 1;
4127 }
4128 }
4129
4130 if (!ops->convert_ctx_access)
4131 return 0;
4132
4133 insn = env->prog->insnsi + delta;
4134
4135 for (i = 0; i < insn_cnt; i++, insn++) {
4136 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4137 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4138 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4139 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4140 type = BPF_READ;
4141 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4142 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4143 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4144 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4145 type = BPF_WRITE;
4146 else
4147 continue;
4148
4149 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4150 continue;
4151
4152 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
4153 size = BPF_LDST_BYTES(insn);
4154
4155 /* If the read access is a narrower load of the field,
4156 * convert to a 4/8-byte load, to minimize program type specific
4157 * convert_ctx_access changes. If conversion is successful,
4158 * we will apply the proper mask to the result.
4159 */
4160 is_narrower_load = size < ctx_field_size;
4161 if (is_narrower_load) {
4162 u32 off = insn->off;
4163 u8 size_code;
4164
4165 if (type == BPF_WRITE) {
4166 verbose("bpf verifier narrow ctx access misconfigured\n");
4167 return -EINVAL;
4168 }
4169
4170 size_code = BPF_H;
4171 if (ctx_field_size == 4)
4172 size_code = BPF_W;
4173 else if (ctx_field_size == 8)
4174 size_code = BPF_DW;
4175
4176 insn->off = off & ~(ctx_field_size - 1);
4177 insn->code = BPF_LDX | BPF_MEM | size_code;
4178 }
4179
4180 target_size = 0;
4181 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
4182 &target_size);
4183 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
4184 (ctx_field_size && !target_size)) {
4185 verbose("bpf verifier is misconfigured\n");
4186 return -EINVAL;
4187 }
4188
4189 if (is_narrower_load && size < target_size) {
4190 if (ctx_field_size <= 4)
4191 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
4192 (1 << size * 8) - 1);
4193 else
4194 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
4195 (1 << size * 8) - 1);
4196 }
4197
4198 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4199 if (!new_prog)
4200 return -ENOMEM;
4201
4202 delta += cnt - 1;
4203
4204 /* keep walking new program and skip insns we just inserted */
4205 env->prog = new_prog;
4206 insn = new_prog->insnsi + i + delta;
4207 }
4208
4209 return 0;
4210 }
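/* Hypothetical example (not from this file) of the narrow load rewrite
 * above.  A 1-byte read of the 4-byte 'mark' field of struct __sk_buff,
 *
 *	r0 = *(u8 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * is first widened to the full field size, converted by the program type's
 * convert_ctx_access() callback, and then masked back down to the bytes the
 * program asked for:
 *
 *	r0 = *(u32 *)(r1 + <offset of mark inside struct sk_buff>)
 *	r0 &= 0xff		// BPF_ALU32_IMM(BPF_AND, BPF_REG_0, 0xff)
 */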
4211
4212 /* fixup insn->imm field of bpf_call instructions
4213 * and inline eligible helpers as explicit sequence of BPF instructions
4214 *
4215 * this function is called after eBPF program passed verification
4216 */
4217 static int fixup_bpf_calls(struct bpf_verifier_env *env)
4218 {
4219 struct bpf_prog *prog = env->prog;
4220 struct bpf_insn *insn = prog->insnsi;
4221 const struct bpf_func_proto *fn;
4222 const int insn_cnt = prog->len;
4223 struct bpf_insn insn_buf[16];
4224 struct bpf_prog *new_prog;
4225 struct bpf_map *map_ptr;
4226 int i, cnt, delta = 0;
4227
4228 for (i = 0; i < insn_cnt; i++, insn++) {
4229 if (insn->code != (BPF_JMP | BPF_CALL))
4230 continue;
4231
4232 if (insn->imm == BPF_FUNC_get_route_realm)
4233 prog->dst_needed = 1;
4234 if (insn->imm == BPF_FUNC_get_prandom_u32)
4235 bpf_user_rnd_init_once();
4236 if (insn->imm == BPF_FUNC_tail_call) {
4237 /* If we tail call into other programs, we
4238 * cannot make any assumptions since they can
4239 * be replaced dynamically during runtime in
4240 * the program array.
4241 */
4242 prog->cb_access = 1;
4243 env->prog->aux->stack_depth = MAX_BPF_STACK;
4244
4245 /* mark bpf_tail_call as different opcode to avoid
4246 * conditional branch in the interpreter for every normal
4247 * call and to prevent accidental JITing by JIT compiler
4248 * that doesn't support bpf_tail_call yet
4249 */
4250 insn->imm = 0;
4251 insn->code = BPF_JMP | BPF_TAIL_CALL;
4252 continue;
4253 }
4254
4255 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
4256 * handlers are currently limited to 64 bit only.
4257 */
4258 if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
4259 insn->imm == BPF_FUNC_map_lookup_elem) {
4260 map_ptr = env->insn_aux_data[i + delta].map_ptr;
4261 if (map_ptr == BPF_MAP_PTR_POISON ||
4262 !map_ptr->ops->map_gen_lookup)
4263 goto patch_call_imm;
4264
4265 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
4266 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
4267 verbose("bpf verifier is misconfigured\n");
4268 return -EINVAL;
4269 }
4270
4271 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
4272 cnt);
4273 if (!new_prog)
4274 return -ENOMEM;
4275
4276 delta += cnt - 1;
4277
4278 /* keep walking new program and skip insns we just inserted */
4279 env->prog = prog = new_prog;
4280 insn = new_prog->insnsi + i + delta;
4281 continue;
4282 }
4283
4284 if (insn->imm == BPF_FUNC_redirect_map) {
4285 /* Note, we cannot use prog directly as imm as subsequent
4286 * rewrites would still change the prog pointer. The only
4287 * stable address we can use is aux, which also works with
4288 * prog clones during blinding.
4289 */
4290 u64 addr = (unsigned long)prog->aux;
4291 struct bpf_insn r4_ld[] = {
4292 BPF_LD_IMM64(BPF_REG_4, addr),
4293 *insn,
4294 };
4295 cnt = ARRAY_SIZE(r4_ld);
4296
4297 new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
4298 if (!new_prog)
4299 return -ENOMEM;
4300
4301 delta += cnt - 1;
4302 env->prog = prog = new_prog;
4303 insn = new_prog->insnsi + i + delta;
4304 }
4305 patch_call_imm:
4306 fn = prog->aux->ops->get_func_proto(insn->imm);
4307 /* all functions that have a prototype and that the verifier allowed
4308 * programs to call must be real in-kernel functions
4309 */
4310 if (!fn->func) {
4311 verbose("kernel subsystem misconfigured func %s#%d\n",
4312 func_id_name(insn->imm), insn->imm);
4313 return -EFAULT;
4314 }
4315 insn->imm = fn->func - __bpf_call_base;
4316 }
4317
4318 return 0;
4319 }
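/* Sketch of the common case handled at patch_call_imm above, using a
 * hypothetical helper as the example.  The verifier receives calls with the
 * helper id in the immediate:
 *
 *	insn->code = BPF_JMP | BPF_CALL;
 *	insn->imm  = BPF_FUNC_ktime_get_ns;
 *
 * and fixup_bpf_calls() rewrites the immediate into the displacement the
 * interpreter and JITs dispatch on:
 *
 *	insn->imm = fn->func - __bpf_call_base;	/* fn = helper's func_proto */
 *
 * bpf_tail_call is turned into BPF_JMP | BPF_TAIL_CALL instead, and eligible
 * bpf_map_lookup_elem calls may be replaced with the inline sequence
 * generated by the map's map_gen_lookup() callback.
 */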
4320
4321 static void free_states(struct bpf_verifier_env *env)
4322 {
4323 struct bpf_verifier_state_list *sl, *sln;
4324 int i;
4325
4326 if (!env->explored_states)
4327 return;
4328
4329 for (i = 0; i < env->prog->len; i++) {
4330 sl = env->explored_states[i];
4331
4332 if (sl)
4333 while (sl != STATE_LIST_MARK) {
4334 sln = sl->next;
4335 kfree(sl);
4336 sl = sln;
4337 }
4338 }
4339
4340 kfree(env->explored_states);
4341 }
4342
4343 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4344 {
4345 char __user *log_ubuf = NULL;
4346 struct bpf_verifier_env *env;
4347 int ret = -EINVAL;
4348
4349 /* 'struct bpf_verifier_env' can be global, but since it's not small,
4350 * allocate/free it every time bpf_check() is called
4351 */
4352 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4353 if (!env)
4354 return -ENOMEM;
4355
4356 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4357 (*prog)->len);
4358 ret = -ENOMEM;
4359 if (!env->insn_aux_data)
4360 goto err_free_env;
4361 env->prog = *prog;
4362
4363 /* grab the mutex to protect a few globals used by the verifier */
4364 mutex_lock(&bpf_verifier_lock);
4365
4366 if (attr->log_level || attr->log_buf || attr->log_size) {
4367 /* user requested verbose verifier output
4368 * and supplied buffer to store the verification trace
4369 */
4370 log_level = attr->log_level;
4371 log_ubuf = (char __user *) (unsigned long) attr->log_buf;
4372 log_size = attr->log_size;
4373 log_len = 0;
4374
4375 ret = -EINVAL;
4376 /* log_* values have to be sane */
4377 if (log_size < 128 || log_size > UINT_MAX >> 8 ||
4378 log_level == 0 || log_ubuf == NULL)
4379 goto err_unlock;
4380
4381 ret = -ENOMEM;
4382 log_buf = vmalloc(log_size);
4383 if (!log_buf)
4384 goto err_unlock;
4385 } else {
4386 log_level = 0;
4387 }
4388
4389 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
4390 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4391 env->strict_alignment = true;
4392
4393 ret = replace_map_fd_with_map_ptr(env);
4394 if (ret < 0)
4395 goto skip_full_check;
4396
4397 env->explored_states = kcalloc(env->prog->len,
4398 sizeof(struct bpf_verifier_state_list *),
4399 GFP_USER);
4400 ret = -ENOMEM;
4401 if (!env->explored_states)
4402 goto skip_full_check;
4403
4404 ret = check_cfg(env);
4405 if (ret < 0)
4406 goto skip_full_check;
4407
4408 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4409
4410 ret = do_check(env);
4411
4412 skip_full_check:
4413 while (pop_stack(env, NULL) >= 0);
4414 free_states(env);
4415
4416 if (ret == 0)
4417 /* program is valid, convert *(u32*)(ctx + off) accesses */
4418 ret = convert_ctx_accesses(env);
4419
4420 if (ret == 0)
4421 ret = fixup_bpf_calls(env);
4422
4423 if (log_level && log_len >= log_size - 1) {
4424 BUG_ON(log_len >= log_size);
4425 /* verifier log exceeded user supplied buffer */
4426 ret = -ENOSPC;
4427 /* fall through to return what was recorded */
4428 }
4429
4430 /* copy verifier log back to user space including trailing zero */
4431 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
4432 ret = -EFAULT;
4433 goto free_log_buf;
4434 }
4435
4436 if (ret == 0 && env->used_map_cnt) {
4437 /* if program passed verifier, update used_maps in bpf_prog_info */
4438 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
4439 sizeof(env->used_maps[0]),
4440 GFP_KERNEL);
4441
4442 if (!env->prog->aux->used_maps) {
4443 ret = -ENOMEM;
4444 goto free_log_buf;
4445 }
4446
4447 memcpy(env->prog->aux->used_maps, env->used_maps,
4448 sizeof(env->used_maps[0]) * env->used_map_cnt);
4449 env->prog->aux->used_map_cnt = env->used_map_cnt;
4450
4451 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
4452 * bpf_ld_imm64 instructions
4453 */
4454 convert_pseudo_ld_imm64(env);
4455 }
4456
4457 free_log_buf:
4458 if (log_level)
4459 vfree(log_buf);
4460 if (!env->prog->aux->used_maps)
4461 /* if we didn't copy map pointers into bpf_prog_info, release
4462 * them now. Otherwise free_bpf_prog_info() will release them.
4463 */
4464 release_maps(env);
4465 *prog = env->prog;
4466 err_unlock:
4467 mutex_unlock(&bpf_verifier_lock);
4468 vfree(env->insn_aux_data);
4469 err_free_env:
4470 kfree(env);
4471 return ret;
4472 }
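/* Hypothetical userspace counterpart (not part of this file): the
 * log_level/log_buf/log_size members validated above come straight from the
 * BPF_PROG_LOAD attribute, e.g.
 *
 *	char vlog[65536];
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = insn_cnt,
 *		.license   = (__u64)(unsigned long)"GPL",
 *		.log_level = 1,
 *		.log_buf   = (__u64)(unsigned long)vlog,
 *		.log_size  = sizeof(vlog),
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * A non-zero log_level with log_size < 128 or a NULL log_buf makes
 * bpf_check() fail with -EINVAL before the main analysis runs.
 */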
4473
4474 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
4475 void *priv)
4476 {
4477 struct bpf_verifier_env *env;
4478 int ret;
4479
4480 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4481 if (!env)
4482 return -ENOMEM;
4483
4484 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4485 prog->len);
4486 ret = -ENOMEM;
4487 if (!env->insn_aux_data)
4488 goto err_free_env;
4489 env->prog = prog;
4490 env->analyzer_ops = ops;
4491 env->analyzer_priv = priv;
4492
4493 /* grab the mutex to protect a few globals used by the verifier */
4494 mutex_lock(&bpf_verifier_lock);
4495
4496 log_level = 0;
4497
4498 env->strict_alignment = false;
4499 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4500 env->strict_alignment = true;
4501
4502 env->explored_states = kcalloc(env->prog->len,
4503 sizeof(struct bpf_verifier_state_list *),
4504 GFP_KERNEL);
4505 ret = -ENOMEM;
4506 if (!env->explored_states)
4507 goto skip_full_check;
4508
4509 ret = check_cfg(env);
4510 if (ret < 0)
4511 goto skip_full_check;
4512
4513 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4514
4515 ret = do_check(env);
4516
4517 skip_full_check:
4518 while (pop_stack(env, NULL) >= 0);
4519 free_states(env);
4520
4521 mutex_unlock(&bpf_verifier_lock);
4522 vfree(env->insn_aux_data);
4523 err_free_env:
4524 kfree(env);
4525 return ret;
4526 }
4527 EXPORT_SYMBOL_GPL(bpf_analyzer);