/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 128k insns (BPF_COMPLEXITY_LIMIT_INSNS), which may
 * be hit even if the total number of insns is less than 4K, when there are
 * too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
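
/* To make the reference tracking rules above concrete, here is a minimal
 * sketch of a program that acquires and releases a socket reference (an
 * illustrative instruction sequence, not a verbatim selftest):
 *
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),     // R2 = fp
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),   // R2 = fp - 16 (tuple on stack)
 *    ... set up the remaining bpf_sk_lookup_tcp() args in R1, R3-R5 ...
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                   // R0 is PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),    // NULL branch: ref released,
 *                                              // other branch: PTR_TO_SOCKET
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *                   // reference released; omitting this call would make the
 *                   // verifier reject the program for an unreleased ref
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 */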

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
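
/* The map_state word above packs a pointer and a flag into one unsigned
 * long: struct bpf_map allocations are at least word-aligned, so bit 0 is
 * free to carry the "was seen by an unprivileged program" flag. As a worked
 * example (addresses illustrative only): with map == 0xffff888012345680 and
 * unpriv == true, map_state becomes 0xffff888012345681; BPF_MAP_PTR() masks
 * bit 0 off to recover the pointer, and bpf_map_ptr_unpriv() tests it.
 */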

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
	int ptr_id;
};

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL;
}

static bool type_is_refcounted(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET;
}

static bool type_is_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
}

static bool reg_is_refcounted(const struct bpf_reg_state *reg)
{
	return type_is_refcounted(reg->type);
}

static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
{
	return type_is_refcounted_or_null(reg->type);
}

static bool arg_type_is_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCKET;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
			if (t == PTR_TO_STACK)
				verbose(env, ",call_%d", func(env, reg)->callsite);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL)
			verbose(env, "=%s",
				reg_type_str[state->stack[i].spilled_ptr.type]);
		else
			verbose(env, "=%s", types_buf);
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}
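
/* For orientation when reading verifier logs, a state line emitted by the
 * function above looks roughly like this (example output, values
 * illustrative):
 *
 *   R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R6_w=sock(id=3,off=0) R10=fp0,call_-1 fp-8_w=mmmm0000 refs=3
 *
 * i.e. each initialized register with its liveness suffix and type, then
 * each used stack slot with per-byte slot types (or the spilled reg's type),
 * then any outstanding reference ids.
 */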

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
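
/* To make the macro above easier to read: COPY_STATE_FN(reference,
 * acquired_refs, refs, 1) expands to roughly the following (sketch of the
 * expansion, whitespace adjusted):
 *
 *   static int copy_reference_state(struct bpf_func_state *dst,
 *                                   const struct bpf_func_state *src)
 *   {
 *           if (!src->refs)
 *                   return 0;
 *           if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *                   memset(dst, 0, sizeof(*dst));
 *                   return -EFAULT;
 *           }
 *           memcpy(dst->refs, src->refs,
 *                  sizeof(*src->refs) * (src->acquired_refs / 1));
 *           return 0;
 *   }
 */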

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	if (!ptr_id)
		return -EFAULT;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EFAULT;
}

/* variation on the above for cases where we expect that there must be an
 * outstanding reference for the specified ptr_id.
 */
static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
{
	struct bpf_func_state *state = cur_func(env);
	int err;

	err = __release_reference_state(state, ptr_id);
	if (WARN_ON_ONCE(err != 0))
		verbose(env, "verifier internal error: can't release reference\n");
	return err;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose(env, "BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}
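
/* A sketch of how the branch-exploration stack is used (modeled on the
 * pattern in check_cond_jmp_op() later in this file; illustrative, not a
 * verbatim excerpt): when a conditional jump can go both ways, the taken
 * successor is queued and the fall-through path is walked first:
 *
 *	other_branch = push_stack(env, *insn_idx + insn->off + 1,
 *				  *insn_idx, false);
 *	if (!other_branch)
 *		return -EFAULT;
 *	// ... refine register state in both branches, keep walking the
 *	// fall-through path; do_check() later resumes the queued state
 *	// via pop_stack() once the current path hits bpf_exit or is pruned.
 */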

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}
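
/* Worked example (values illustrative): after __mark_reg_known(reg, 5) the
 * tracked state collapses to the single value 5 in every representation:
 * var_off = {.value = 5, .mask = 0} (all 64 bits known), smin_value ==
 * smax_value == 5, and umin_value == umax_value == 5. A subsequent
 * tnum_is_const(reg->var_off) check then succeeds.
 */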

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}
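
/* Worked example (illustrative): suppose var_off = {.value = 0x4, .mask =
 * 0x3}, i.e. bit 2 is known set and bits 0-1 are unknown, so the register
 * can only hold 4, 5, 6 or 7. Then umin_value is raised to at least
 * var_off.value = 4 (all unknown bits zero) and umax_value is lowered to at
 * most value | mask = 7 (all unknown bits one); the signed bounds follow
 * the same logic with the sign bit split out.
 */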

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}
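
/* Worked example (illustrative): with umin_value = 0 and umax_value = 3,
 * tnum_range() yields {.value = 0, .mask = 0x3} - the low two bits unknown,
 * all higher bits known zero. Intersecting that with the existing var_off
 * can only add known bits, never lose any, so the bounds tracking and the
 * tnum tracking reinforce each other.
 */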

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}

static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;

}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level > 1)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
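
/* For illustration, a two-function program and the subprog boundaries that
 * check_subprogs() derives from it (sketch; the insn indices on the left
 * exist only for this example):
 *
 *   0: BPF_MOV64_IMM(BPF_REG_1, 1),
 *   1: BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 2),
 *                                // target = 1 + imm(2) + 1 = insn 4
 *   2: BPF_MOV64_IMM(BPF_REG_0, 0),
 *   3: BPF_EXIT_INSN(),          // subprog 0 = insns [0, 3]
 *   4: BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
 *   5: BPF_EXIT_INSN(),          // subprog 1 = insns [4, 5]
 *
 * The fake 'exit' subprog then starts at insn_cnt = 6, and every jump must
 * stay within its own [start, next start) range.
 */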

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent)
{
	bool writes = parent == state->parent; /* Observe write marks */

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* ... then we depend on parent's value */
		parent->live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
	return 0;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno != BPF_REG_FP)
			return mark_reg_read(env, &regs[regno],
					     regs[regno].parent);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		regs[regno].live |= REG_LIVE_WRITTEN;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case CONST_PTR_TO_MAP:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
		return true;
	default:
		return false;
	}
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     struct bpf_func_state *state, /* func where register points to */
			     int off, int size, int value_regno, int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
	enum bpf_reg_type type;

	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
				 state->acquired_refs, true);
	if (err)
		return err;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */
	if (!env->allow_ptr_leaks &&
	    state->stack[spi].slot_type[0] == STACK_SPILL &&
	    size != BPF_REG_SIZE) {
		verbose(env, "attempt to corrupt spilled pointer on stack\n");
		return -EACCES;
	}

	cur = env->cur_state->frame[env->cur_state->curframe];
	if (value_regno >= 0 &&
	    is_spillable_regtype((type = cur->regs[value_regno].type))) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}

		if (state != cur && type == PTR_TO_STACK) {
			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
			return -EINVAL;
		}

		/* save register state */
		state->stack[spi].spilled_ptr = cur->regs[value_regno];
		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		for (i = 0; i < BPF_REG_SIZE; i++) {
			if (state->stack[spi].slot_type[i] == STACK_MISC &&
			    !env->allow_ptr_leaks) {
				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
				int soff = (-spi - 1) * BPF_REG_SIZE;

				/* detected reuse of integer stack slot with a pointer
				 * which means either llvm is reusing stack slot or
				 * an attacker is trying to exploit CVE-2018-3639
				 * (speculative store bypass)
				 * Have to sanitize that slot with preemptive
				 * store of zero.
				 */
				if (*poff && *poff != soff) {
					/* disallow programs where single insn stores
					 * into two different stack slots, since verifier
					 * cannot sanitize them
					 */
					verbose(env,
						"insn %d cannot access two stack slots fp%d and fp%d",
						insn_idx, *poff, soff);
					return -EINVAL;
				}
				*poff = soff;
			}
			state->stack[spi].slot_type[i] = STACK_SPILL;
		}
	} else {
		u8 type = STACK_MISC;

		/* regular write of data into stack destroys any spilled ptr */
		state->stack[spi].spilled_ptr.type = NOT_INIT;
		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
		if (state->stack[spi].slot_type[0] == STACK_SPILL)
			for (i = 0; i < BPF_REG_SIZE; i++)
				state->stack[spi].slot_type[i] = STACK_MISC;

		/* only mark the slot as written if all 8 bytes were written
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to first state when programs
		 * writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (value_regno >= 0 &&
		    register_is_null(&cur->regs[value_regno]))
			type = STACK_ZERO;

		/* Mark slots affected by this stack write. */
		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}
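
/* For illustration, a pointer spill/fill that exercises the logic above
 * (sketch; assumes R1 holds PTR_TO_CTX at this point):
 *
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
 *                      // 8-byte store: all fp-8 slots become STACK_SPILL,
 *                      // spilled_ptr snapshots R1's PTR_TO_CTX state
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
 *                      // fill: R2 gets the saved PTR_TO_CTX back
 *
 * A narrower (e.g. 4-byte) store to the same slot would instead be rejected
 * for unprivileged programs with "attempt to corrupt spilled pointer on
 * stack".
 */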

static int check_stack_read(struct bpf_verifier_env *env,
			    struct bpf_func_state *reg_state /* func where register points to */,
			    int off, int size, int value_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	u8 *stype;

	if (reg_state->allocated_stack <= slot) {
		verbose(env, "invalid read from stack off %d+0 size %d\n",
			off, size);
		return -EACCES;
	}
	stype = reg_state->stack[spi].slot_type;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0) {
			/* restore register state from stack */
			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
			      reg_state->stack[spi].spilled_ptr.parent);
		return 0;
	} else {
		int zeros = 0;

		for (i = 0; i < size; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
				continue;
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
				zeros++;
				continue;
			}
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
			      reg_state->stack[spi].spilled_ptr.parent);
		if (value_regno >= 0) {
			if (zeros == size) {
				/* any size read into register is zero extended,
				 * so the whole register == const_zero
				 */
				__mark_reg_const_zero(&state->regs[value_regno]);
			} else {
				/* have read misc data from the stack */
				mark_reg_unknown(env, state->regs, value_regno);
			}
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		return 0;
	}
}

static int check_stack_access(struct bpf_verifier_env *env,
			      const struct bpf_reg_state *reg,
			      int off, int size)
{
	/* Stack accesses must be at a fixed offset, so that we
	 * can determine what type of data were returned. See
	 * check_stack_read().
	 */
	if (!tnum_is_const(reg->var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable stack access var_off=%s off=%d size=%d",
			tn_buf, off, size);
		return -EACCES;
	}

	if (off >= 0 || off < -MAX_BPF_STACK) {
		verbose(env, "invalid stack off=%d size=%d\n", off, size);
		return -EACCES;
	}

	return 0;
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register to this map value, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 */
	if (env->log.level)
		print_verifier_state(env, state);

	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
	if (reg->smin_value < 0 &&
	    (reg->smin_value == S64_MIN ||
	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
	      reg->smin_value + off < 0)) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->smin_value + off, size,
				 zero_size_allowed);
	if (err) {
		verbose(env, "R%d min value is outside of the array range\n",
			regno);
		return err;
	}

	/* If we haven't set a max value then we need to bail since we can't be
	 * sure we won't do bad things.
	 * If reg->umax_value + off could overflow, treat that as unbounded too.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->umax_value + off, size,
				 zero_size_allowed);
	if (err)
		verbose(env, "R%d max value is outside of the array range\n",
			regno);
	return err;
}
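
/* The check above is what makes the classic bounds-checked lookup pattern
 * verify. In restricted C (sketch; 'values', 'key', 'idx' and VALUE_SIZE
 * are hypothetical names, with VALUE_SIZE <= the map's value_size), the
 * compiler emits a comparison that caps the register's umax_value before
 * the access:
 *
 *   char *value = bpf_map_lookup_elem(&values, &key);
 *   if (!value)
 *           return 0;
 *   if (idx < VALUE_SIZE)        // verifier learns idx's umax_value
 *           return value[idx];   // umax_value + off fits in value_size
 *
 * Without the 'if (idx < VALUE_SIZE)' cap, umax_value stays U64_MAX and the
 * access is rejected as an "unbounded memory access".
 */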

#define MAX_PACKET_OFF 0xffff

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{
	switch (env->prog->type) {
	/* Program types only with direct read access go here! */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (t == BPF_WRITE)
			return false;
		/* fallthrough */

	/* Program types with direct read + write access go here! */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;
	default:
		return false;
	}
}

static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    (u64)off + size > reg->range) {
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}

static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	int err;

	/* We may have added a variable offset to the packet pointer; but any
	 * reg->range we have comes after that. We are only checking the fixed
	 * offset.
	 */

	/* We don't allow negative numbers, because we aren't tracking enough
	 * detail to prove they're safe.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d offset is outside of the packet\n", regno);
		return err;
	}

	/* __check_packet_access has made sure "off + size - 1" is within u16.
	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
	 * since otherwise find_good_pkt_pointers would have refused to set the
	 * range info and __check_packet_access would have rejected this pkt
	 * access.
	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
	 */
	env->prog->aux->max_pkt_offset =
		max_t(u32, env->prog->aux->max_pkt_offset,
		      off + reg->umax_value + size - 1);

	return err;
}
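
/* The range that __check_packet_access() tests against is established by the
 * canonical direct packet access pattern (restricted-C sketch for an XDP
 * program; 'data', 'data_end' are the UAPI xdp_md context fields):
 *
 *   void *data = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   struct ethhdr *eth = data;
 *
 *   if (data + sizeof(*eth) > data_end)   // comparison against pkt_end sets
 *           return XDP_DROP;              // reg->range = sizeof(*eth)
 *   return eth->h_proto;                  // off + size <= range, so allowed
 */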
1577
1578 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
1579 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
1580 enum bpf_access_type t, enum bpf_reg_type *reg_type)
1581 {
1582 struct bpf_insn_access_aux info = {
1583 .reg_type = *reg_type,
1584 };
1585
1586 if (env->ops->is_valid_access &&
1587 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
1588 /* A non zero info.ctx_field_size indicates that this field is a
1589 * candidate for later verifier transformation to load the whole
1590 * field and then apply a mask when accessed with a narrower
1591 * access than actual ctx access size. A zero info.ctx_field_size
1592 * will only allow for whole field access and rejects any other
1593 * type of narrower access.
1594 */
1595 *reg_type = info.reg_type;
1596
1597 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1598 /* remember the offset of last byte accessed in ctx */
1599 if (env->prog->aux->max_ctx_offset < off + size)
1600 env->prog->aux->max_ctx_offset = off + size;
1601 return 0;
1602 }
1603
1604 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1605 return -EACCES;
1606 }
1607
1608 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1609 int size)
1610 {
1611 if (size < 0 || off < 0 ||
1612 (u64)off + size > sizeof(struct bpf_flow_keys)) {
1613 verbose(env, "invalid access to flow keys off=%d size=%d\n",
1614 off, size);
1615 return -EACCES;
1616 }
1617 return 0;
1618 }
1619
1620 static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
1621 int size, enum bpf_access_type t)
1622 {
1623 struct bpf_reg_state *regs = cur_regs(env);
1624 struct bpf_reg_state *reg = &regs[regno];
1625 struct bpf_insn_access_aux info;
1626
1627 if (reg->smin_value < 0) {
1628 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1629 regno);
1630 return -EACCES;
1631 }
1632
1633 if (!bpf_sock_is_valid_access(off, size, t, &info)) {
1634 verbose(env, "invalid bpf_sock access off=%d size=%d\n",
1635 off, size);
1636 return -EACCES;
1637 }
1638
1639 return 0;
1640 }
1641
1642 static bool __is_pointer_value(bool allow_ptr_leaks,
1643 const struct bpf_reg_state *reg)
1644 {
1645 if (allow_ptr_leaks)
1646 return false;
1647
1648 return reg->type != SCALAR_VALUE;
1649 }
1650
1651 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
1652 {
1653 return cur_regs(env) + regno;
1654 }
1655
1656 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1657 {
1658 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
1659 }
1660
1661 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1662 {
1663 const struct bpf_reg_state *reg = reg_state(env, regno);
1664
1665 return reg->type == PTR_TO_CTX ||
1666 reg->type == PTR_TO_SOCKET;
1667 }
1668
1669 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
1670 {
1671 const struct bpf_reg_state *reg = reg_state(env, regno);
1672
1673 return type_is_pkt_pointer(reg->type);
1674 }
1675
1676 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
1677 {
1678 const struct bpf_reg_state *reg = reg_state(env, regno);
1679
1680 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
1681 return reg->type == PTR_TO_FLOW_KEYS;
1682 }
1683
1684 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
1685 const struct bpf_reg_state *reg,
1686 int off, int size, bool strict)
1687 {
1688 struct tnum reg_off;
1689 int ip_align;
1690
1691 /* Byte size accesses are always allowed. */
1692 if (!strict || size == 1)
1693 return 0;
1694
1695 /* For platforms that do not have a Kconfig enabling
1696 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1697 * NET_IP_ALIGN is universally set to '2'. And on platforms
1698 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1699 * to this code only in strict mode where we want to emulate
1700 * the NET_IP_ALIGN==2 checking. Therefore use an
1701 * unconditional IP align value of '2'.
1702 */
1703 ip_align = 2;
1704
1705 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1706 if (!tnum_is_aligned(reg_off, size)) {
1707 char tn_buf[48];
1708
1709 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1710 verbose(env,
1711 "misaligned packet access off %d+%s+%d+%d size %d\n",
1712 ip_align, tn_buf, reg->off, off, size);
1713 return -EACCES;
1714 }
1715
1716 return 0;
1717 }
1718
1719 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1720 const struct bpf_reg_state *reg,
1721 const char *pointer_desc,
1722 int off, int size, bool strict)
1723 {
1724 struct tnum reg_off;
1725
1726 /* Byte size accesses are always allowed. */
1727 if (!strict || size == 1)
1728 return 0;
1729
1730 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1731 if (!tnum_is_aligned(reg_off, size)) {
1732 char tn_buf[48];
1733
1734 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1735 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1736 pointer_desc, tn_buf, reg->off, off, size);
1737 return -EACCES;
1738 }
1739
1740 return 0;
1741 }
1742
1743 static int check_ptr_alignment(struct bpf_verifier_env *env,
1744 const struct bpf_reg_state *reg, int off,
1745 int size, bool strict_alignment_once)
1746 {
1747 bool strict = env->strict_alignment || strict_alignment_once;
1748 const char *pointer_desc = "";
1749
1750 switch (reg->type) {
1751 case PTR_TO_PACKET:
1752 case PTR_TO_PACKET_META:
1753 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1754 * right in front, treat it the very same way.
1755 */
1756 return check_pkt_ptr_alignment(env, reg, off, size, strict);
1757 case PTR_TO_FLOW_KEYS:
1758 pointer_desc = "flow keys ";
1759 break;
1760 case PTR_TO_MAP_VALUE:
1761 pointer_desc = "value ";
1762 break;
1763 case PTR_TO_CTX:
1764 pointer_desc = "context ";
1765 break;
1766 case PTR_TO_STACK:
1767 pointer_desc = "stack ";
1768 /* The stack spill tracking logic in check_stack_write()
1769 * and check_stack_read() relies on stack accesses being
1770 * aligned.
1771 */
1772 strict = true;
1773 break;
1774 case PTR_TO_SOCKET:
1775 pointer_desc = "sock ";
1776 break;
1777 default:
1778 break;
1779 }
1780 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1781 strict);
1782 }
1783
1784 static int update_stack_depth(struct bpf_verifier_env *env,
1785 const struct bpf_func_state *func,
1786 int off)
1787 {
1788 u16 stack = env->subprog_info[func->subprogno].stack_depth;
1789
1790 if (stack >= -off)
1791 return 0;
1792
1793 /* update known max for given subprogram */
1794 env->subprog_info[func->subprogno].stack_depth = -off;
1795 return 0;
1796 }
1797
1798 /* starting from the main bpf function walk all instructions of the function
1799 * and recursively walk all callees that the given function can call.
1800 * Ignore jump and exit insns.
1801 * Since recursion is prevented by check_cfg(), this algorithm
1802 * only needs a local stack of MAX_CALL_FRAMES entries to remember callsites.
1803 */
1804 static int check_max_stack_depth(struct bpf_verifier_env *env)
1805 {
1806 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
1807 struct bpf_subprog_info *subprog = env->subprog_info;
1808 struct bpf_insn *insn = env->prog->insnsi;
1809 int ret_insn[MAX_CALL_FRAMES];
1810 int ret_prog[MAX_CALL_FRAMES];
1811
1812 process_func:
1813 /* round up to 32 bytes, since this is the granularity
1814 * of the interpreter stack size
1815 */
1816 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1817 if (depth > MAX_BPF_STACK) {
1818 verbose(env, "combined stack size of %d calls is %d. Too large\n",
1819 frame + 1, depth);
1820 return -EACCES;
1821 }
1822 continue_func:
1823 subprog_end = subprog[idx + 1].start;
1824 for (; i < subprog_end; i++) {
1825 if (insn[i].code != (BPF_JMP | BPF_CALL))
1826 continue;
1827 if (insn[i].src_reg != BPF_PSEUDO_CALL)
1828 continue;
1829 /* remember insn and function to return to */
1830 ret_insn[frame] = i + 1;
1831 ret_prog[frame] = idx;
1832
1833 /* find the callee */
1834 i = i + insn[i].imm + 1;
1835 idx = find_subprog(env, i);
1836 if (idx < 0) {
1837 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1838 i);
1839 return -EFAULT;
1840 }
1841 frame++;
1842 if (frame >= MAX_CALL_FRAMES) {
1843 WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1844 return -EFAULT;
1845 }
1846 goto process_func;
1847 }
1848 /* end of for() loop means the last insn of the 'subprog'
1849 * was reached. Doesn't matter whether it was JA or EXIT
1850 */
1851 if (frame == 0)
1852 return 0;
1853 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1854 frame--;
1855 i = ret_insn[frame];
1856 idx = ret_prog[frame];
1857 goto continue_func;
1858 }
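
/* Worked example, for illustration: with MAX_BPF_STACK == 512, a main
 * function using 200 bytes of stack that calls a subprog using 400 bytes
 * is rejected, since round_up(200, 32) + round_up(400, 32) =
 * 224 + 416 = 640 > 512. The same subprog is fine when reached through a
 * shallower call chain, because depth is accounted per call chain.
 */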
1859
1860 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1861 static int get_callee_stack_depth(struct bpf_verifier_env *env,
1862 const struct bpf_insn *insn, int idx)
1863 {
1864 int start = idx + insn->imm + 1, subprog;
1865
1866 subprog = find_subprog(env, start);
1867 if (subprog < 0) {
1868 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1869 start);
1870 return -EFAULT;
1871 }
1872 return env->subprog_info[subprog].stack_depth;
1873 }
1874 #endif
1875
1876 static int check_ctx_reg(struct bpf_verifier_env *env,
1877 const struct bpf_reg_state *reg, int regno)
1878 {
1879 /* Access to ctx or passing it to a helper is only allowed in
1880 * its original, unmodified form.
1881 */
1882
1883 if (reg->off) {
1884 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1885 regno, reg->off);
1886 return -EACCES;
1887 }
1888
1889 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1890 char tn_buf[48];
1891
1892 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1893 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
1894 return -EACCES;
1895 }
1896
1897 return 0;
1898 }
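
/* Example, for illustration (R1 holds the PTR_TO_CTX passed to the
 * program):
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 *
 * fails with "dereference of modified ctx ptr" because reg->off == 8,
 * whereas BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 8) on the unmodified
 * R1 is fine: the constant offset is carried in the insn and validated
 * by check_ctx_access().
 */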
1899
1900 /* truncate register to a smaller size (in bytes);
1901 * must be called with size < BPF_REG_SIZE
1902 */
1903 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1904 {
1905 u64 mask;
1906
1907 /* clear high bits in bit representation */
1908 reg->var_off = tnum_cast(reg->var_off, size);
1909
1910 /* fix arithmetic bounds */
1911 mask = ((u64)1 << (size * 8)) - 1;
1912 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1913 reg->umin_value &= mask;
1914 reg->umax_value &= mask;
1915 } else {
1916 reg->umin_value = 0;
1917 reg->umax_value = mask;
1918 }
1919 reg->smin_value = reg->umin_value;
1920 reg->smax_value = reg->umax_value;
1921 }
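
/* Example, for illustration: coercing a completely unknown scalar
 * ([0, U64_MAX]) to size 1 yields umin_value = 0, umax_value = 0xff;
 * the signed bounds are then copied from the unsigned ones, which is
 * safe because the truncated value always fits the positive s64 range.
 */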
1922
1923 /* check whether memory at (regno + off) is accessible for t = (read | write)
1924 * if t==write, value_regno is a register whose value is stored into memory
1925 * if t==read, value_regno is a register that will receive the value from memory
1926 * if t==write && value_regno==-1, some unknown value is stored into memory
1927 * if t==read && value_regno==-1, don't care what we read from memory
1928 */
1929 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1930 int off, int bpf_size, enum bpf_access_type t,
1931 int value_regno, bool strict_alignment_once)
1932 {
1933 struct bpf_reg_state *regs = cur_regs(env);
1934 struct bpf_reg_state *reg = regs + regno;
1935 struct bpf_func_state *state;
1936 int size, err = 0;
1937
1938 size = bpf_size_to_bytes(bpf_size);
1939 if (size < 0)
1940 return size;
1941
1942 /* alignment checks will add in reg->off themselves */
1943 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1944 if (err)
1945 return err;
1946
1947 /* for access checks, reg->off is just part of off */
1948 off += reg->off;
1949
1950 if (reg->type == PTR_TO_MAP_VALUE) {
1951 if (t == BPF_WRITE && value_regno >= 0 &&
1952 is_pointer_value(env, value_regno)) {
1953 verbose(env, "R%d leaks addr into map\n", value_regno);
1954 return -EACCES;
1955 }
1956
1957 err = check_map_access(env, regno, off, size, false);
1958 if (!err && t == BPF_READ && value_regno >= 0)
1959 mark_reg_unknown(env, regs, value_regno);
1960
1961 } else if (reg->type == PTR_TO_CTX) {
1962 enum bpf_reg_type reg_type = SCALAR_VALUE;
1963
1964 if (t == BPF_WRITE && value_regno >= 0 &&
1965 is_pointer_value(env, value_regno)) {
1966 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1967 return -EACCES;
1968 }
1969
1970 err = check_ctx_reg(env, reg, regno);
1971 if (err < 0)
1972 return err;
1973
1974 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1975 if (!err && t == BPF_READ && value_regno >= 0) {
1976 /* ctx access returns either a scalar, or a
1977 * PTR_TO_PACKET[_META,_END]. In the latter
1978 * case, we know the offset is zero.
1979 */
1980 if (reg_type == SCALAR_VALUE)
1981 mark_reg_unknown(env, regs, value_regno);
1982 else
1983 mark_reg_known_zero(env, regs,
1984 value_regno);
1985 regs[value_regno].type = reg_type;
1986 }
1987
1988 } else if (reg->type == PTR_TO_STACK) {
1989 off += reg->var_off.value;
1990 err = check_stack_access(env, reg, off, size);
1991 if (err)
1992 return err;
1993
1994 state = func(env, reg);
1995 err = update_stack_depth(env, state, off);
1996 if (err)
1997 return err;
1998
1999 if (t == BPF_WRITE)
2000 err = check_stack_write(env, state, off, size,
2001 value_regno, insn_idx);
2002 else
2003 err = check_stack_read(env, state, off, size,
2004 value_regno);
2005 } else if (reg_is_pkt_pointer(reg)) {
2006 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
2007 verbose(env, "cannot write into packet\n");
2008 return -EACCES;
2009 }
2010 if (t == BPF_WRITE && value_regno >= 0 &&
2011 is_pointer_value(env, value_regno)) {
2012 verbose(env, "R%d leaks addr into packet\n",
2013 value_regno);
2014 return -EACCES;
2015 }
2016 err = check_packet_access(env, regno, off, size, false);
2017 if (!err && t == BPF_READ && value_regno >= 0)
2018 mark_reg_unknown(env, regs, value_regno);
2019 } else if (reg->type == PTR_TO_FLOW_KEYS) {
2020 if (t == BPF_WRITE && value_regno >= 0 &&
2021 is_pointer_value(env, value_regno)) {
2022 verbose(env, "R%d leaks addr into flow keys\n",
2023 value_regno);
2024 return -EACCES;
2025 }
2026
2027 err = check_flow_keys_access(env, off, size);
2028 if (!err && t == BPF_READ && value_regno >= 0)
2029 mark_reg_unknown(env, regs, value_regno);
2030 } else if (reg->type == PTR_TO_SOCKET) {
2031 if (t == BPF_WRITE) {
2032 verbose(env, "cannot write into socket\n");
2033 return -EACCES;
2034 }
2035 err = check_sock_access(env, regno, off, size, t);
2036 if (!err && value_regno >= 0)
2037 mark_reg_unknown(env, regs, value_regno);
2038 } else {
2039 verbose(env, "R%d invalid mem access '%s'\n", regno,
2040 reg_type_str[reg->type]);
2041 return -EACCES;
2042 }
2043
2044 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2045 regs[value_regno].type == SCALAR_VALUE) {
2046 /* b/h/w load zero-extends, mark upper bits as known 0 */
2047 coerce_reg_to_size(&regs[value_regno], size);
2048 }
2049 return err;
2050 }
2051
2052 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
2053 {
2054 int err;
2055
2056 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
2057 insn->imm != 0) {
2058 verbose(env, "BPF_XADD uses reserved fields\n");
2059 return -EINVAL;
2060 }
2061
2062 /* check src1 operand */
2063 err = check_reg_arg(env, insn->src_reg, SRC_OP);
2064 if (err)
2065 return err;
2066
2067 /* check src2 operand */
2068 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2069 if (err)
2070 return err;
2071
2072 if (is_pointer_value(env, insn->src_reg)) {
2073 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
2074 return -EACCES;
2075 }
2076
2077 if (is_ctx_reg(env, insn->dst_reg) ||
2078 is_pkt_reg(env, insn->dst_reg) ||
2079 is_flow_key_reg(env, insn->dst_reg)) {
2080 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2081 insn->dst_reg,
2082 reg_type_str[reg_state(env, insn->dst_reg)->type]);
2083 return -EACCES;
2084 }
2085
2086 /* check whether atomic_add can read the memory */
2087 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2088 BPF_SIZE(insn->code), BPF_READ, -1, true);
2089 if (err)
2090 return err;
2091
2092 /* check whether atomic_add can write into the same memory */
2093 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2094 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
2095 }
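
/* Example, for illustration (assumes the slot at fp-8 was previously
 * initialized; otherwise the BPF_READ half of check_xadd() fails):
 *
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8)
 *
 * atomically adds R0 to the 8-byte stack slot at fp-8 and is verified
 * as a read plus a write of the same location.
 */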
2096
2097 /* when register 'regno' is passed into a function that will read 'access_size'
2098 * bytes from that pointer, make sure that it's within the stack boundary
2099 * and all elements of the stack are initialized.
2100 * Unlike most pointer bounds-checking functions, this one doesn't take an
2101 * 'off' argument, so it has to add in reg->off itself.
2102 */
2103 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
2104 int access_size, bool zero_size_allowed,
2105 struct bpf_call_arg_meta *meta)
2106 {
2107 struct bpf_reg_state *reg = reg_state(env, regno);
2108 struct bpf_func_state *state = func(env, reg);
2109 int off, i, slot, spi;
2110
2111 if (reg->type != PTR_TO_STACK) {
2112 /* Allow zero-byte read from NULL, regardless of pointer type */
2113 if (zero_size_allowed && access_size == 0 &&
2114 register_is_null(reg))
2115 return 0;
2116
2117 verbose(env, "R%d type=%s expected=%s\n", regno,
2118 reg_type_str[reg->type],
2119 reg_type_str[PTR_TO_STACK]);
2120 return -EACCES;
2121 }
2122
2123 /* Only allow fixed-offset stack reads */
2124 if (!tnum_is_const(reg->var_off)) {
2125 char tn_buf[48];
2126
2127 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2128 verbose(env, "invalid variable stack read R%d var_off=%s\n",
2129 regno, tn_buf);
2130 return -EACCES;
2131 }
2132 off = reg->off + reg->var_off.value;
2133 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
2134 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
2135 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
2136 regno, off, access_size);
2137 return -EACCES;
2138 }
2139
2140 if (meta && meta->raw_mode) {
2141 meta->access_size = access_size;
2142 meta->regno = regno;
2143 return 0;
2144 }
2145
2146 for (i = 0; i < access_size; i++) {
2147 u8 *stype;
2148
2149 slot = -(off + i) - 1;
2150 spi = slot / BPF_REG_SIZE;
2151 if (state->allocated_stack <= slot)
2152 goto err;
2153 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
2154 if (*stype == STACK_MISC)
2155 goto mark;
2156 if (*stype == STACK_ZERO) {
2157 /* helper can write anything into the stack */
2158 *stype = STACK_MISC;
2159 goto mark;
2160 }
2161 err:
2162 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
2163 off, i, access_size);
2164 return -EACCES;
2165 mark:
2166 /* reading any byte out of 8-byte 'spill_slot' will cause
2167 * the whole slot to be marked as 'read'
2168 */
2169 mark_reg_read(env, &state->stack[spi].spilled_ptr,
2170 state->stack[spi].spilled_ptr.parent);
2171 }
2172 return update_stack_depth(env, state, off);
2173 }
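
/* Example, for illustration: for a helper argument pair
 * (ARG_PTR_TO_MEM, ARG_CONST_SIZE) with R2 = fp - 16 and R3 = 16, this
 * verifies that fp-16 .. fp-1 lie within the stack and that each byte
 * is STACK_MISC or STACK_ZERO, i.e. was written before the call. In
 * raw mode (ARG_PTR_TO_UNINIT_MEM) only the bounds are enforced here
 * and the bytes are marked written by the caller afterwards.
 */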
2174
2175 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
2176 int access_size, bool zero_size_allowed,
2177 struct bpf_call_arg_meta *meta)
2178 {
2179 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2180
2181 switch (reg->type) {
2182 case PTR_TO_PACKET:
2183 case PTR_TO_PACKET_META:
2184 return check_packet_access(env, regno, reg->off, access_size,
2185 zero_size_allowed);
2186 case PTR_TO_MAP_VALUE:
2187 return check_map_access(env, regno, reg->off, access_size,
2188 zero_size_allowed);
2189 default: /* scalar_value|ptr_to_stack or invalid ptr */
2190 return check_stack_boundary(env, regno, access_size,
2191 zero_size_allowed, meta);
2192 }
2193 }
2194
2195 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
2196 {
2197 return type == ARG_PTR_TO_MEM ||
2198 type == ARG_PTR_TO_MEM_OR_NULL ||
2199 type == ARG_PTR_TO_UNINIT_MEM;
2200 }
2201
2202 static bool arg_type_is_mem_size(enum bpf_arg_type type)
2203 {
2204 return type == ARG_CONST_SIZE ||
2205 type == ARG_CONST_SIZE_OR_ZERO;
2206 }
2207
2208 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
2209 enum bpf_arg_type arg_type,
2210 struct bpf_call_arg_meta *meta)
2211 {
2212 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2213 enum bpf_reg_type expected_type, type = reg->type;
2214 int err = 0;
2215
2216 if (arg_type == ARG_DONTCARE)
2217 return 0;
2218
2219 err = check_reg_arg(env, regno, SRC_OP);
2220 if (err)
2221 return err;
2222
2223 if (arg_type == ARG_ANYTHING) {
2224 if (is_pointer_value(env, regno)) {
2225 verbose(env, "R%d leaks addr into helper function\n",
2226 regno);
2227 return -EACCES;
2228 }
2229 return 0;
2230 }
2231
2232 if (type_is_pkt_pointer(type) &&
2233 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
2234 verbose(env, "helper access to the packet is not allowed\n");
2235 return -EACCES;
2236 }
2237
2238 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2239 arg_type == ARG_PTR_TO_MAP_VALUE ||
2240 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2241 expected_type = PTR_TO_STACK;
2242 if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
2243 type != expected_type)
2244 goto err_type;
2245 } else if (arg_type == ARG_CONST_SIZE ||
2246 arg_type == ARG_CONST_SIZE_OR_ZERO) {
2247 expected_type = SCALAR_VALUE;
2248 if (type != expected_type)
2249 goto err_type;
2250 } else if (arg_type == ARG_CONST_MAP_PTR) {
2251 expected_type = CONST_PTR_TO_MAP;
2252 if (type != expected_type)
2253 goto err_type;
2254 } else if (arg_type == ARG_PTR_TO_CTX) {
2255 expected_type = PTR_TO_CTX;
2256 if (type != expected_type)
2257 goto err_type;
2258 err = check_ctx_reg(env, reg, regno);
2259 if (err < 0)
2260 return err;
2261 } else if (arg_type == ARG_PTR_TO_SOCKET) {
2262 expected_type = PTR_TO_SOCKET;
2263 if (type != expected_type)
2264 goto err_type;
2265 if (meta->ptr_id || !reg->id) {
2266 verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
2267 meta->ptr_id, reg->id);
2268 return -EFAULT;
2269 }
2270 meta->ptr_id = reg->id;
2271 } else if (arg_type_is_mem_ptr(arg_type)) {
2272 expected_type = PTR_TO_STACK;
2273 /* One exception here: in case the function allows NULL to be
2274 * passed in as the argument, it's a SCALAR_VALUE type. The final
2275 * test happens during stack boundary checking.
2276 */
2277 if (register_is_null(reg) &&
2278 arg_type == ARG_PTR_TO_MEM_OR_NULL)
2279 /* final test in check_stack_boundary() */;
2280 else if (!type_is_pkt_pointer(type) &&
2281 type != PTR_TO_MAP_VALUE &&
2282 type != expected_type)
2283 goto err_type;
2284 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
2285 } else {
2286 verbose(env, "unsupported arg_type %d\n", arg_type);
2287 return -EFAULT;
2288 }
2289
2290 if (arg_type == ARG_CONST_MAP_PTR) {
2291 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2292 meta->map_ptr = reg->map_ptr;
2293 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2294 /* bpf_map_xxx(..., map_ptr, ..., key) call:
2295 * check that [key, key + map->key_size) are within
2296 * stack limits and initialized
2297 */
2298 if (!meta->map_ptr) {
2299 /* in the function declaration map_ptr must come before
2300 * map_key, so that it's verified and known before
2301 * we have to check map_key here. Otherwise it means
2302 * that the kernel subsystem misconfigured the verifier
2303 */
2304 verbose(env, "invalid map_ptr to access map->key\n");
2305 return -EACCES;
2306 }
2307 err = check_helper_mem_access(env, regno,
2308 meta->map_ptr->key_size, false,
2309 NULL);
2310 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
2311 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2312 /* bpf_map_xxx(..., map_ptr, ..., value) call:
2313 * check [value, value + map->value_size) validity
2314 */
2315 if (!meta->map_ptr) {
2316 /* kernel subsystem misconfigured verifier */
2317 verbose(env, "invalid map_ptr to access map->value\n");
2318 return -EACCES;
2319 }
2320 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
2321 err = check_helper_mem_access(env, regno,
2322 meta->map_ptr->value_size, false,
2323 meta);
2324 } else if (arg_type_is_mem_size(arg_type)) {
2325 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2326
2327 /* remember the mem_size which may be used later
2328 * to refine return values.
2329 */
2330 meta->msize_smax_value = reg->smax_value;
2331 meta->msize_umax_value = reg->umax_value;
2332
2333 /* The register is SCALAR_VALUE; the access check
2334 * happens using its boundaries.
2335 */
2336 if (!tnum_is_const(reg->var_off))
2337 /* For unprivileged variable accesses, disable raw
2338 * mode so that the program is required to
2339 * initialize all the memory that the helper could
2340 * just partially fill up.
2341 */
2342 meta = NULL;
2343
2344 if (reg->smin_value < 0) {
2345 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2346 regno);
2347 return -EACCES;
2348 }
2349
2350 if (reg->umin_value == 0) {
2351 err = check_helper_mem_access(env, regno - 1, 0,
2352 zero_size_allowed,
2353 meta);
2354 if (err)
2355 return err;
2356 }
2357
2358 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2359 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2360 regno);
2361 return -EACCES;
2362 }
2363 err = check_helper_mem_access(env, regno - 1,
2364 reg->umax_value,
2365 zero_size_allowed, meta);
2366 }
2367
2368 return err;
2369 err_type:
2370 verbose(env, "R%d type=%s expected=%s\n", regno,
2371 reg_type_str[type], reg_type_str[expected_type]);
2372 return -EACCES;
2373 }
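
/* Example, for illustration (map_fd is a hypothetical placeholder, and
 * BPF_LD_MAP_FD is the two-insn BPF_LD_IMM64 pseudo used by tools to
 * patch in the map pointer): a minimal call site that satisfies
 * ARG_CONST_MAP_PTR plus ARG_PTR_TO_MAP_KEY for a map with key_size == 4:
 *
 *   BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),    initialize key at fp-4
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),   r2 = fp - 4
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),        r1 = map pointer
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 */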
2374
2375 static int check_map_func_compatibility(struct bpf_verifier_env *env,
2376 struct bpf_map *map, int func_id)
2377 {
2378 if (!map)
2379 return 0;
2380
2381 /* We need a two way check, first is from map perspective ... */
2382 switch (map->map_type) {
2383 case BPF_MAP_TYPE_PROG_ARRAY:
2384 if (func_id != BPF_FUNC_tail_call)
2385 goto error;
2386 break;
2387 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2388 if (func_id != BPF_FUNC_perf_event_read &&
2389 func_id != BPF_FUNC_perf_event_output &&
2390 func_id != BPF_FUNC_perf_event_read_value)
2391 goto error;
2392 break;
2393 case BPF_MAP_TYPE_STACK_TRACE:
2394 if (func_id != BPF_FUNC_get_stackid)
2395 goto error;
2396 break;
2397 case BPF_MAP_TYPE_CGROUP_ARRAY:
2398 if (func_id != BPF_FUNC_skb_under_cgroup &&
2399 func_id != BPF_FUNC_current_task_under_cgroup)
2400 goto error;
2401 break;
2402 case BPF_MAP_TYPE_CGROUP_STORAGE:
2403 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
2404 if (func_id != BPF_FUNC_get_local_storage)
2405 goto error;
2406 break;
2407 /* devmap entries hold a pointer to a live net_device that we cannot
2408 * allow to be modified from the bpf side. So do not allow element
2409 * lookups for now.
2410 */
2411 case BPF_MAP_TYPE_DEVMAP:
2412 if (func_id != BPF_FUNC_redirect_map)
2413 goto error;
2414 break;
2415 /* Restrict bpf side of cpumap and xskmap, open when use-cases
2416 * appear.
2417 */
2418 case BPF_MAP_TYPE_CPUMAP:
2419 case BPF_MAP_TYPE_XSKMAP:
2420 if (func_id != BPF_FUNC_redirect_map)
2421 goto error;
2422 break;
2423 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2424 case BPF_MAP_TYPE_HASH_OF_MAPS:
2425 if (func_id != BPF_FUNC_map_lookup_elem)
2426 goto error;
2427 break;
2428 case BPF_MAP_TYPE_SOCKMAP:
2429 if (func_id != BPF_FUNC_sk_redirect_map &&
2430 func_id != BPF_FUNC_sock_map_update &&
2431 func_id != BPF_FUNC_map_delete_elem &&
2432 func_id != BPF_FUNC_msg_redirect_map)
2433 goto error;
2434 break;
2435 case BPF_MAP_TYPE_SOCKHASH:
2436 if (func_id != BPF_FUNC_sk_redirect_hash &&
2437 func_id != BPF_FUNC_sock_hash_update &&
2438 func_id != BPF_FUNC_map_delete_elem &&
2439 func_id != BPF_FUNC_msg_redirect_hash)
2440 goto error;
2441 break;
2442 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
2443 if (func_id != BPF_FUNC_sk_select_reuseport)
2444 goto error;
2445 break;
2446 case BPF_MAP_TYPE_QUEUE:
2447 case BPF_MAP_TYPE_STACK:
2448 if (func_id != BPF_FUNC_map_peek_elem &&
2449 func_id != BPF_FUNC_map_pop_elem &&
2450 func_id != BPF_FUNC_map_push_elem)
2451 goto error;
2452 break;
2453 default:
2454 break;
2455 }
2456
2457 /* ... and second from the function itself. */
2458 switch (func_id) {
2459 case BPF_FUNC_tail_call:
2460 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2461 goto error;
2462 if (env->subprog_cnt > 1) {
2463 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2464 return -EINVAL;
2465 }
2466 break;
2467 case BPF_FUNC_perf_event_read:
2468 case BPF_FUNC_perf_event_output:
2469 case BPF_FUNC_perf_event_read_value:
2470 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2471 goto error;
2472 break;
2473 case BPF_FUNC_get_stackid:
2474 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2475 goto error;
2476 break;
2477 case BPF_FUNC_current_task_under_cgroup:
2478 case BPF_FUNC_skb_under_cgroup:
2479 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2480 goto error;
2481 break;
2482 case BPF_FUNC_redirect_map:
2483 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2484 map->map_type != BPF_MAP_TYPE_CPUMAP &&
2485 map->map_type != BPF_MAP_TYPE_XSKMAP)
2486 goto error;
2487 break;
2488 case BPF_FUNC_sk_redirect_map:
2489 case BPF_FUNC_msg_redirect_map:
2490 case BPF_FUNC_sock_map_update:
2491 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2492 goto error;
2493 break;
2494 case BPF_FUNC_sk_redirect_hash:
2495 case BPF_FUNC_msg_redirect_hash:
2496 case BPF_FUNC_sock_hash_update:
2497 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
2498 goto error;
2499 break;
2500 case BPF_FUNC_get_local_storage:
2501 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
2502 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
2503 goto error;
2504 break;
2505 case BPF_FUNC_sk_select_reuseport:
2506 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
2507 goto error;
2508 break;
2509 case BPF_FUNC_map_peek_elem:
2510 case BPF_FUNC_map_pop_elem:
2511 case BPF_FUNC_map_push_elem:
2512 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
2513 map->map_type != BPF_MAP_TYPE_STACK)
2514 goto error;
2515 break;
2516 default:
2517 break;
2518 }
2519
2520 return 0;
2521 error:
2522 verbose(env, "cannot pass map_type %d into func %s#%d\n",
2523 map->map_type, func_id_name(func_id), func_id);
2524 return -EINVAL;
2525 }
2526
2527 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2528 {
2529 int count = 0;
2530
2531 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2532 count++;
2533 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2534 count++;
2535 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2536 count++;
2537 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2538 count++;
2539 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2540 count++;
2541
2542 /* We only support one arg being in raw mode at the moment,
2543 * which is sufficient for the helper functions we have
2544 * right now.
2545 */
2546 return count <= 1;
2547 }
2548
2549 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2550 enum bpf_arg_type arg_next)
2551 {
2552 return (arg_type_is_mem_ptr(arg_curr) &&
2553 !arg_type_is_mem_size(arg_next)) ||
2554 (!arg_type_is_mem_ptr(arg_curr) &&
2555 arg_type_is_mem_size(arg_next));
2556 }
2557
2558 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2559 {
2560 /* bpf_xxx(..., buf, len) call will access 'len'
2561 * bytes from memory 'buf'. Both arg types need
2562 * to be paired, so make sure there's no buggy
2563 * helper function specification.
2564 */
2565 if (arg_type_is_mem_size(fn->arg1_type) ||
2566 arg_type_is_mem_ptr(fn->arg5_type) ||
2567 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2568 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2569 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2570 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2571 return false;
2572
2573 return true;
2574 }
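
/* Example, for illustration: bpf_perf_event_output()'s proto pairs
 * arg4_type = ARG_PTR_TO_MEM with arg5_type = ARG_CONST_SIZE_OR_ZERO.
 * A proto that declared a mem pointer in arg4 but no size type in arg5
 * would be flagged here as a buggy helper specification.
 */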
2575
2576 static bool check_refcount_ok(const struct bpf_func_proto *fn)
2577 {
2578 int count = 0;
2579
2580 if (arg_type_is_refcounted(fn->arg1_type))
2581 count++;
2582 if (arg_type_is_refcounted(fn->arg2_type))
2583 count++;
2584 if (arg_type_is_refcounted(fn->arg3_type))
2585 count++;
2586 if (arg_type_is_refcounted(fn->arg4_type))
2587 count++;
2588 if (arg_type_is_refcounted(fn->arg5_type))
2589 count++;
2590
2591 /* We only support one arg being refcounted at the moment,
2592 * which is sufficient for the helper functions we have right now.
2593 */
2594 return count <= 1;
2595 }
2596
2597 static int check_func_proto(const struct bpf_func_proto *fn)
2598 {
2599 return check_raw_mode_ok(fn) &&
2600 check_arg_pair_ok(fn) &&
2601 check_refcount_ok(fn) ? 0 : -EINVAL;
2602 }
2603
2604 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2605 * are now invalid, so turn them into unknown SCALAR_VALUE.
2606 */
2607 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2608 struct bpf_func_state *state)
2609 {
2610 struct bpf_reg_state *regs = state->regs, *reg;
2611 int i;
2612
2613 for (i = 0; i < MAX_BPF_REG; i++)
2614 if (reg_is_pkt_pointer_any(&regs[i]))
2615 mark_reg_unknown(env, regs, i);
2616
2617 bpf_for_each_spilled_reg(i, state, reg) {
2618 if (!reg)
2619 continue;
2620 if (reg_is_pkt_pointer_any(reg))
2621 __mark_reg_unknown(reg);
2622 }
2623 }
2624
2625 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2626 {
2627 struct bpf_verifier_state *vstate = env->cur_state;
2628 int i;
2629
2630 for (i = 0; i <= vstate->curframe; i++)
2631 __clear_all_pkt_pointers(env, vstate->frame[i]);
2632 }
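
/* Example, for illustration: after calling bpf_skb_pull_data(), for
 * which bpf_helper_changes_pkt_data() is true, a PTR_TO_PACKET saved
 * in R6 or spilled to the stack becomes an unknown scalar; the program
 * must reload data/data_end from the ctx before further direct packet
 * access.
 */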
2633
2634 static void release_reg_references(struct bpf_verifier_env *env,
2635 struct bpf_func_state *state, int id)
2636 {
2637 struct bpf_reg_state *regs = state->regs, *reg;
2638 int i;
2639
2640 for (i = 0; i < MAX_BPF_REG; i++)
2641 if (regs[i].id == id)
2642 mark_reg_unknown(env, regs, i);
2643
2644 bpf_for_each_spilled_reg(i, state, reg) {
2645 if (!reg)
2646 continue;
2647 if (reg_is_refcounted(reg) && reg->id == id)
2648 __mark_reg_unknown(reg);
2649 }
2650 }
2651
2652 /* The pointer with the specified id has released its reference to kernel
2653 * resources. Identify all copies of the same pointer and clear the reference.
2654 */
2655 static int release_reference(struct bpf_verifier_env *env,
2656 struct bpf_call_arg_meta *meta)
2657 {
2658 struct bpf_verifier_state *vstate = env->cur_state;
2659 int i;
2660
2661 for (i = 0; i <= vstate->curframe; i++)
2662 release_reg_references(env, vstate->frame[i], meta->ptr_id);
2663
2664 return release_reference_state(env, meta->ptr_id);
2665 }
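
/* Example, for illustration: after sk = bpf_sk_lookup_tcp(...), all
 * copies of sk share one ref id, so bpf_sk_release() on any copy
 * clears every alias via release_reg_references(); exiting without
 * the release is caught later by check_reference_leak().
 */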
2666
2667 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2668 int *insn_idx)
2669 {
2670 struct bpf_verifier_state *state = env->cur_state;
2671 struct bpf_func_state *caller, *callee;
2672 int i, err, subprog, target_insn;
2673
2674 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2675 verbose(env, "the call stack of %d frames is too deep\n",
2676 state->curframe + 2);
2677 return -E2BIG;
2678 }
2679
2680 target_insn = *insn_idx + insn->imm;
2681 subprog = find_subprog(env, target_insn + 1);
2682 if (subprog < 0) {
2683 verbose(env, "verifier bug. No program starts at insn %d\n",
2684 target_insn + 1);
2685 return -EFAULT;
2686 }
2687
2688 caller = state->frame[state->curframe];
2689 if (state->frame[state->curframe + 1]) {
2690 verbose(env, "verifier bug. Frame %d already allocated\n",
2691 state->curframe + 1);
2692 return -EFAULT;
2693 }
2694
2695 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2696 if (!callee)
2697 return -ENOMEM;
2698 state->frame[state->curframe + 1] = callee;
2699
2700 /* callee cannot access r0, r6 - r9 for reading and has to write
2701 * into its own stack before reading from it.
2702 * callee can read/write into caller's stack
2703 */
2704 init_func_state(env, callee,
2705 /* remember the callsite, it will be used by bpf_exit */
2706 *insn_idx /* callsite */,
2707 state->curframe + 1 /* frameno within this callchain */,
2708 subprog /* subprog number within this prog */);
2709
2710 /* Transfer references to the callee */
2711 err = transfer_reference_state(callee, caller);
2712 if (err)
2713 return err;
2714
2715 /* copy r1 - r5 args that callee can access. The copy includes parent
2716 * pointers, which connects us up to the liveness chain
2717 */
2718 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2719 callee->regs[i] = caller->regs[i];
2720
2721 /* after the call registers r0 - r5 were scratched */
2722 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2723 mark_reg_not_init(env, caller->regs, caller_saved[i]);
2724 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2725 }
2726
2727 /* only increment it after check_reg_arg() finished */
2728 state->curframe++;
2729
2730 /* and go analyze first insn of the callee */
2731 *insn_idx = target_insn;
2732
2733 if (env->log.level) {
2734 verbose(env, "caller:\n");
2735 print_verifier_state(env, caller);
2736 verbose(env, "callee:\n");
2737 print_verifier_state(env, callee);
2738 }
2739 return 0;
2740 }
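
/* Example, for illustration: a bpf-to-bpf call is encoded as
 *
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, delta)
 *
 * where delta is relative to the insn following the call, so the
 * callee starts at *insn_idx + insn->imm + 1; find_subprog() above is
 * queried with target_insn + 1 for the same reason.
 */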
2741
2742 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2743 {
2744 struct bpf_verifier_state *state = env->cur_state;
2745 struct bpf_func_state *caller, *callee;
2746 struct bpf_reg_state *r0;
2747 int err;
2748
2749 callee = state->frame[state->curframe];
2750 r0 = &callee->regs[BPF_REG_0];
2751 if (r0->type == PTR_TO_STACK) {
2752 /* technically it's ok to return the caller's stack pointer
2753 * (or the caller's caller's pointer) back to the caller,
2754 * since those pointers remain valid. Only the current frame's
2755 * stack pointer becomes invalid as soon as the function exits,
2756 * but let's be conservative
2757 */
2758 verbose(env, "cannot return stack pointer to the caller\n");
2759 return -EINVAL;
2760 }
2761
2762 state->curframe--;
2763 caller = state->frame[state->curframe];
2764 /* return to the caller whatever r0 had in the callee */
2765 caller->regs[BPF_REG_0] = *r0;
2766
2767 /* Transfer references to the caller */
2768 err = transfer_reference_state(caller, callee);
2769 if (err)
2770 return err;
2771
2772 *insn_idx = callee->callsite + 1;
2773 if (env->log.level) {
2774 verbose(env, "returning from callee:\n");
2775 print_verifier_state(env, callee);
2776 verbose(env, "to caller at %d:\n", *insn_idx);
2777 print_verifier_state(env, caller);
2778 }
2779 /* clear everything in the callee */
2780 free_func_state(callee);
2781 state->frame[state->curframe + 1] = NULL;
2782 return 0;
2783 }
2784
2785 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
2786 int func_id,
2787 struct bpf_call_arg_meta *meta)
2788 {
2789 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
2790
2791 if (ret_type != RET_INTEGER ||
2792 (func_id != BPF_FUNC_get_stack &&
2793 func_id != BPF_FUNC_probe_read_str))
2794 return;
2795
2796 ret_reg->smax_value = meta->msize_smax_value;
2797 ret_reg->umax_value = meta->msize_umax_value;
2798 __reg_deduce_bounds(ret_reg);
2799 __reg_bound_offset(ret_reg);
2800 }
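
/* Example, for illustration: after bpf_get_stack(ctx, buf, 64, 0) the
 * helper cannot return more than 64, so R0's smax/umax are narrowed
 * from the generic RET_INTEGER bounds to the size argument recorded in
 * meta->msize_{s,u}max_value.
 */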
2801
2802 static int
2803 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2804 int func_id, int insn_idx)
2805 {
2806 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2807
2808 if (func_id != BPF_FUNC_tail_call &&
2809 func_id != BPF_FUNC_map_lookup_elem &&
2810 func_id != BPF_FUNC_map_update_elem &&
2811 func_id != BPF_FUNC_map_delete_elem &&
2812 func_id != BPF_FUNC_map_push_elem &&
2813 func_id != BPF_FUNC_map_pop_elem &&
2814 func_id != BPF_FUNC_map_peek_elem)
2815 return 0;
2816
2817 if (meta->map_ptr == NULL) {
2818 verbose(env, "kernel subsystem misconfigured verifier\n");
2819 return -EINVAL;
2820 }
2821
2822 if (!BPF_MAP_PTR(aux->map_state))
2823 bpf_map_ptr_store(aux, meta->map_ptr,
2824 meta->map_ptr->unpriv_array);
2825 else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2826 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2827 meta->map_ptr->unpriv_array);
2828 return 0;
2829 }
2830
2831 static int check_reference_leak(struct bpf_verifier_env *env)
2832 {
2833 struct bpf_func_state *state = cur_func(env);
2834 int i;
2835
2836 for (i = 0; i < state->acquired_refs; i++) {
2837 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
2838 state->refs[i].id, state->refs[i].insn_idx);
2839 }
2840 return state->acquired_refs ? -EINVAL : 0;
2841 }
2842
2843 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2844 {
2845 const struct bpf_func_proto *fn = NULL;
2846 struct bpf_reg_state *regs;
2847 struct bpf_call_arg_meta meta;
2848 bool changes_data;
2849 int i, err;
2850
2851 /* find function prototype */
2852 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2853 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2854 func_id);
2855 return -EINVAL;
2856 }
2857
2858 if (env->ops->get_func_proto)
2859 fn = env->ops->get_func_proto(func_id, env->prog);
2860 if (!fn) {
2861 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2862 func_id);
2863 return -EINVAL;
2864 }
2865
2866 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2867 if (!env->prog->gpl_compatible && fn->gpl_only) {
2868 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
2869 return -EINVAL;
2870 }
2871
2872 /* With LD_ABS/IND some JITs save/restore skb from r1. */
2873 changes_data = bpf_helper_changes_pkt_data(fn->func);
2874 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2875 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2876 func_id_name(func_id), func_id);
2877 return -EINVAL;
2878 }
2879
2880 memset(&meta, 0, sizeof(meta));
2881 meta.pkt_access = fn->pkt_access;
2882
2883 err = check_func_proto(fn);
2884 if (err) {
2885 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2886 func_id_name(func_id), func_id);
2887 return err;
2888 }
2889
2890 /* check args */
2891 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2892 if (err)
2893 return err;
2894 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2895 if (err)
2896 return err;
2897 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2898 if (err)
2899 return err;
2900 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2901 if (err)
2902 return err;
2903 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2904 if (err)
2905 return err;
2906
2907 err = record_func_map(env, &meta, func_id, insn_idx);
2908 if (err)
2909 return err;
2910
2911 /* Mark slots with STACK_MISC in case of raw mode; the stack offset
2912 * is inferred from register state.
2913 */
2914 for (i = 0; i < meta.access_size; i++) {
2915 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2916 BPF_WRITE, -1, false);
2917 if (err)
2918 return err;
2919 }
2920
2921 if (func_id == BPF_FUNC_tail_call) {
2922 err = check_reference_leak(env);
2923 if (err) {
2924 verbose(env, "tail_call would lead to reference leak\n");
2925 return err;
2926 }
2927 } else if (is_release_function(func_id)) {
2928 err = release_reference(env, &meta);
2929 if (err)
2930 return err;
2931 }
2932
2933 regs = cur_regs(env);
2934
2935 /* check that flags argument in get_local_storage(map, flags) is 0,
2936 * this is required because get_local_storage() can't return an error.
2937 */
2938 if (func_id == BPF_FUNC_get_local_storage &&
2939 !register_is_null(&regs[BPF_REG_2])) {
2940 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
2941 return -EINVAL;
2942 }
2943
2944 /* reset caller saved regs */
2945 for (i = 0; i < CALLER_SAVED_REGS; i++) {
2946 mark_reg_not_init(env, regs, caller_saved[i]);
2947 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2948 }
2949
2950 /* update return register (already marked as written above) */
2951 if (fn->ret_type == RET_INTEGER) {
2952 /* sets type to SCALAR_VALUE */
2953 mark_reg_unknown(env, regs, BPF_REG_0);
2954 } else if (fn->ret_type == RET_VOID) {
2955 regs[BPF_REG_0].type = NOT_INIT;
2956 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2957 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2958 /* There is no offset yet applied, variable or fixed */
2959 mark_reg_known_zero(env, regs, BPF_REG_0);
2960 /* remember map_ptr, so that check_map_access()
2961 * can check 'value_size' boundary of memory access
2962 * to map element returned from bpf_map_lookup_elem()
2963 */
2964 if (meta.map_ptr == NULL) {
2965 verbose(env,
2966 "kernel subsystem misconfigured verifier\n");
2967 return -EINVAL;
2968 }
2969 regs[BPF_REG_0].map_ptr = meta.map_ptr;
2970 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2971 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2972 } else {
2973 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2974 regs[BPF_REG_0].id = ++env->id_gen;
2975 }
2976 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
2977 int id = acquire_reference_state(env, insn_idx);
2978 if (id < 0)
2979 return id;
2980 mark_reg_known_zero(env, regs, BPF_REG_0);
2981 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
2982 regs[BPF_REG_0].id = id;
2983 } else {
2984 verbose(env, "unknown return type %d of func %s#%d\n",
2985 fn->ret_type, func_id_name(func_id), func_id);
2986 return -EINVAL;
2987 }
2988
2989 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
2990
2991 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2992 if (err)
2993 return err;
2994
2995 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
2996 const char *err_str;
2997
2998 #ifdef CONFIG_PERF_EVENTS
2999 err = get_callchain_buffers(sysctl_perf_event_max_stack);
3000 err_str = "cannot get callchain buffer for func %s#%d\n";
3001 #else
3002 err = -ENOTSUPP;
3003 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
3004 #endif
3005 if (err) {
3006 verbose(env, err_str, func_id_name(func_id), func_id);
3007 return err;
3008 }
3009
3010 env->prog->has_callchain_buf = true;
3011 }
3012
3013 if (changes_data)
3014 clear_all_pkt_pointers(env);
3015 return 0;
3016 }
3017
3018 static bool signed_add_overflows(s64 a, s64 b)
3019 {
3020 /* Do the add in u64, where overflow is well-defined */
3021 s64 res = (s64)((u64)a + (u64)b);
3022
3023 if (b < 0)
3024 return res > a;
3025 return res < a;
3026 }
3027
3028 static bool signed_sub_overflows(s64 a, s64 b)
3029 {
3030 /* Do the sub in u64, where overflow is well-defined */
3031 s64 res = (s64)((u64)a - (u64)b);
3032
3033 if (b < 0)
3034 return res < a;
3035 return res > a;
3036 }
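
/* Worked examples: signed_add_overflows(S64_MAX, 1) wraps to S64_MIN
 * in the well-defined u64 domain and reports overflow since b >= 0 and
 * res < a; signed_sub_overflows(S64_MIN, 1) wraps to S64_MAX and
 * reports overflow since b >= 0 and res > a.
 */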
3037
3038 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3039 const struct bpf_reg_state *reg,
3040 enum bpf_reg_type type)
3041 {
3042 bool known = tnum_is_const(reg->var_off);
3043 s64 val = reg->var_off.value;
3044 s64 smin = reg->smin_value;
3045
3046 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
3047 verbose(env, "math between %s pointer and %lld is not allowed\n",
3048 reg_type_str[type], val);
3049 return false;
3050 }
3051
3052 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
3053 verbose(env, "%s pointer offset %d is not allowed\n",
3054 reg_type_str[type], reg->off);
3055 return false;
3056 }
3057
3058 if (smin == S64_MIN) {
3059 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
3060 reg_type_str[type]);
3061 return false;
3062 }
3063
3064 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
3065 verbose(env, "value %lld makes %s pointer be out of bounds\n",
3066 smin, reg_type_str[type]);
3067 return false;
3068 }
3069
3070 return true;
3071 }
3072
3073 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3074 {
3075 return &env->insn_aux_data[env->insn_idx];
3076 }
3077
3078 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3079 u32 *ptr_limit, u8 opcode, bool off_is_neg)
3080 {
3081 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
3082 (opcode == BPF_SUB && !off_is_neg);
3083 u32 off;
3084
3085 switch (ptr_reg->type) {
3086 case PTR_TO_STACK:
3087 off = ptr_reg->off + ptr_reg->var_off.value;
3088 if (mask_to_left)
3089 *ptr_limit = MAX_BPF_STACK + off;
3090 else
3091 *ptr_limit = -off;
3092 return 0;
3093 case PTR_TO_MAP_VALUE:
3094 if (mask_to_left) {
3095 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3096 } else {
3097 off = ptr_reg->smin_value + ptr_reg->off;
3098 *ptr_limit = ptr_reg->map_ptr->value_size - off;
3099 }
3100 return 0;
3101 default:
3102 return -EINVAL;
3103 }
3104 }
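
/* Example, for illustration: for a PTR_TO_STACK pointer at fp-64
 * (off + var_off.value == -64), adding a positive unknown scalar gets
 * *ptr_limit = 64 (the pointer may advance at most to fp), while the
 * mask_to_left case gets MAX_BPF_STACK + (-64) = 448 (it may retreat
 * at most to fp-512).
 */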
3105
3106 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3107 const struct bpf_insn *insn)
3108 {
3109 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3110 }
3111
3112 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3113 u32 alu_state, u32 alu_limit)
3114 {
3115 /* If we arrived here from different branches with different
3116 * state or limits to sanitize, then this won't work.
3117 */
3118 if (aux->alu_state &&
3119 (aux->alu_state != alu_state ||
3120 aux->alu_limit != alu_limit))
3121 return -EACCES;
3122
3123 /* Corresponding fixup done in fixup_bpf_calls(). */
3124 aux->alu_state = alu_state;
3125 aux->alu_limit = alu_limit;
3126 return 0;
3127 }
3128
3129 static int sanitize_val_alu(struct bpf_verifier_env *env,
3130 struct bpf_insn *insn)
3131 {
3132 struct bpf_insn_aux_data *aux = cur_aux(env);
3133
3134 if (can_skip_alu_sanitation(env, insn))
3135 return 0;
3136
3137 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3138 }
3139
3140 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3141 struct bpf_insn *insn,
3142 const struct bpf_reg_state *ptr_reg,
3143 struct bpf_reg_state *dst_reg,
3144 bool off_is_neg)
3145 {
3146 struct bpf_verifier_state *vstate = env->cur_state;
3147 struct bpf_insn_aux_data *aux = cur_aux(env);
3148 bool ptr_is_dst_reg = ptr_reg == dst_reg;
3149 u8 opcode = BPF_OP(insn->code);
3150 u32 alu_state, alu_limit;
3151 struct bpf_reg_state tmp;
3152 bool ret;
3153
3154 if (can_skip_alu_sanitation(env, insn))
3155 return 0;
3156
3157 /* We already marked aux for masking from non-speculative
3158 * paths, thus we got here in the first place. We only care
3159 * to explore bad access from here.
3160 */
3161 if (vstate->speculative)
3162 goto do_sim;
3163
3164 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3165 alu_state |= ptr_is_dst_reg ?
3166 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3167
3168 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3169 return 0;
3170 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3171 return -EACCES;
3172 do_sim:
3173 /* Simulate and find potential out-of-bounds access under
3174 * speculative execution from truncation as a result of
3175 * masking when off was not within expected range. If off
3176 * sits in dst, then we temporarily need to move ptr there
3177 * to simulate dst (== 0) +/-= ptr. Needed, for example,
3178 * for cases where we use K-based arithmetic in one direction
3179 * and truncated reg-based in the other in order to explore
3180 * bad access.
3181 */
3182 if (!ptr_is_dst_reg) {
3183 tmp = *dst_reg;
3184 *dst_reg = *ptr_reg;
3185 }
3186 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3187 if (!ptr_is_dst_reg)
3188 *dst_reg = tmp;
3189 return !ret ? -EFAULT : 0;
3190 }
3191
3192 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3193 * Caller should also handle BPF_MOV case separately.
3194 * If we return -EACCES, caller may want to try again treating pointer as a
3195 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
3196 */
3197 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3198 struct bpf_insn *insn,
3199 const struct bpf_reg_state *ptr_reg,
3200 const struct bpf_reg_state *off_reg)
3201 {
3202 struct bpf_verifier_state *vstate = env->cur_state;
3203 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3204 struct bpf_reg_state *regs = state->regs, *dst_reg;
3205 bool known = tnum_is_const(off_reg->var_off);
3206 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
3207 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3208 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3209 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3210 u32 dst = insn->dst_reg, src = insn->src_reg;
3211 u8 opcode = BPF_OP(insn->code);
3212 int ret;
3213
3214 dst_reg = &regs[dst];
3215
3216 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
3217 smin_val > smax_val || umin_val > umax_val) {
3218 /* Taint dst register if offset had invalid bounds derived from
3219 * e.g. dead branches.
3220 */
3221 __mark_reg_unknown(dst_reg);
3222 return 0;
3223 }
3224
3225 if (BPF_CLASS(insn->code) != BPF_ALU64) {
3226 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
3227 verbose(env,
3228 "R%d 32-bit pointer arithmetic prohibited\n",
3229 dst);
3230 return -EACCES;
3231 }
3232
3233 switch (ptr_reg->type) {
3234 case PTR_TO_MAP_VALUE_OR_NULL:
3235 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
3236 dst, reg_type_str[ptr_reg->type]);
3237 return -EACCES;
3238 case CONST_PTR_TO_MAP:
3239 case PTR_TO_PACKET_END:
3240 case PTR_TO_SOCKET:
3241 case PTR_TO_SOCKET_OR_NULL:
3242 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3243 dst, reg_type_str[ptr_reg->type]);
3244 return -EACCES;
3245 case PTR_TO_MAP_VALUE:
3246 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3247 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3248 off_reg == dst_reg ? dst : src);
3249 return -EACCES;
3250 }
3251 /* fall-through */
3252 default:
3253 break;
3254 }
3255
3256 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
3257 * The id may be overwritten later if we create a new variable offset.
3258 */
3259 dst_reg->type = ptr_reg->type;
3260 dst_reg->id = ptr_reg->id;
3261
3262 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
3263 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
3264 return -EINVAL;
3265
3266 switch (opcode) {
3267 case BPF_ADD:
3268 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3269 if (ret < 0) {
3270 verbose(env, "R%d tried to add from different maps or paths\n", dst);
3271 return ret;
3272 }
3273 /* We can take a fixed offset as long as it doesn't overflow
3274 * the s32 'off' field
3275 */
3276 if (known && (ptr_reg->off + smin_val ==
3277 (s64)(s32)(ptr_reg->off + smin_val))) {
3278 /* pointer += K. Accumulate it into fixed offset */
3279 dst_reg->smin_value = smin_ptr;
3280 dst_reg->smax_value = smax_ptr;
3281 dst_reg->umin_value = umin_ptr;
3282 dst_reg->umax_value = umax_ptr;
3283 dst_reg->var_off = ptr_reg->var_off;
3284 dst_reg->off = ptr_reg->off + smin_val;
3285 dst_reg->raw = ptr_reg->raw;
3286 break;
3287 }
3288 /* A new variable offset is created. Note that off_reg->off
3289 * == 0, since it's a scalar.
3290 * dst_reg gets the pointer type and since some unknown
3291 * integer value was added to the pointer, give it a new 'id'
3292 * if it's a PTR_TO_PACKET.
3293 * This creates a new 'base' pointer; off_reg (variable) gets
3294 * added into the variable offset, and we copy the fixed offset
3295 * from ptr_reg.
3296 */
3297 if (signed_add_overflows(smin_ptr, smin_val) ||
3298 signed_add_overflows(smax_ptr, smax_val)) {
3299 dst_reg->smin_value = S64_MIN;
3300 dst_reg->smax_value = S64_MAX;
3301 } else {
3302 dst_reg->smin_value = smin_ptr + smin_val;
3303 dst_reg->smax_value = smax_ptr + smax_val;
3304 }
3305 if (umin_ptr + umin_val < umin_ptr ||
3306 umax_ptr + umax_val < umax_ptr) {
3307 dst_reg->umin_value = 0;
3308 dst_reg->umax_value = U64_MAX;
3309 } else {
3310 dst_reg->umin_value = umin_ptr + umin_val;
3311 dst_reg->umax_value = umax_ptr + umax_val;
3312 }
3313 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
3314 dst_reg->off = ptr_reg->off;
3315 dst_reg->raw = ptr_reg->raw;
3316 if (reg_is_pkt_pointer(ptr_reg)) {
3317 dst_reg->id = ++env->id_gen;
3318 /* something was added to pkt_ptr, set range to zero */
3319 dst_reg->raw = 0;
3320 }
3321 break;
3322 case BPF_SUB:
3323 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3324 if (ret < 0) {
3325 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3326 return ret;
3327 }
3328 if (dst_reg == off_reg) {
3329 /* scalar -= pointer. Creates an unknown scalar */
3330 verbose(env, "R%d tried to subtract pointer from scalar\n",
3331 dst);
3332 return -EACCES;
3333 }
3334 /* We don't allow subtraction from FP, because (according to
3335 * test_verifier.c test "invalid fp arithmetic", JITs might not
3336 * be able to deal with it.
3337 */
3338 if (ptr_reg->type == PTR_TO_STACK) {
3339 verbose(env, "R%d subtraction from stack pointer prohibited\n",
3340 dst);
3341 return -EACCES;
3342 }
3343 if (known && (ptr_reg->off - smin_val ==
3344 (s64)(s32)(ptr_reg->off - smin_val))) {
3345 /* pointer -= K. Subtract it from fixed offset */
3346 dst_reg->smin_value = smin_ptr;
3347 dst_reg->smax_value = smax_ptr;
3348 dst_reg->umin_value = umin_ptr;
3349 dst_reg->umax_value = umax_ptr;
3350 dst_reg->var_off = ptr_reg->var_off;
3351 dst_reg->id = ptr_reg->id;
3352 dst_reg->off = ptr_reg->off - smin_val;
3353 dst_reg->raw = ptr_reg->raw;
3354 break;
3355 }
3356 /* A new variable offset is created. If the subtrahend is known
3357 * nonnegative, then any reg->range we had before is still good.
3358 */
3359 if (signed_sub_overflows(smin_ptr, smax_val) ||
3360 signed_sub_overflows(smax_ptr, smin_val)) {
3361 /* Overflow possible, we know nothing */
3362 dst_reg->smin_value = S64_MIN;
3363 dst_reg->smax_value = S64_MAX;
3364 } else {
3365 dst_reg->smin_value = smin_ptr - smax_val;
3366 dst_reg->smax_value = smax_ptr - smin_val;
3367 }
3368 if (umin_ptr < umax_val) {
3369 /* Overflow possible, we know nothing */
3370 dst_reg->umin_value = 0;
3371 dst_reg->umax_value = U64_MAX;
3372 } else {
3373 /* Cannot overflow (as long as bounds are consistent) */
3374 dst_reg->umin_value = umin_ptr - umax_val;
3375 dst_reg->umax_value = umax_ptr - umin_val;
3376 }
3377 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
3378 dst_reg->off = ptr_reg->off;
3379 dst_reg->raw = ptr_reg->raw;
3380 if (reg_is_pkt_pointer(ptr_reg)) {
3381 dst_reg->id = ++env->id_gen;
3382 /* subtracting a negative effectively added to pkt_ptr, set range to zero */
3383 if (smin_val < 0)
3384 dst_reg->raw = 0;
3385 }
3386 break;
3387 case BPF_AND:
3388 case BPF_OR:
3389 case BPF_XOR:
3390 /* bitwise ops on pointers are troublesome, prohibit. */
3391 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
3392 dst, bpf_alu_string[opcode >> 4]);
3393 return -EACCES;
3394 default:
3395 /* other operators (e.g. MUL,LSH) produce non-pointer results */
3396 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
3397 dst, bpf_alu_string[opcode >> 4]);
3398 return -EACCES;
3399 }
3400
3401 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
3402 return -EINVAL;
3403
3404 __update_reg_bounds(dst_reg);
3405 __reg_deduce_bounds(dst_reg);
3406 __reg_bound_offset(dst_reg);
3407
3408 /* For unprivileged we require that resulting offset must be in bounds
3409 * in order to be able to sanitize access later on.
3410 */
3411 if (!env->allow_ptr_leaks) {
3412 if (dst_reg->type == PTR_TO_MAP_VALUE &&
3413 check_map_access(env, dst, dst_reg->off, 1, false)) {
3414 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3415 "prohibited for !root\n", dst);
3416 return -EACCES;
3417 } else if (dst_reg->type == PTR_TO_STACK &&
3418 check_stack_access(env, dst_reg, dst_reg->off +
3419 dst_reg->var_off.value, 1)) {
3420 verbose(env, "R%d stack pointer arithmetic goes out of range, "
3421 "prohibited for !root\n", dst);
3422 return -EACCES;
3423 }
3424 }
3425
3426 return 0;
3427 }
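
/* Example, for illustration: with R0 = PTR_TO_MAP_VALUE (off == 0,
 * value_size == 16) and R1 a scalar bounded to [0, 8], "R0 += R1"
 * yields a variable-offset map pointer with umin == 0 and umax == 8;
 * a subsequent 8-byte load at offset 0 passes check_map_access() only
 * because 8 + 8 <= value_size.
 */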
3428
3429 /* WARNING: This function does calculations on 64-bit values, but the actual
3430 * execution may occur on 32-bit values. Therefore, things like bitshifts
3431 * need extra checks in the 32-bit case.
3432 */
3433 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3434 struct bpf_insn *insn,
3435 struct bpf_reg_state *dst_reg,
3436 struct bpf_reg_state src_reg)
3437 {
3438 struct bpf_reg_state *regs = cur_regs(env);
3439 u8 opcode = BPF_OP(insn->code);
3440 bool src_known, dst_known;
3441 s64 smin_val, smax_val;
3442 u64 umin_val, umax_val;
3443 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3444 u32 dst = insn->dst_reg;
3445 int ret;
3446
3447 if (insn_bitness == 32) {
3448 /* Relevant for 32-bit RSH: Information can propagate towards
3449 * LSB, so it isn't sufficient to only truncate the output to
3450 * 32 bits.
3451 */
3452 coerce_reg_to_size(dst_reg, 4);
3453 coerce_reg_to_size(&src_reg, 4);
3454 }
3455
3456 smin_val = src_reg.smin_value;
3457 smax_val = src_reg.smax_value;
3458 umin_val = src_reg.umin_value;
3459 umax_val = src_reg.umax_value;
3460 src_known = tnum_is_const(src_reg.var_off);
3461 dst_known = tnum_is_const(dst_reg->var_off);
3462
3463 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
3464 smin_val > smax_val || umin_val > umax_val) {
3465 /* Taint dst register if offset had invalid bounds derived from
3466 * e.g. dead branches.
3467 */
3468 __mark_reg_unknown(dst_reg);
3469 return 0;
3470 }
3471
3472 if (!src_known &&
3473 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
3474 __mark_reg_unknown(dst_reg);
3475 return 0;
3476 }
3477
3478 switch (opcode) {
3479 case BPF_ADD:
3480 ret = sanitize_val_alu(env, insn);
3481 if (ret < 0) {
3482 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3483 return ret;
3484 }
3485 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3486 signed_add_overflows(dst_reg->smax_value, smax_val)) {
3487 dst_reg->smin_value = S64_MIN;
3488 dst_reg->smax_value = S64_MAX;
3489 } else {
3490 dst_reg->smin_value += smin_val;
3491 dst_reg->smax_value += smax_val;
3492 }
3493 if (dst_reg->umin_value + umin_val < umin_val ||
3494 dst_reg->umax_value + umax_val < umax_val) {
3495 dst_reg->umin_value = 0;
3496 dst_reg->umax_value = U64_MAX;
3497 } else {
3498 dst_reg->umin_value += umin_val;
3499 dst_reg->umax_value += umax_val;
3500 }
3501 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3502 break;
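/* Worked example (hypothetical bounds): adding src in [3, 5] to dst in
 * [10, 20] yields [13, 25] on both the signed and unsigned tracks. If
 * instead dst_reg->umax_value were U64_MAX - 1 and umax_val were 2, the
 * sum wraps ((U64_MAX - 1) + 2 == 0, which is < 2), so the unsigned
 * bounds are conservatively reset to [0, U64_MAX].
 */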
3503 case BPF_SUB:
3504 ret = sanitize_val_alu(env, insn);
3505 if (ret < 0) {
3506 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3507 return ret;
3508 }
3509 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3510 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3511 /* Overflow possible, we know nothing */
3512 dst_reg->smin_value = S64_MIN;
3513 dst_reg->smax_value = S64_MAX;
3514 } else {
3515 dst_reg->smin_value -= smax_val;
3516 dst_reg->smax_value -= smin_val;
3517 }
3518 if (dst_reg->umin_value < umax_val) {
3519 /* Overflow possible, we know nothing */
3520 dst_reg->umin_value = 0;
3521 dst_reg->umax_value = U64_MAX;
3522 } else {
3523 /* Cannot overflow (as long as bounds are consistent) */
3524 dst_reg->umin_value -= umax_val;
3525 dst_reg->umax_value -= umin_val;
3526 }
3527 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
3528 break;
3529 case BPF_MUL:
3530 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
3531 if (smin_val < 0 || dst_reg->smin_value < 0) {
3532 /* Ain't nobody got time to multiply that sign */
3533 __mark_reg_unbounded(dst_reg);
3534 __update_reg_bounds(dst_reg);
3535 break;
3536 }
3537 /* Both values are positive, so we can work with unsigned and
3538 * copy the result to signed (unless it exceeds S64_MAX).
3539 */
3540 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
3541 /* Potential overflow, we know nothing */
3542 __mark_reg_unbounded(dst_reg);
3543 /* (except what we can learn from the var_off) */
3544 __update_reg_bounds(dst_reg);
3545 break;
3546 }
3547 dst_reg->umin_value *= umin_val;
3548 dst_reg->umax_value *= umax_val;
3549 if (dst_reg->umax_value > S64_MAX) {
3550 /* Overflow possible, we know nothing */
3551 dst_reg->smin_value = S64_MIN;
3552 dst_reg->smax_value = S64_MAX;
3553 } else {
3554 dst_reg->smin_value = dst_reg->umin_value;
3555 dst_reg->smax_value = dst_reg->umax_value;
3556 }
3557 break;
3558 case BPF_AND:
3559 if (src_known && dst_known) {
3560 __mark_reg_known(dst_reg, dst_reg->var_off.value &
3561 src_reg.var_off.value);
3562 break;
3563 }
3564 /* We get our minimum from the var_off, since that's inherently
3565 * bitwise. Our maximum is the minimum of the operands' maxima.
3566 */
3567 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
3568 dst_reg->umin_value = dst_reg->var_off.value;
3569 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
3570 if (dst_reg->smin_value < 0 || smin_val < 0) {
3571 /* Lose signed bounds when ANDing negative numbers,
3572 * ain't nobody got time for that.
3573 */
3574 dst_reg->smin_value = S64_MIN;
3575 dst_reg->smax_value = S64_MAX;
3576 } else {
3577 /* ANDing two positives gives a positive, so safe to
3578 * cast result into s64.
3579 */
3580 dst_reg->smin_value = dst_reg->umin_value;
3581 dst_reg->smax_value = dst_reg->umax_value;
3582 }
3583 /* We may learn something more from the var_off */
3584 __update_reg_bounds(dst_reg);
3585 break;
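/* Worked example (hypothetical bounds): dst in [0, 12] with fully
 * unknown var_off, ANDed with a known constant 7: tnum_and() yields
 * (value=0, mask=7), so umin_value becomes 0 and umax_value becomes
 * min(12, 7) = 7. Both operands being non-negative, the signed bounds
 * follow as [0, 7].
 */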
3586 case BPF_OR:
3587 if (src_known && dst_known) {
3588 __mark_reg_known(dst_reg, dst_reg->var_off.value |
3589 src_reg.var_off.value);
3590 break;
3591 }
3592 /* We get our maximum from the var_off, and our minimum is the
3593 * maximum of the operands' minima
3594 */
3595 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
3596 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
3597 dst_reg->umax_value = dst_reg->var_off.value |
3598 dst_reg->var_off.mask;
3599 if (dst_reg->smin_value < 0 || smin_val < 0) {
3600 /* Lose signed bounds when ORing negative numbers,
3601 * ain't nobody got time for that.
3602 */
3603 dst_reg->smin_value = S64_MIN;
3604 dst_reg->smax_value = S64_MAX;
3605 } else {
3606 /* ORing two positives gives a positive, so safe to
3607 * cast result into s64.
3608 */
3609 dst_reg->smin_value = dst_reg->umin_value;
3610 dst_reg->smax_value = dst_reg->umax_value;
3611 }
3612 /* We may learn something more from the var_off */
3613 __update_reg_bounds(dst_reg);
3614 break;
3615 case BPF_LSH:
3616 if (umax_val >= insn_bitness) {
3617 /* Shifts greater than 31 or 63 are undefined.
3618 * This includes shifts by a negative number.
3619 */
3620 mark_reg_unknown(env, regs, insn->dst_reg);
3621 break;
3622 }
3623 /* We lose all sign bit information (except what we can pick
3624 * up from var_off)
3625 */
3626 dst_reg->smin_value = S64_MIN;
3627 dst_reg->smax_value = S64_MAX;
3628 /* If we might shift our top bit out, then we know nothing */
3629 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
3630 dst_reg->umin_value = 0;
3631 dst_reg->umax_value = U64_MAX;
3632 } else {
3633 dst_reg->umin_value <<= umin_val;
3634 dst_reg->umax_value <<= umax_val;
3635 }
3636 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
3637 /* We may learn something more from the var_off */
3638 __update_reg_bounds(dst_reg);
3639 break;
3640 case BPF_RSH:
3641 if (umax_val >= insn_bitness) {
3642 /* Shifts greater than 31 or 63 are undefined.
3643 * This includes shifts by a negative number.
3644 */
3645 mark_reg_unknown(env, regs, insn->dst_reg);
3646 break;
3647 }
3648 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
3649 * be negative, then either:
3650 * 1) src_reg might be zero, so the sign bit of the result is
3651 * unknown, so we lose our signed bounds
3652 * 2) it's known negative, thus the unsigned bounds capture the
3653 * signed bounds
3654 * 3) the signed bounds cross zero, so they tell us nothing
3655 * about the result
3656 * If the value in dst_reg is known nonnegative, then again the
3657 * unsigned bounds capture the signed bounds.
3658 * Thus, in all cases it suffices to blow away our signed bounds
3659 * and rely on inferring new ones from the unsigned bounds and
3660 * var_off of the result.
3661 */
3662 dst_reg->smin_value = S64_MIN;
3663 dst_reg->smax_value = S64_MAX;
3664 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
3665 dst_reg->umin_value >>= umax_val;
3666 dst_reg->umax_value >>= umin_val;
3667 /* We may learn something more from the var_off */
3668 __update_reg_bounds(dst_reg);
3669 break;
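/* Worked example (hypothetical bounds): only known shift amounts reach
 * this point (see the !src_known check above), so umin_val == umax_val.
 * For dst in [16, 64] shifted right by a constant 2, the new unsigned
 * bounds are [16 >> 2, 64 >> 2] = [4, 16]; the signed bounds were
 * discarded above and are rebuilt from the unsigned bounds and var_off.
 */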
3670 case BPF_ARSH:
3671 if (umax_val >= insn_bitness) {
3672 /* Shifts greater than 31 or 63 are undefined.
3673 * This includes shifts by a negative number.
3674 */
3675 mark_reg_unknown(env, regs, insn->dst_reg);
3676 break;
3677 }
3678
3679 /* Upon reaching here, src_known is true and
3680 * umax_val is equal to umin_val.
3681 */
3682 dst_reg->smin_value >>= umin_val;
3683 dst_reg->smax_value >>= umin_val;
3684 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
3685
3686 /* blow away the dst_reg umin_value/umax_value and rely on
3687 * dst_reg var_off to refine the result.
3688 */
3689 dst_reg->umin_value = 0;
3690 dst_reg->umax_value = U64_MAX;
3691 __update_reg_bounds(dst_reg);
3692 break;
3693 default:
3694 mark_reg_unknown(env, regs, insn->dst_reg);
3695 break;
3696 }
3697
3698 if (BPF_CLASS(insn->code) != BPF_ALU64) {
3699 /* 32-bit ALU ops are (32,32)->32 */
3700 coerce_reg_to_size(dst_reg, 4);
3701 }
3702
3703 __reg_deduce_bounds(dst_reg);
3704 __reg_bound_offset(dst_reg);
3705 return 0;
3706 }
3707
3708 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3709 * and var_off.
3710 */
3711 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3712 struct bpf_insn *insn)
3713 {
3714 struct bpf_verifier_state *vstate = env->cur_state;
3715 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3716 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3717 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3718 u8 opcode = BPF_OP(insn->code);
3719
3720 dst_reg = &regs[insn->dst_reg];
3721 src_reg = NULL;
3722 if (dst_reg->type != SCALAR_VALUE)
3723 ptr_reg = dst_reg;
3724 if (BPF_SRC(insn->code) == BPF_X) {
3725 src_reg = &regs[insn->src_reg];
3726 if (src_reg->type != SCALAR_VALUE) {
3727 if (dst_reg->type != SCALAR_VALUE) {
3728 /* Combining two pointers by any ALU op yields
3729 * an arbitrary scalar. Disallow all math except
3730 * pointer subtraction
3731 */
3732 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
3733 mark_reg_unknown(env, regs, insn->dst_reg);
3734 return 0;
3735 }
3736 verbose(env, "R%d pointer %s pointer prohibited\n",
3737 insn->dst_reg,
3738 bpf_alu_string[opcode >> 4]);
3739 return -EACCES;
3740 } else {
3741 /* scalar += pointer
3742 * This is legal, but we have to reverse our
3743 * src/dest handling in computing the range
3744 */
3745 return adjust_ptr_min_max_vals(env, insn,
3746 src_reg, dst_reg);
3747 }
3748 } else if (ptr_reg) {
3749 /* pointer += scalar */
3750 return adjust_ptr_min_max_vals(env, insn,
3751 dst_reg, src_reg);
3752 }
3753 } else {
3754 /* Pretend the src is a reg with a known value, since we only
3755 * need to be able to read from this state.
3756 */
3757 off_reg.type = SCALAR_VALUE;
3758 __mark_reg_known(&off_reg, insn->imm);
3759 src_reg = &off_reg;
3760 if (ptr_reg) /* pointer += K */
3761 return adjust_ptr_min_max_vals(env, insn,
3762 ptr_reg, src_reg);
3763 }
3764
3765 /* Reaching here implies adding two SCALAR_VALUEs */
3766 if (WARN_ON_ONCE(ptr_reg)) {
3767 print_verifier_state(env, state);
3768 verbose(env, "verifier internal error: unexpected ptr_reg\n");
3769 return -EINVAL;
3770 }
3771 if (WARN_ON(!src_reg)) {
3772 print_verifier_state(env, state);
3773 verbose(env, "verifier internal error: no src_reg\n");
3774 return -EINVAL;
3775 }
3776 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3777 }
3778
3779 /* check validity of 32-bit and 64-bit arithmetic operations */
3780 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3781 {
3782 struct bpf_reg_state *regs = cur_regs(env);
3783 u8 opcode = BPF_OP(insn->code);
3784 int err;
3785
3786 if (opcode == BPF_END || opcode == BPF_NEG) {
3787 if (opcode == BPF_NEG) {
3788 if (BPF_SRC(insn->code) != 0 ||
3789 insn->src_reg != BPF_REG_0 ||
3790 insn->off != 0 || insn->imm != 0) {
3791 verbose(env, "BPF_NEG uses reserved fields\n");
3792 return -EINVAL;
3793 }
3794 } else {
3795 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3796 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3797 BPF_CLASS(insn->code) == BPF_ALU64) {
3798 verbose(env, "BPF_END uses reserved fields\n");
3799 return -EINVAL;
3800 }
3801 }
3802
3803 /* check src operand */
3804 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3805 if (err)
3806 return err;
3807
3808 if (is_pointer_value(env, insn->dst_reg)) {
3809 verbose(env, "R%d pointer arithmetic prohibited\n",
3810 insn->dst_reg);
3811 return -EACCES;
3812 }
3813
3814 /* check dest operand */
3815 err = check_reg_arg(env, insn->dst_reg, DST_OP);
3816 if (err)
3817 return err;
3818
3819 } else if (opcode == BPF_MOV) {
3820
3821 if (BPF_SRC(insn->code) == BPF_X) {
3822 if (insn->imm != 0 || insn->off != 0) {
3823 verbose(env, "BPF_MOV uses reserved fields\n");
3824 return -EINVAL;
3825 }
3826
3827 /* check src operand */
3828 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3829 if (err)
3830 return err;
3831 } else {
3832 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3833 verbose(env, "BPF_MOV uses reserved fields\n");
3834 return -EINVAL;
3835 }
3836 }
3837
3838 /* check dest operand, mark as required later */
3839 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3840 if (err)
3841 return err;
3842
3843 if (BPF_SRC(insn->code) == BPF_X) {
3844 struct bpf_reg_state *src_reg = regs + insn->src_reg;
3845 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
3846
3847 if (BPF_CLASS(insn->code) == BPF_ALU64) {
3848 /* case: R1 = R2
3849 * copy register state to dest reg
3850 */
3851 *dst_reg = *src_reg;
3852 dst_reg->live |= REG_LIVE_WRITTEN;
3853 } else {
3854 /* R1 = (u32) R2 */
3855 if (is_pointer_value(env, insn->src_reg)) {
3856 verbose(env,
3857 "R%d partial copy of pointer\n",
3858 insn->src_reg);
3859 return -EACCES;
3860 } else if (src_reg->type == SCALAR_VALUE) {
3861 *dst_reg = *src_reg;
3862 dst_reg->live |= REG_LIVE_WRITTEN;
3863 } else {
3864 mark_reg_unknown(env, regs,
3865 insn->dst_reg);
3866 }
3867 coerce_reg_to_size(dst_reg, 4);
3868 }
3869 } else {
3870 /* case: R = imm
3871 * remember the value we stored into this reg
3872 */
3873 /* clear any state __mark_reg_known doesn't set */
3874 mark_reg_unknown(env, regs, insn->dst_reg);
3875 regs[insn->dst_reg].type = SCALAR_VALUE;
3876 if (BPF_CLASS(insn->code) == BPF_ALU64) {
3877 __mark_reg_known(regs + insn->dst_reg,
3878 insn->imm);
3879 } else {
3880 __mark_reg_known(regs + insn->dst_reg,
3881 (u32)insn->imm);
3882 }
3883 }
3884
3885 } else if (opcode > BPF_END) {
3886 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3887 return -EINVAL;
3888
3889 } else { /* all other ALU ops: and, sub, xor, add, ... */
3890
3891 if (BPF_SRC(insn->code) == BPF_X) {
3892 if (insn->imm != 0 || insn->off != 0) {
3893 verbose(env, "BPF_ALU uses reserved fields\n");
3894 return -EINVAL;
3895 }
3896 /* check src1 operand */
3897 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3898 if (err)
3899 return err;
3900 } else {
3901 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3902 verbose(env, "BPF_ALU uses reserved fields\n");
3903 return -EINVAL;
3904 }
3905 }
3906
3907 /* check src2 operand */
3908 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3909 if (err)
3910 return err;
3911
3912 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3913 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3914 verbose(env, "div by zero\n");
3915 return -EINVAL;
3916 }
3917
3918 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3919 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3920 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3921
3922 if (insn->imm < 0 || insn->imm >= size) {
3923 verbose(env, "invalid shift %d\n", insn->imm);
3924 return -EINVAL;
3925 }
3926 }
3927
3928 /* check dest operand */
3929 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3930 if (err)
3931 return err;
3932
3933 return adjust_reg_min_max_vals(env, insn);
3934 }
3935
3936 return 0;
3937 }
3938
3939 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3940 struct bpf_reg_state *dst_reg,
3941 enum bpf_reg_type type,
3942 bool range_right_open)
3943 {
3944 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3945 struct bpf_reg_state *regs = state->regs, *reg;
3946 u16 new_range;
3947 int i, j;
3948
3949 if (dst_reg->off < 0 ||
3950 (dst_reg->off == 0 && range_right_open))
3951 /* This doesn't give us any range */
3952 return;
3953
3954 if (dst_reg->umax_value > MAX_PACKET_OFF ||
3955 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3956 /* Risk of overflow. For instance, ptr + (1<<63) may be less
3957 * than pkt_end, but that's because it's also less than pkt.
3958 */
3959 return;
3960
3961 new_range = dst_reg->off;
3962 if (range_right_open)
3963 new_range--;
3964
3965 /* Examples for register markings:
3966 *
3967 * pkt_data in dst register:
3968 *
3969 * r2 = r3;
3970 * r2 += 8;
3971 * if (r2 > pkt_end) goto <handle exception>
3972 * <access okay>
3973 *
3974 * r2 = r3;
3975 * r2 += 8;
3976 * if (r2 < pkt_end) goto <access okay>
3977 * <handle exception>
3978 *
3979 * Where:
3980 * r2 == dst_reg, pkt_end == src_reg
3981 * r2=pkt(id=n,off=8,r=0)
3982 * r3=pkt(id=n,off=0,r=0)
3983 *
3984 * pkt_data in src register:
3985 *
3986 * r2 = r3;
3987 * r2 += 8;
3988 * if (pkt_end >= r2) goto <access okay>
3989 * <handle exception>
3990 *
3991 * r2 = r3;
3992 * r2 += 8;
3993 * if (pkt_end <= r2) goto <handle exception>
3994 * <access okay>
3995 *
3996 * Where:
3997 * pkt_end == dst_reg, r2 == src_reg
3998 * r2=pkt(id=n,off=8,r=0)
3999 * r3=pkt(id=n,off=0,r=0)
4000 *
4001 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
4002 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
4003 * and [r3, r3 + 8-1) respectively is safe to access depending on
4004 * the check.
4005 */
4006
4007 /* If our ids match, then we must have the same max_value. And we
4008 * don't care about the other reg's fixed offset, since if it's too big
4009 * the range won't allow anything.
4010 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
4011 */
4012 for (i = 0; i < MAX_BPF_REG; i++)
4013 if (regs[i].type == type && regs[i].id == dst_reg->id)
4014 /* keep the maximum range already checked */
4015 regs[i].range = max(regs[i].range, new_range);
4016
4017 for (j = 0; j <= vstate->curframe; j++) {
4018 state = vstate->frame[j];
4019 bpf_for_each_spilled_reg(i, state, reg) {
4020 if (!reg)
4021 continue;
4022 if (reg->type == type && reg->id == dst_reg->id)
4023 reg->range = max(reg->range, new_range);
4024 }
4025 }
4026 }
4027
4028 /* compute branch direction of the expression "if (reg opcode val) goto target;"
4029 * and return:
4030 * 1 - branch will be taken and "goto target" will be executed
4031 * 0 - branch will not be taken and fall-through to next insn
4032 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
4033 */
4034 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
4035 {
4036 if (__is_pointer_value(false, reg))
4037 return -1;
4038
4039 switch (opcode) {
4040 case BPF_JEQ:
4041 if (tnum_is_const(reg->var_off))
4042 return !!tnum_equals_const(reg->var_off, val);
4043 break;
4044 case BPF_JNE:
4045 if (tnum_is_const(reg->var_off))
4046 return !tnum_equals_const(reg->var_off, val);
4047 break;
4048 case BPF_JSET:
4049 if ((~reg->var_off.mask & reg->var_off.value) & val)
4050 return 1;
4051 if (!((reg->var_off.mask | reg->var_off.value) & val))
4052 return 0;
4053 break;
4054 case BPF_JGT:
4055 if (reg->umin_value > val)
4056 return 1;
4057 else if (reg->umax_value <= val)
4058 return 0;
4059 break;
4060 case BPF_JSGT:
4061 if (reg->smin_value > (s64)val)
4062 return 1;
4063 else if (reg->smax_value < (s64)val)
4064 return 0;
4065 break;
4066 case BPF_JLT:
4067 if (reg->umax_value < val)
4068 return 1;
4069 else if (reg->umin_value >= val)
4070 return 0;
4071 break;
4072 case BPF_JSLT:
4073 if (reg->smax_value < (s64)val)
4074 return 1;
4075 else if (reg->smin_value >= (s64)val)
4076 return 0;
4077 break;
4078 case BPF_JGE:
4079 if (reg->umin_value >= val)
4080 return 1;
4081 else if (reg->umax_value < val)
4082 return 0;
4083 break;
4084 case BPF_JSGE:
4085 if (reg->smin_value >= (s64)val)
4086 return 1;
4087 else if (reg->smax_value < (s64)val)
4088 return 0;
4089 break;
4090 case BPF_JLE:
4091 if (reg->umax_value <= val)
4092 return 1;
4093 else if (reg->umin_value > val)
4094 return 0;
4095 break;
4096 case BPF_JSLE:
4097 if (reg->smax_value <= (s64)val)
4098 return 1;
4099 else if (reg->smin_value > (s64)val)
4100 return 0;
4101 break;
4102 }
4103
4104 return -1;
4105 }
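/* Illustrative examples for is_branch_taken() (register states are
 * hypothetical): for "if r1 > 10" with r1 tracked as [0, 10], the
 * umax_value (10) is <= 10, so the branch is never taken and 0 is
 * returned. For BPF_JSET with var_off (value=0x4, mask=0x3) and
 * val == 0x4, bit 2 is known to be set, so 1 is returned.
 */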
4106
4107 /* Adjusts the register min/max values in the case that the dst_reg is the
4108 * variable register that we are working on, and src_reg is a constant or we're
4109 * simply doing a BPF_K check.
4110 * In JEQ/JNE cases we also adjust the var_off values.
4111 */
4112 static void reg_set_min_max(struct bpf_reg_state *true_reg,
4113 struct bpf_reg_state *false_reg, u64 val,
4114 u8 opcode)
4115 {
4116 /* If the dst_reg is a pointer, we can't learn anything about its
4117 * variable offset from the compare (unless src_reg were a pointer into
4118 * the same object, but we don't bother with that).
4119 * Since false_reg and true_reg have the same type by construction, we
4120 * only need to check one of them for pointerness.
4121 */
4122 if (__is_pointer_value(false, false_reg))
4123 return;
4124
4125 switch (opcode) {
4126 case BPF_JEQ:
4127 /* If this is false then we know nothing Jon Snow, but if it is
4128 * true then we know for sure.
4129 */
4130 __mark_reg_known(true_reg, val);
4131 break;
4132 case BPF_JNE:
4133 /* If this is true we know nothing Jon Snow, but if it is false
4134 * we know the value for sure.
4135 */
4136 __mark_reg_known(false_reg, val);
4137 break;
4138 case BPF_JSET:
4139 false_reg->var_off = tnum_and(false_reg->var_off,
4140 tnum_const(~val));
4141 if (is_power_of_2(val))
4142 true_reg->var_off = tnum_or(true_reg->var_off,
4143 tnum_const(val));
4144 break;
4145 case BPF_JGT:
4146 false_reg->umax_value = min(false_reg->umax_value, val);
4147 true_reg->umin_value = max(true_reg->umin_value, val + 1);
4148 break;
4149 case BPF_JSGT:
4150 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4151 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4152 break;
4153 case BPF_JLT:
4154 false_reg->umin_value = max(false_reg->umin_value, val);
4155 true_reg->umax_value = min(true_reg->umax_value, val - 1);
4156 break;
4157 case BPF_JSLT:
4158 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4159 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4160 break;
4161 case BPF_JGE:
4162 false_reg->umax_value = min(false_reg->umax_value, val - 1);
4163 true_reg->umin_value = max(true_reg->umin_value, val);
4164 break;
4165 case BPF_JSGE:
4166 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4167 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4168 break;
4169 case BPF_JLE:
4170 false_reg->umin_value = max(false_reg->umin_value, val + 1);
4171 true_reg->umax_value = min(true_reg->umax_value, val);
4172 break;
4173 case BPF_JSLE:
4174 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4175 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4176 break;
4177 default:
4178 break;
4179 }
4180
4181 __reg_deduce_bounds(false_reg);
4182 __reg_deduce_bounds(true_reg);
4183 /* We might have learned some bits from the bounds. */
4184 __reg_bound_offset(false_reg);
4185 __reg_bound_offset(true_reg);
4186 /* Intersecting with the old var_off might have improved our bounds
4187 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4188 * then new var_off is (0; 0x7f...fc) which improves our umax.
4189 */
4190 __update_reg_bounds(false_reg);
4191 __update_reg_bounds(true_reg);
4192 }
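/* Worked example for reg_set_min_max() (bounds are hypothetical): for
 * "if r1 > 5" with r1 tracked as [0, 10], BPF_JGT tightens the true
 * branch to umin_value = max(0, 5 + 1) = 6, i.e. [6, 10], and the
 * false branch to umax_value = min(10, 5) = 5, i.e. [0, 5].
 */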
4193
4194 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
4195 * the variable reg.
4196 */
4197 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
4198 struct bpf_reg_state *false_reg, u64 val,
4199 u8 opcode)
4200 {
4201 if (__is_pointer_value(false, false_reg))
4202 return;
4203
4204 switch (opcode) {
4205 case BPF_JEQ:
4206 /* If this is false then we know nothing Jon Snow, but if it is
4207 * true then we know for sure.
4208 */
4209 __mark_reg_known(true_reg, val);
4210 break;
4211 case BPF_JNE:
4212 /* If this is true we know nothing Jon Snow, but if it is false
4213 * we know the value for sure.
4214 */
4215 __mark_reg_known(false_reg, val);
4216 break;
4217 case BPF_JSET:
4218 false_reg->var_off = tnum_and(false_reg->var_off,
4219 tnum_const(~val));
4220 if (is_power_of_2(val))
4221 true_reg->var_off = tnum_or(true_reg->var_off,
4222 tnum_const(val));
4223 break;
4224 case BPF_JGT:
4225 true_reg->umax_value = min(true_reg->umax_value, val - 1);
4226 false_reg->umin_value = max(false_reg->umin_value, val);
4227 break;
4228 case BPF_JSGT:
4229 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4230 false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4231 break;
4232 case BPF_JLT:
4233 true_reg->umin_value = max(true_reg->umin_value, val + 1);
4234 false_reg->umax_value = min(false_reg->umax_value, val);
4235 break;
4236 case BPF_JSLT:
4237 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4238 false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4239 break;
4240 case BPF_JGE:
4241 true_reg->umax_value = min(true_reg->umax_value, val);
4242 false_reg->umin_value = max(false_reg->umin_value, val + 1);
4243 break;
4244 case BPF_JSGE:
4245 true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4246 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4247 break;
4248 case BPF_JLE:
4249 true_reg->umin_value = max(true_reg->umin_value, val);
4250 false_reg->umax_value = min(false_reg->umax_value, val - 1);
4251 break;
4252 case BPF_JSLE:
4253 true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4254 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4255 break;
4256 default:
4257 break;
4258 }
4259
4260 __reg_deduce_bounds(false_reg);
4261 __reg_deduce_bounds(true_reg);
4262 /* We might have learned some bits from the bounds. */
4263 __reg_bound_offset(false_reg);
4264 __reg_bound_offset(true_reg);
4265 /* Intersecting with the old var_off might have improved our bounds
4266 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4267 * then new var_off is (0; 0x7f...fc) which improves our umax.
4268 */
4269 __update_reg_bounds(false_reg);
4270 __update_reg_bounds(true_reg);
4271 }
4272
4273 /* Regs are known to be equal, so intersect their min/max/var_off */
4274 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
4275 struct bpf_reg_state *dst_reg)
4276 {
4277 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
4278 dst_reg->umin_value);
4279 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
4280 dst_reg->umax_value);
4281 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
4282 dst_reg->smin_value);
4283 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
4284 dst_reg->smax_value);
4285 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
4286 dst_reg->var_off);
4287 /* We might have learned new bounds from the var_off. */
4288 __update_reg_bounds(src_reg);
4289 __update_reg_bounds(dst_reg);
4290 /* We might have learned something about the sign bit. */
4291 __reg_deduce_bounds(src_reg);
4292 __reg_deduce_bounds(dst_reg);
4293 /* We might have learned some bits from the bounds. */
4294 __reg_bound_offset(src_reg);
4295 __reg_bound_offset(dst_reg);
4296 /* Intersecting with the old var_off might have improved our bounds
4297 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4298 * then new var_off is (0; 0x7f...fc) which improves our umax.
4299 */
4300 __update_reg_bounds(src_reg);
4301 __update_reg_bounds(dst_reg);
4302 }
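/* Worked example (hypothetical bounds): if r1 is tracked as [0, 10]
 * and r2 as [5, 20], then after a taken "if r1 == r2" both registers
 * are narrowed to the intersection [5, 10], and their var_offs are
 * intersected via tnum_intersect() as well.
 */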
4303
4304 static void reg_combine_min_max(struct bpf_reg_state *true_src,
4305 struct bpf_reg_state *true_dst,
4306 struct bpf_reg_state *false_src,
4307 struct bpf_reg_state *false_dst,
4308 u8 opcode)
4309 {
4310 switch (opcode) {
4311 case BPF_JEQ:
4312 __reg_combine_min_max(true_src, true_dst);
4313 break;
4314 case BPF_JNE:
4315 __reg_combine_min_max(false_src, false_dst);
4316 break;
4317 }
4318 }
4319
4320 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
4321 struct bpf_reg_state *reg, u32 id,
4322 bool is_null)
4323 {
4324 if (reg_type_may_be_null(reg->type) && reg->id == id) {
4325 /* Old offset (both fixed and variable parts) should
4326 * have been known-zero, because we don't allow pointer
4327 * arithmetic on pointers that might be NULL.
4328 */
4329 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
4330 !tnum_equals_const(reg->var_off, 0) ||
4331 reg->off)) {
4332 __mark_reg_known_zero(reg);
4333 reg->off = 0;
4334 }
4335 if (is_null) {
4336 reg->type = SCALAR_VALUE;
4337 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
4338 if (reg->map_ptr->inner_map_meta) {
4339 reg->type = CONST_PTR_TO_MAP;
4340 reg->map_ptr = reg->map_ptr->inner_map_meta;
4341 } else {
4342 reg->type = PTR_TO_MAP_VALUE;
4343 }
4344 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
4345 reg->type = PTR_TO_SOCKET;
4346 }
4347 if (is_null || !reg_is_refcounted(reg)) {
4348 /* We don't need id from this point onwards anymore,
4349 * thus we should better reset it, so that state
4350 * pruning has chances to take effect.
4351 */
4352 reg->id = 0;
4353 }
4354 }
4355 }
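/* Illustrative example: after r0 = bpf_map_lookup_elem(...), r0 has
 * type PTR_TO_MAP_VALUE_OR_NULL with some id n. On the branch where
 * "if (r0 == 0)" holds, r0 (and every copy sharing id n) becomes a
 * SCALAR_VALUE; on the other branch it becomes PTR_TO_MAP_VALUE (for
 * an ordinary map) and may be dereferenced.
 */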
4356
4357 /* The logic is similar to find_good_pkt_pointers(), both could eventually
4358 * be folded together at some point.
4359 */
4360 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4361 bool is_null)
4362 {
4363 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4364 struct bpf_reg_state *reg, *regs = state->regs;
4365 u32 id = regs[regno].id;
4366 int i, j;
4367
4368 if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
4369 __release_reference_state(state, id);
4370
4371 for (i = 0; i < MAX_BPF_REG; i++)
4372 mark_ptr_or_null_reg(state, &regs[i], id, is_null);
4373
4374 for (j = 0; j <= vstate->curframe; j++) {
4375 state = vstate->frame[j];
4376 bpf_for_each_spilled_reg(i, state, reg) {
4377 if (!reg)
4378 continue;
4379 mark_ptr_or_null_reg(state, reg, id, is_null);
4380 }
4381 }
4382 }
4383
4384 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
4385 struct bpf_reg_state *dst_reg,
4386 struct bpf_reg_state *src_reg,
4387 struct bpf_verifier_state *this_branch,
4388 struct bpf_verifier_state *other_branch)
4389 {
4390 if (BPF_SRC(insn->code) != BPF_X)
4391 return false;
4392
4393 switch (BPF_OP(insn->code)) {
4394 case BPF_JGT:
4395 if ((dst_reg->type == PTR_TO_PACKET &&
4396 src_reg->type == PTR_TO_PACKET_END) ||
4397 (dst_reg->type == PTR_TO_PACKET_META &&
4398 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4399 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
4400 find_good_pkt_pointers(this_branch, dst_reg,
4401 dst_reg->type, false);
4402 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
4403 src_reg->type == PTR_TO_PACKET) ||
4404 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4405 src_reg->type == PTR_TO_PACKET_META)) {
4406 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
4407 find_good_pkt_pointers(other_branch, src_reg,
4408 src_reg->type, true);
4409 } else {
4410 return false;
4411 }
4412 break;
4413 case BPF_JLT:
4414 if ((dst_reg->type == PTR_TO_PACKET &&
4415 src_reg->type == PTR_TO_PACKET_END) ||
4416 (dst_reg->type == PTR_TO_PACKET_META &&
4417 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4418 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
4419 find_good_pkt_pointers(other_branch, dst_reg,
4420 dst_reg->type, true);
4421 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
4422 src_reg->type == PTR_TO_PACKET) ||
4423 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4424 src_reg->type == PTR_TO_PACKET_META)) {
4425 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
4426 find_good_pkt_pointers(this_branch, src_reg,
4427 src_reg->type, false);
4428 } else {
4429 return false;
4430 }
4431 break;
4432 case BPF_JGE:
4433 if ((dst_reg->type == PTR_TO_PACKET &&
4434 src_reg->type == PTR_TO_PACKET_END) ||
4435 (dst_reg->type == PTR_TO_PACKET_META &&
4436 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4437 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
4438 find_good_pkt_pointers(this_branch, dst_reg,
4439 dst_reg->type, true);
4440 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
4441 src_reg->type == PTR_TO_PACKET) ||
4442 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4443 src_reg->type == PTR_TO_PACKET_META)) {
4444 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
4445 find_good_pkt_pointers(other_branch, src_reg,
4446 src_reg->type, false);
4447 } else {
4448 return false;
4449 }
4450 break;
4451 case BPF_JLE:
4452 if ((dst_reg->type == PTR_TO_PACKET &&
4453 src_reg->type == PTR_TO_PACKET_END) ||
4454 (dst_reg->type == PTR_TO_PACKET_META &&
4455 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4456 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
4457 find_good_pkt_pointers(other_branch, dst_reg,
4458 dst_reg->type, false);
4459 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
4460 src_reg->type == PTR_TO_PACKET) ||
4461 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4462 src_reg->type == PTR_TO_PACKET_META)) {
4463 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
4464 find_good_pkt_pointers(this_branch, src_reg,
4465 src_reg->type, true);
4466 } else {
4467 return false;
4468 }
4469 break;
4470 default:
4471 return false;
4472 }
4473
4474 return true;
4475 }
4476
4477 static int check_cond_jmp_op(struct bpf_verifier_env *env,
4478 struct bpf_insn *insn, int *insn_idx)
4479 {
4480 struct bpf_verifier_state *this_branch = env->cur_state;
4481 struct bpf_verifier_state *other_branch;
4482 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
4483 struct bpf_reg_state *dst_reg, *other_branch_regs;
4484 u8 opcode = BPF_OP(insn->code);
4485 int err;
4486
4487 if (opcode > BPF_JSLE) {
4488 verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
4489 return -EINVAL;
4490 }
4491
4492 if (BPF_SRC(insn->code) == BPF_X) {
4493 if (insn->imm != 0) {
4494 verbose(env, "BPF_JMP uses reserved fields\n");
4495 return -EINVAL;
4496 }
4497
4498 /* check src1 operand */
4499 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4500 if (err)
4501 return err;
4502
4503 if (is_pointer_value(env, insn->src_reg)) {
4504 verbose(env, "R%d pointer comparison prohibited\n",
4505 insn->src_reg);
4506 return -EACCES;
4507 }
4508 } else {
4509 if (insn->src_reg != BPF_REG_0) {
4510 verbose(env, "BPF_JMP uses reserved fields\n");
4511 return -EINVAL;
4512 }
4513 }
4514
4515 /* check src2 operand */
4516 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4517 if (err)
4518 return err;
4519
4520 dst_reg = &regs[insn->dst_reg];
4521
4522 if (BPF_SRC(insn->code) == BPF_K) {
4523 int pred = is_branch_taken(dst_reg, insn->imm, opcode);
4524
4525 if (pred == 1) {
4526 /* only follow the goto, ignore fall-through */
4527 *insn_idx += insn->off;
4528 return 0;
4529 } else if (pred == 0) {
4530 /* only follow fall-through branch, since
4531 * that's where the program will go
4532 */
4533 return 0;
4534 }
4535 }
4536
4537 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
4538 false);
4539 if (!other_branch)
4540 return -EFAULT;
4541 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
4542
4543 /* detect if we are comparing against a constant value so we can adjust
4544 * our min/max values for our dst register.
4545 * this is only legit if both are scalars (or pointers to the same
4546 * object, I suppose, but we don't support that right now), because
4547 * otherwise the different base pointers mean the offsets aren't
4548 * comparable.
4549 */
4550 if (BPF_SRC(insn->code) == BPF_X) {
4551 if (dst_reg->type == SCALAR_VALUE &&
4552 regs[insn->src_reg].type == SCALAR_VALUE) {
4553 if (tnum_is_const(regs[insn->src_reg].var_off))
4554 reg_set_min_max(&other_branch_regs[insn->dst_reg],
4555 dst_reg, regs[insn->src_reg].var_off.value,
4556 opcode);
4557 else if (tnum_is_const(dst_reg->var_off))
4558 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
4559 &regs[insn->src_reg],
4560 dst_reg->var_off.value, opcode);
4561 else if (opcode == BPF_JEQ || opcode == BPF_JNE)
4562 /* Comparing for equality, we can combine knowledge */
4563 reg_combine_min_max(&other_branch_regs[insn->src_reg],
4564 &other_branch_regs[insn->dst_reg],
4565 &regs[insn->src_reg],
4566 &regs[insn->dst_reg], opcode);
4567 }
4568 } else if (dst_reg->type == SCALAR_VALUE) {
4569 reg_set_min_max(&other_branch_regs[insn->dst_reg],
4570 dst_reg, insn->imm, opcode);
4571 }
4572
4573 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
4574 if (BPF_SRC(insn->code) == BPF_K &&
4575 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
4576 reg_type_may_be_null(dst_reg->type)) {
4577 /* Mark all identical registers in each branch as either
4578 * safe or unknown depending on the R == 0 or R != 0 condition.
4579 */
4580 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
4581 opcode == BPF_JNE);
4582 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
4583 opcode == BPF_JEQ);
4584 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
4585 this_branch, other_branch) &&
4586 is_pointer_value(env, insn->dst_reg)) {
4587 verbose(env, "R%d pointer comparison prohibited\n",
4588 insn->dst_reg);
4589 return -EACCES;
4590 }
4591 if (env->log.level)
4592 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
4593 return 0;
4594 }
4595
4596 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
4597 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
4598 {
4599 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
4600
4601 return (struct bpf_map *) (unsigned long) imm64;
4602 }
4603
4604 /* verify BPF_LD_IMM64 instruction */
4605 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
4606 {
4607 struct bpf_reg_state *regs = cur_regs(env);
4608 int err;
4609
4610 if (BPF_SIZE(insn->code) != BPF_DW) {
4611 verbose(env, "invalid BPF_LD_IMM insn\n");
4612 return -EINVAL;
4613 }
4614 if (insn->off != 0) {
4615 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
4616 return -EINVAL;
4617 }
4618
4619 err = check_reg_arg(env, insn->dst_reg, DST_OP);
4620 if (err)
4621 return err;
4622
4623 if (insn->src_reg == 0) {
4624 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
4625
4626 regs[insn->dst_reg].type = SCALAR_VALUE;
4627 __mark_reg_known(&regs[insn->dst_reg], imm);
4628 return 0;
4629 }
4630
4631 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
4632 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
4633
4634 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
4635 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
4636 return 0;
4637 }
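/* Illustrative encoding example: BPF_LD_IMM64 is a two-insn pseudo
 * instruction. Loading the constant 0x1122334455667788 is encoded with
 * insn[0].imm = 0x55667788 (low 32 bits) and insn[1].imm = 0x11223344
 * (high 32 bits), matching the reconstruction above.
 */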
4638
4639 static bool may_access_skb(enum bpf_prog_type type)
4640 {
4641 switch (type) {
4642 case BPF_PROG_TYPE_SOCKET_FILTER:
4643 case BPF_PROG_TYPE_SCHED_CLS:
4644 case BPF_PROG_TYPE_SCHED_ACT:
4645 return true;
4646 default:
4647 return false;
4648 }
4649 }
4650
4651 /* verify safety of LD_ABS|LD_IND instructions:
4652 * - they can only appear in the programs where ctx == skb
4653 * - since they are wrappers of function calls, they scratch R1-R5 registers,
4654 * preserve R6-R9, and store return value into R0
4655 *
4656 * Implicit input:
4657 * ctx == skb == R6 == CTX
4658 *
4659 * Explicit input:
4660 * SRC == any register
4661 * IMM == 32-bit immediate
4662 *
4663 * Output:
4664 * R0 - 8/16/32-bit skb data converted to cpu endianness
4665 */
4666 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4667 {
4668 struct bpf_reg_state *regs = cur_regs(env);
4669 u8 mode = BPF_MODE(insn->code);
4670 int i, err;
4671
4672 if (!may_access_skb(env->prog->type)) {
4673 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
4674 return -EINVAL;
4675 }
4676
4677 if (!env->ops->gen_ld_abs) {
4678 verbose(env, "bpf verifier is misconfigured\n");
4679 return -EINVAL;
4680 }
4681
4682 if (env->subprog_cnt > 1) {
4683 /* when a program has an LD_ABS insn, JITs and the interpreter
4684 * assume that r1 == ctx == skb, which is not the case for
4685 * callees that can have arbitrary arguments. It's problematic
4686 * for the main prog as well, since JITs would need to analyze
4687 * all functions in order to make proper register save/restore
4688 * decisions in the main prog. Hence disallow LD_ABS with calls.
4689 */
4690 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
4691 return -EINVAL;
4692 }
4693
4694 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
4695 BPF_SIZE(insn->code) == BPF_DW ||
4696 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
4697 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
4698 return -EINVAL;
4699 }
4700
4701 /* check whether implicit source operand (register R6) is readable */
4702 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
4703 if (err)
4704 return err;
4705
4706 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
4707 * gen_ld_abs() may terminate the program at runtime, leading to
4708 * reference leak.
4709 */
4710 err = check_reference_leak(env);
4711 if (err) {
4712 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
4713 return err;
4714 }
4715
4716 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4717 verbose(env,
4718 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
4719 return -EINVAL;
4720 }
4721
4722 if (mode == BPF_IND) {
4723 /* check explicit source operand */
4724 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4725 if (err)
4726 return err;
4727 }
4728
4729 /* reset caller saved regs to unreadable */
4730 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4731 mark_reg_not_init(env, regs, caller_saved[i]);
4732 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4733 }
4734
4735 /* mark destination R0 register as readable, since it contains
4736 * the value fetched from the packet.
4737 * Already marked as written above.
4738 */
4739 mark_reg_unknown(env, regs, BPF_REG_0);
4740 return 0;
4741 }
4742
4743 static int check_return_code(struct bpf_verifier_env *env)
4744 {
4745 struct bpf_reg_state *reg;
4746 struct tnum range = tnum_range(0, 1);
4747
4748 switch (env->prog->type) {
4749 case BPF_PROG_TYPE_CGROUP_SKB:
4750 case BPF_PROG_TYPE_CGROUP_SOCK:
4751 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4752 case BPF_PROG_TYPE_SOCK_OPS:
4753 case BPF_PROG_TYPE_CGROUP_DEVICE:
4754 break;
4755 default:
4756 return 0;
4757 }
4758
4759 reg = cur_regs(env) + BPF_REG_0;
4760 if (reg->type != SCALAR_VALUE) {
4761 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
4762 reg_type_str[reg->type]);
4763 return -EINVAL;
4764 }
4765
4766 if (!tnum_in(range, reg->var_off)) {
4767 verbose(env, "At program exit the register R0 ");
4768 if (!tnum_is_unknown(reg->var_off)) {
4769 char tn_buf[48];
4770
4771 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4772 verbose(env, "has value %s", tn_buf);
4773 } else {
4774 verbose(env, "has unknown scalar value");
4775 }
4776 verbose(env, " should have been 0 or 1\n");
4777 return -EINVAL;
4778 }
4779 return 0;
4780 }
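/* Illustrative example: a BPF_PROG_TYPE_CGROUP_SKB program that ends
 * with "r0 = 2; exit" is rejected here, since the constant 2 is not
 * within tnum_range(0, 1); the verifier reports that R0 has value 2
 * where it should have been 0 or 1.
 */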
4781
4782 /* non-recursive DFS pseudo code
4783 * 1 procedure DFS-iterative(G,v):
4784 * 2 label v as discovered
4785 * 3 let S be a stack
4786 * 4 S.push(v)
4787 * 5 while S is not empty
4788 * 6 t <- S.pop()
4789 * 7 if t is what we're looking for:
4790 * 8 return t
4791 * 9 for all edges e in G.adjacentEdges(t) do
4792 * 10 if edge e is already labelled
4793 * 11 continue with the next edge
4794 * 12 w <- G.adjacentVertex(t,e)
4795 * 13 if vertex w is not discovered and not explored
4796 * 14 label e as tree-edge
4797 * 15 label w as discovered
4798 * 16 S.push(w)
4799 * 17 continue at 5
4800 * 18 else if vertex w is discovered
4801 * 19 label e as back-edge
4802 * 20 else
4803 * 21 // vertex w is explored
4804 * 22 label e as forward- or cross-edge
4805 * 23 label t as explored
4806 * 24 S.pop()
4807 *
4808 * convention:
4809 * 0x10 - discovered
4810 * 0x11 - discovered and fall-through edge labelled
4811 * 0x12 - discovered and fall-through and branch edges labelled
4812 * 0x20 - explored
4813 */
4814
4815 enum {
4816 DISCOVERED = 0x10,
4817 EXPLORED = 0x20,
4818 FALLTHROUGH = 1,
4819 BRANCH = 2,
4820 };
4821
4822 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4823
4824 static int *insn_stack; /* stack of insns to process */
4825 static int cur_stack; /* current stack index */
4826 static int *insn_state;
4827
4828 /* t, w, e - match pseudo-code above:
4829 * t - index of current instruction
4830 * w - next instruction
4831 * e - edge
4832 */
4833 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4834 {
4835 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4836 return 0;
4837
4838 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4839 return 0;
4840
4841 if (w < 0 || w >= env->prog->len) {
4842 verbose_linfo(env, t, "%d: ", t);
4843 verbose(env, "jump out of range from insn %d to %d\n", t, w);
4844 return -EINVAL;
4845 }
4846
4847 if (e == BRANCH)
4848 /* mark branch target for state pruning */
4849 env->explored_states[w] = STATE_LIST_MARK;
4850
4851 if (insn_state[w] == 0) {
4852 /* tree-edge */
4853 insn_state[t] = DISCOVERED | e;
4854 insn_state[w] = DISCOVERED;
4855 if (cur_stack >= env->prog->len)
4856 return -E2BIG;
4857 insn_stack[cur_stack++] = w;
4858 return 1;
4859 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4860 verbose_linfo(env, t, "%d: ", t);
4861 verbose_linfo(env, w, "%d: ", w);
4862 verbose(env, "back-edge from insn %d to %d\n", t, w);
4863 return -EINVAL;
4864 } else if (insn_state[w] == EXPLORED) {
4865 /* forward- or cross-edge */
4866 insn_state[t] = DISCOVERED | e;
4867 } else {
4868 verbose(env, "insn state internal bug\n");
4869 return -EFAULT;
4870 }
4871 return 0;
4872 }
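/* Illustrative example: in the sequence
 *   2: r1 += 1
 *   3: if r1 < 10 goto -2   (branch target w = 3 + (-2) + 1 = 2)
 * insn 2 is still DISCOVERED when the branch edge is followed, so
 * push_insn() reports a back-edge from insn 3 to insn 2 and the loop
 * is rejected.
 */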
4873
4874 /* non-recursive depth-first-search to detect loops in BPF program
4875 * loop == back-edge in directed graph
4876 */
4877 static int check_cfg(struct bpf_verifier_env *env)
4878 {
4879 struct bpf_insn *insns = env->prog->insnsi;
4880 int insn_cnt = env->prog->len;
4881 int ret = 0;
4882 int i, t;
4883
4884 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4885 if (!insn_state)
4886 return -ENOMEM;
4887
4888 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4889 if (!insn_stack) {
4890 kfree(insn_state);
4891 return -ENOMEM;
4892 }
4893
4894 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4895 insn_stack[0] = 0; /* 0 is the first instruction */
4896 cur_stack = 1;
4897
4898 peek_stack:
4899 if (cur_stack == 0)
4900 goto check_state;
4901 t = insn_stack[cur_stack - 1];
4902
4903 if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4904 u8 opcode = BPF_OP(insns[t].code);
4905
4906 if (opcode == BPF_EXIT) {
4907 goto mark_explored;
4908 } else if (opcode == BPF_CALL) {
4909 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4910 if (ret == 1)
4911 goto peek_stack;
4912 else if (ret < 0)
4913 goto err_free;
4914 if (t + 1 < insn_cnt)
4915 env->explored_states[t + 1] = STATE_LIST_MARK;
4916 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4917 env->explored_states[t] = STATE_LIST_MARK;
4918 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4919 if (ret == 1)
4920 goto peek_stack;
4921 else if (ret < 0)
4922 goto err_free;
4923 }
4924 } else if (opcode == BPF_JA) {
4925 if (BPF_SRC(insns[t].code) != BPF_K) {
4926 ret = -EINVAL;
4927 goto err_free;
4928 }
4929 /* unconditional jump with single edge */
4930 ret = push_insn(t, t + insns[t].off + 1,
4931 FALLTHROUGH, env);
4932 if (ret == 1)
4933 goto peek_stack;
4934 else if (ret < 0)
4935 goto err_free;
4936 /* tell verifier to check for equivalent states
4937 * after every call and jump
4938 */
4939 if (t + 1 < insn_cnt)
4940 env->explored_states[t + 1] = STATE_LIST_MARK;
4941 } else {
4942 /* conditional jump with two edges */
4943 env->explored_states[t] = STATE_LIST_MARK;
4944 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4945 if (ret == 1)
4946 goto peek_stack;
4947 else if (ret < 0)
4948 goto err_free;
4949
4950 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4951 if (ret == 1)
4952 goto peek_stack;
4953 else if (ret < 0)
4954 goto err_free;
4955 }
4956 } else {
4957 /* all other non-branch instructions with single
4958 * fall-through edge
4959 */
4960 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4961 if (ret == 1)
4962 goto peek_stack;
4963 else if (ret < 0)
4964 goto err_free;
4965 }
4966
4967 mark_explored:
4968 insn_state[t] = EXPLORED;
4969 if (cur_stack-- <= 0) {
4970 verbose(env, "pop stack internal bug\n");
4971 ret = -EFAULT;
4972 goto err_free;
4973 }
4974 goto peek_stack;
4975
4976 check_state:
4977 for (i = 0; i < insn_cnt; i++) {
4978 if (insn_state[i] != EXPLORED) {
4979 verbose(env, "unreachable insn %d\n", i);
4980 ret = -EINVAL;
4981 goto err_free;
4982 }
4983 }
4984 ret = 0; /* cfg looks good */
4985
4986 err_free:
4987 kfree(insn_state);
4988 kfree(insn_stack);
4989 return ret;
4990 }
4991
4992 /* The minimum supported BTF func info size */
4993 #define MIN_BPF_FUNCINFO_SIZE 8
4994 #define MAX_FUNCINFO_REC_SIZE 252
4995
4996 static int check_btf_func(struct bpf_verifier_env *env,
4997 const union bpf_attr *attr,
4998 union bpf_attr __user *uattr)
4999 {
5000 u32 i, nfuncs, urec_size, min_size, prev_offset;
5001 u32 krec_size = sizeof(struct bpf_func_info);
5002 struct bpf_func_info *krecord;
5003 const struct btf_type *type;
5004 struct bpf_prog *prog;
5005 const struct btf *btf;
5006 void __user *urecord;
5007 int ret = 0;
5008
5009 nfuncs = attr->func_info_cnt;
5010 if (!nfuncs)
5011 return 0;
5012
5013 if (nfuncs != env->subprog_cnt) {
5014 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
5015 return -EINVAL;
5016 }
5017
5018 urec_size = attr->func_info_rec_size;
5019 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
5020 urec_size > MAX_FUNCINFO_REC_SIZE ||
5021 urec_size % sizeof(u32)) {
5022 verbose(env, "invalid func info rec size %u\n", urec_size);
5023 return -EINVAL;
5024 }
5025
5026 prog = env->prog;
5027 btf = prog->aux->btf;
5028
5029 urecord = u64_to_user_ptr(attr->func_info);
5030 min_size = min_t(u32, krec_size, urec_size);
5031
5032 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
5033 if (!krecord)
5034 return -ENOMEM;
5035
5036 for (i = 0; i < nfuncs; i++) {
5037 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
5038 if (ret) {
5039 if (ret == -E2BIG) {
5040 verbose(env, "nonzero tailing record in func info");
5041 /* set the size kernel expects so loader can zero
5042 * out the rest of the record.
5043 */
5044 if (put_user(min_size, &uattr->func_info_rec_size))
5045 ret = -EFAULT;
5046 }
5047 goto err_free;
5048 }
5049
5050 if (copy_from_user(&krecord[i], urecord, min_size)) {
5051 ret = -EFAULT;
5052 goto err_free;
5053 }
5054
5055 /* check insn_off */
5056 if (i == 0) {
5057 if (krecord[i].insn_off) {
5058 verbose(env,
5059 "nonzero insn_off %u for the first func info record",
5060 krecord[i].insn_off);
5061 ret = -EINVAL;
5062 goto err_free;
5063 }
5064 } else if (krecord[i].insn_off <= prev_offset) {
5065 verbose(env,
5066 "same or smaller insn offset (%u) than previous func info record (%u)",
5067 krecord[i].insn_off, prev_offset);
5068 ret = -EINVAL;
5069 goto err_free;
5070 }
5071
5072 if (env->subprog_info[i].start != krecord[i].insn_off) {
5073 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
5074 ret = -EINVAL;
5075 goto err_free;
5076 }
5077
5078 /* check type_id */
5079 type = btf_type_by_id(btf, krecord[i].type_id);
5080 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
5081 verbose(env, "invalid type id %d in func info",
5082 krecord[i].type_id);
5083 ret = -EINVAL;
5084 goto err_free;
5085 }
5086
5087 prev_offset = krecord[i].insn_off;
5088 urecord += urec_size;
5089 }
5090
5091 prog->aux->func_info = krecord;
5092 prog->aux->func_info_cnt = nfuncs;
5093 return 0;
5094
5095 err_free:
5096 kvfree(krecord);
5097 return ret;
5098 }
5099
5100 static void adjust_btf_func(struct bpf_verifier_env *env)
5101 {
5102 int i;
5103
5104 if (!env->prog->aux->func_info)
5105 return;
5106
5107 for (i = 0; i < env->subprog_cnt; i++)
5108 env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
5109 }
5110
5111 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
5112 sizeof(((struct bpf_line_info *)(0))->line_col))
5113 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
5114
5115 static int check_btf_line(struct bpf_verifier_env *env,
5116 const union bpf_attr *attr,
5117 union bpf_attr __user *uattr)
5118 {
5119 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
5120 struct bpf_subprog_info *sub;
5121 struct bpf_line_info *linfo;
5122 struct bpf_prog *prog;
5123 const struct btf *btf;
5124 void __user *ulinfo;
5125 int err;
5126
5127 nr_linfo = attr->line_info_cnt;
5128 if (!nr_linfo)
5129 return 0;
5130
5131 rec_size = attr->line_info_rec_size;
5132 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
5133 rec_size > MAX_LINEINFO_REC_SIZE ||
5134 rec_size & (sizeof(u32) - 1))
5135 return -EINVAL;
5136
5137 /* Need to zero it in case userspace passes in a
5138 * smaller bpf_line_info object.
5139 */
5140 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
5141 GFP_KERNEL | __GFP_NOWARN);
5142 if (!linfo)
5143 return -ENOMEM;
5144
5145 prog = env->prog;
5146 btf = prog->aux->btf;
5147
5148 s = 0;
5149 sub = env->subprog_info;
5150 ulinfo = u64_to_user_ptr(attr->line_info);
5151 expected_size = sizeof(struct bpf_line_info);
5152 ncopy = min_t(u32, expected_size, rec_size);
5153 for (i = 0; i < nr_linfo; i++) {
5154 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
5155 if (err) {
5156 if (err == -E2BIG) {
5157 verbose(env, "nonzero tailing record in line_info");
5158 if (put_user(expected_size,
5159 &uattr->line_info_rec_size))
5160 err = -EFAULT;
5161 }
5162 goto err_free;
5163 }
5164
5165 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
5166 err = -EFAULT;
5167 goto err_free;
5168 }
5169
5170 /*
5171 * Check insn_off to ensure
5172 * 1) strictly increasing AND
5173 * 2) bounded by prog->len
5174 *
5175 * The linfo[0].insn_off == 0 check logically falls into
5176 * the later "missing bpf_line_info for func..." case
5177 * because linfo[0].insn_off must also belong to the
5178 * first sub, and the first sub must have
5179 * subprog_info[0].start == 0.
5180 */
5181 if ((i && linfo[i].insn_off <= prev_offset) ||
5182 linfo[i].insn_off >= prog->len) {
5183 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
5184 i, linfo[i].insn_off, prev_offset,
5185 prog->len);
5186 err = -EINVAL;
5187 goto err_free;
5188 }
5189
5190 if (!prog->insnsi[linfo[i].insn_off].code) {
5191 verbose(env,
5192 "Invalid insn code at line_info[%u].insn_off\n",
5193 i);
5194 err = -EINVAL;
5195 goto err_free;
5196 }
5197
5198 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
5199 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
5200 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
5201 err = -EINVAL;
5202 goto err_free;
5203 }
5204
5205 if (s != env->subprog_cnt) {
5206 if (linfo[i].insn_off == sub[s].start) {
5207 sub[s].linfo_idx = i;
5208 s++;
5209 } else if (sub[s].start < linfo[i].insn_off) {
5210 verbose(env, "missing bpf_line_info for func#%u\n", s);
5211 err = -EINVAL;
5212 goto err_free;
5213 }
5214 }
5215
5216 prev_offset = linfo[i].insn_off;
5217 ulinfo += rec_size;
5218 }
5219
5220 if (s != env->subprog_cnt) {
5221 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
5222 env->subprog_cnt - s, s);
5223 err = -EINVAL;
5224 goto err_free;
5225 }
5226
5227 prog->aux->linfo = linfo;
5228 prog->aux->nr_linfo = nr_linfo;
5229
5230 return 0;
5231
5232 err_free:
5233 kvfree(linfo);
5234 return err;
5235 }
5236
5237 static int check_btf_info(struct bpf_verifier_env *env,
5238 const union bpf_attr *attr,
5239 union bpf_attr __user *uattr)
5240 {
5241 struct btf *btf;
5242 int err;
5243
5244 if (!attr->func_info_cnt && !attr->line_info_cnt)
5245 return 0;
5246
5247 btf = btf_get_by_fd(attr->prog_btf_fd);
5248 if (IS_ERR(btf))
5249 return PTR_ERR(btf);
5250 env->prog->aux->btf = btf;
5251
5252 err = check_btf_func(env, attr, uattr);
5253 if (err)
5254 return err;
5255
5256 err = check_btf_line(env, attr, uattr);
5257 if (err)
5258 return err;
5259
5260 return 0;
5261 }
5262
5263 /* check %cur's range satisfies %old's */
5264 static bool range_within(struct bpf_reg_state *old,
5265 struct bpf_reg_state *cur)
5266 {
5267 return old->umin_value <= cur->umin_value &&
5268 old->umax_value >= cur->umax_value &&
5269 old->smin_value <= cur->smin_value &&
5270 old->smax_value >= cur->smax_value;
5271 }
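/* Worked example (hypothetical bounds): an old state with r1 in
 * [0, 10] covers a current state with r1 in [2, 8], so pruning may
 * apply; a current range of [2, 12] is not within [0, 10] and the
 * states cannot be considered equivalent on this register.
 */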
5272
5273 /* Maximum number of register states that can exist at once */
5274 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
5275 struct idpair {
5276 u32 old;
5277 u32 cur;
5278 };
5279
5280 /* If in the old state two registers had the same id, then they need to have
5281 * the same id in the new state as well. But that id could be different from
5282 * the old state, so we need to track the mapping from old to new ids.
5283 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
5284 * regs with old id 5 must also have new id 9 for the new state to be safe. But
5285 * regs with a different old id could still have new id 9, we don't care about
5286 * that.
5287 * So we look through our idmap to see if this old id has been seen before. If
5288 * so, we require the new id to match; otherwise, we add the id pair to the map.
5289 */
5290 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
5291 {
5292 unsigned int i;
5293
5294 for (i = 0; i < ID_MAP_SIZE; i++) {
5295 if (!idmap[i].old) {
5296 /* Reached an empty slot; haven't seen this id before */
5297 idmap[i].old = old_id;
5298 idmap[i].cur = cur_id;
5299 return true;
5300 }
5301 if (idmap[i].old == old_id)
5302 return idmap[i].cur == cur_id;
5303 }
5304 /* We ran out of idmap slots, which should be impossible */
5305 WARN_ON_ONCE(1);
5306 return false;
5307 }
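/* Example (illustrative ids): suppose in the old state r1.id == r2.id == 5
 * because r2 was copied from r1 after a map lookup. If the current state
 * has r1.id == r2.id == 9, the first check_ids() call records the 5 -> 9
 * mapping and the second call confirms it, so the states can match. If
 * instead r2.id == 7, the second call finds 5 already mapped to 9 and
 * returns false.
 */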
5308
5309 static void clean_func_state(struct bpf_verifier_env *env,
5310 struct bpf_func_state *st)
5311 {
5312 enum bpf_reg_liveness live;
5313 int i, j;
5314
5315 for (i = 0; i < BPF_REG_FP; i++) {
5316 live = st->regs[i].live;
5317 /* liveness must not touch this register anymore */
5318 st->regs[i].live |= REG_LIVE_DONE;
5319 if (!(live & REG_LIVE_READ))
5320 /* since the register is unused, clear its state
5321 * to make further comparison simpler
5322 */
5323 __mark_reg_not_init(&st->regs[i]);
5324 }
5325
5326 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
5327 live = st->stack[i].spilled_ptr.live;
5328 /* liveness must not touch this stack slot anymore */
5329 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
5330 if (!(live & REG_LIVE_READ)) {
5331 __mark_reg_not_init(&st->stack[i].spilled_ptr);
5332 for (j = 0; j < BPF_REG_SIZE; j++)
5333 st->stack[i].slot_type[j] = STACK_INVALID;
5334 }
5335 }
5336 }
5337
5338 static void clean_verifier_state(struct bpf_verifier_env *env,
5339 struct bpf_verifier_state *st)
5340 {
5341 int i;
5342
5343 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
5344 /* all regs in this state in all frames were already marked */
5345 return;
5346
5347 for (i = 0; i <= st->curframe; i++)
5348 clean_func_state(env, st->frame[i]);
5349 }
5350
5351 /* the parentage chains form a tree.
5352 * the verifier states are added to state lists at given insn and
5353 * pushed into state stack for future exploration.
5354 * when the verifier reaches bpf_exit insn some of the verifier states
5355 * stored in the state lists have their final liveness state already,
5356 * but a lot of states will get revised from liveness point of view when
5357 * the verifier explores other branches.
5358 * Example:
5359 * 1: r0 = 1
5360 * 2: if r1 == 100 goto pc+1
5361 * 3: r0 = 2
5362 * 4: exit
5363 * when the verifier reaches exit insn the register r0 in the state list of
5364 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
5365 * of insn 2 and goes exploring further. At the insn 4 it will walk the
5366 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
5367 *
5368 * Since the verifier pushes the branch states as it sees them while exploring
5369 * the program, walking a branch instruction for the second
5370 * time means that all states below this branch were already explored and
5371 * their final liveness marks are already propagated.
5372 * Hence when the verifier completes the search of the state list in is_state_visited()
5373 * we can call this clean_live_states() function to mark all liveness states
5374 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
5375 * will not be used.
5376 * This function also clears the registers and stack slots that were
5377 * never read (!REG_LIVE_READ) to simplify state merging.
5378 *
5379 * An important note here: walking the same branch instruction in the callee
5380 * doesn't mean that the states are DONE. The verifier has to compare
5381 * the callsites as well.
5382 */
5383 static void clean_live_states(struct bpf_verifier_env *env, int insn,
5384 struct bpf_verifier_state *cur)
5385 {
5386 struct bpf_verifier_state_list *sl;
5387 int i;
5388
5389 sl = env->explored_states[insn];
5390 if (!sl)
5391 return;
5392
5393 while (sl != STATE_LIST_MARK) {
5394 if (sl->state.curframe != cur->curframe)
5395 goto next;
5396 for (i = 0; i <= cur->curframe; i++)
5397 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
5398 goto next;
5399 clean_verifier_state(env, &sl->state);
5400 next:
5401 sl = sl->next;
5402 }
5403 }
5404
5405 /* Returns true if (rold safe implies rcur safe) */
5406 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
5407 struct idpair *idmap)
5408 {
5409 bool equal;
5410
5411 if (!(rold->live & REG_LIVE_READ))
5412 /* explored state didn't use this */
5413 return true;
5414
5415 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
5416
5417 if (rold->type == PTR_TO_STACK)
5418 /* two stack pointers are equal only if they're pointing to
5419 * the same stack frame, since fp-8 in foo != fp-8 in bar
5420 */
5421 return equal && rold->frameno == rcur->frameno;
5422
5423 if (equal)
5424 return true;
5425
5426 if (rold->type == NOT_INIT)
5427 /* explored state can't have used this */
5428 return true;
5429 if (rcur->type == NOT_INIT)
5430 return false;
5431 switch (rold->type) {
5432 case SCALAR_VALUE:
5433 if (rcur->type == SCALAR_VALUE) {
5434 /* new val must satisfy old val knowledge */
5435 return range_within(rold, rcur) &&
5436 tnum_in(rold->var_off, rcur->var_off);
5437 } else {
5438 /* We're trying to use a pointer in place of a scalar.
5439 * Even if the scalar was unbounded, this could lead to
5440 * pointer leaks because scalars are allowed to leak
5441 * while pointers are not. We could make this safe in
5442 * special cases if root is calling us, but it's
5443 * probably not worth the hassle.
5444 */
5445 return false;
5446 }
5447 case PTR_TO_MAP_VALUE:
5448 /* If the new min/max/var_off satisfy the old ones and
5449 * everything else matches, we are OK.
5450 * We don't care about the 'id' value, because nothing
5451 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
5452 */
5453 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
5454 range_within(rold, rcur) &&
5455 tnum_in(rold->var_off, rcur->var_off);
5456 case PTR_TO_MAP_VALUE_OR_NULL:
5457 /* a PTR_TO_MAP_VALUE could be safe to use as a
5458 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
5459 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
5460 * checked, doing so could have affected others with the same
5461 * id, and we can't check for that because we lost the id when
5462 * we converted to a PTR_TO_MAP_VALUE.
5463 */
5464 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
5465 return false;
5466 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
5467 return false;
5468 /* Check our ids match any regs they're supposed to */
5469 return check_ids(rold->id, rcur->id, idmap);
5470 case PTR_TO_PACKET_META:
5471 case PTR_TO_PACKET:
5472 if (rcur->type != rold->type)
5473 return false;
5474 /* We must have at least as much range as the old ptr
5475 * did, so that any accesses which were safe before are
5476 * still safe. This is true even if old range < old off,
5477 * since someone could have accessed through (ptr - k), or
5478 * even done ptr -= k in a register, to get a safe access.
5479 */
5480 if (rold->range > rcur->range)
5481 return false;
5482 /* If the offsets don't match, we can't trust our alignment;
5483 * nor can we be sure that we won't fall out of range.
5484 */
5485 if (rold->off != rcur->off)
5486 return false;
5487 /* id relations must be preserved */
5488 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
5489 return false;
5490 /* new val must satisfy old val knowledge */
5491 return range_within(rold, rcur) &&
5492 tnum_in(rold->var_off, rcur->var_off);
5493 case PTR_TO_CTX:
5494 case CONST_PTR_TO_MAP:
5495 case PTR_TO_PACKET_END:
5496 case PTR_TO_FLOW_KEYS:
5497 case PTR_TO_SOCKET:
5498 case PTR_TO_SOCKET_OR_NULL:
5499 /* Only valid matches are exact, which memcmp() above
5500 * would have accepted
5501 */
5502 default:
5503 /* Don't know what's going on, just say it's not safe */
5504 return false;
5505 }
5506
5507 /* Shouldn't get here; if we do, say it's not safe */
5508 WARN_ON_ONCE(1);
5509 return false;
5510 }
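/* Example for the SCALAR_VALUE case above (arbitrary numbers): an old
 * state verified with r3 in [0, 255] and var_off (0x0; 0xff) also
 * covers a current r3 in [16, 32] with var_off (0x0; 0x3f), since both
 * range_within() and tnum_in() hold. A current r3 in [0, 1024] would
 * need unknown bits above 0xff, so tnum_in() fails and that branch has
 * to be verified on its own.
 */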
5511
5512 static bool stacksafe(struct bpf_func_state *old,
5513 struct bpf_func_state *cur,
5514 struct idpair *idmap)
5515 {
5516 int i, spi;
5517
5518 /* walk slots of the explored stack and ignore any additional
5519 * slots in the current stack, since the explored (safe) state
5520 * didn't use them
5521 */
5522 for (i = 0; i < old->allocated_stack; i++) {
5523 spi = i / BPF_REG_SIZE;
5524
5525 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
5526 i += BPF_REG_SIZE - 1;
5527 /* explored state didn't use this */
5528 continue;
5529 }
5530
5531 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
5532 continue;
5533
5534 /* explored stack has more populated slots than current stack
5535 * and these slots were used
5536 */
5537 if (i >= cur->allocated_stack)
5538 return false;
5539
5540 /* if old state was safe with misc data in the stack
5541 * it will be safe with zero-initialized stack.
5542 * The opposite is not true
5543 */
5544 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
5545 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
5546 continue;
5547 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
5548 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
5549 /* Ex: old explored (safe) state has STACK_SPILL in
5550 * this stack slot, but current has STACK_MISC ->
5551 * these verifier states are not equivalent,
5552 * return false to continue verification of this path
5553 */
5554 return false;
5555 if (i % BPF_REG_SIZE)
5556 continue;
5557 if (old->stack[spi].slot_type[0] != STACK_SPILL)
5558 continue;
5559 if (!regsafe(&old->stack[spi].spilled_ptr,
5560 &cur->stack[spi].spilled_ptr,
5561 idmap))
5562 /* when explored and current stack slot are both storing
5563 * spilled registers, check that the stored pointer types
5564 * are the same as well.
5565 * Ex: explored safe path could have stored
5566 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
5567 * but current path has stored:
5568 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
5569 * such verifier states are not equivalent.
5570 * return false to continue verification of this path
5571 */
5572 return false;
5573 }
5574 return true;
5575 }
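/* Example: an old state that passed verification with fp-8 marked
 * STACK_MISC also covers a current state where fp-8 is STACK_ZERO,
 * since zeros are just a special case of 'misc' data. The reverse
 * pairing, or old STACK_SPILL vs current STACK_MISC, is rejected:
 * the old path relied on information the current slot doesn't carry.
 */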
5576
5577 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
5578 {
5579 if (old->acquired_refs != cur->acquired_refs)
5580 return false;
5581 return !memcmp(old->refs, cur->refs,
5582 sizeof(*old->refs) * old->acquired_refs);
5583 }
5584
5585 /* compare two verifier states
5586 *
5587 * all states stored in state_list are known to be valid, since
5588 * verifier reached 'bpf_exit' instruction through them
5589 *
5590 * this function is called when the verifier explores different branches of
5591 * execution popped from the state stack. If it sees an old state that has
5592 * a more strict register state and a more strict stack state, then this execution
5593 * branch doesn't need to be explored further, since the verifier already
5594 * concluded that more strict state leads to valid finish.
5595 *
5596 * Therefore two states are equivalent if register state is more conservative
5597 * and explored stack state is more conservative than the current one.
5598 * Example:
5599 * explored current
5600 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
5601 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
5602 *
5603 * In other words, if the current stack state (the one being explored) has
5604 * more valid slots than the old one that already passed validation, the
5605 * verifier can stop exploring and conclude that the current state is valid too.
5606 *
5607 * Similarly with registers: if the explored state marks a register type as
5608 * invalid whereas the register type in the current state is meaningful, the
5609 * current state will reach the 'bpf_exit' instruction safely.
5610 */
5611 static bool func_states_equal(struct bpf_func_state *old,
5612 struct bpf_func_state *cur)
5613 {
5614 struct idpair *idmap;
5615 bool ret = false;
5616 int i;
5617
5618 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
5619 /* If we failed to allocate the idmap, just say it's not safe */
5620 if (!idmap)
5621 return false;
5622
5623 for (i = 0; i < MAX_BPF_REG; i++) {
5624 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
5625 goto out_free;
5626 }
5627
5628 if (!stacksafe(old, cur, idmap))
5629 goto out_free;
5630
5631 if (!refsafe(old, cur))
5632 goto out_free;
5633 ret = true;
5634 out_free:
5635 kfree(idmap);
5636 return ret;
5637 }
5638
5639 static bool states_equal(struct bpf_verifier_env *env,
5640 struct bpf_verifier_state *old,
5641 struct bpf_verifier_state *cur)
5642 {
5643 int i;
5644
5645 if (old->curframe != cur->curframe)
5646 return false;
5647
5648 /* Verification state from speculative execution simulation
5649 * must never prune a non-speculative execution one.
5650 */
5651 if (old->speculative && !cur->speculative)
5652 return false;
5653
5654 /* for states to be equal callsites have to be the same
5655 * and all frame states need to be equivalent
5656 */
5657 for (i = 0; i <= old->curframe; i++) {
5658 if (old->frame[i]->callsite != cur->frame[i]->callsite)
5659 return false;
5660 if (!func_states_equal(old->frame[i], cur->frame[i]))
5661 return false;
5662 }
5663 return true;
5664 }
5665
5666 /* A write screens off any subsequent reads; but write marks come from the
5667 * straight-line code between a state and its parent. When we arrive at an
5668 * equivalent state (jump target or such) we didn't arrive by the straight-line
5669 * code, so read marks in the state must propagate to the parent regardless
5670 * of the state's write marks. That's what 'parent == state->parent' comparison
5671 * in mark_reg_read() is for.
5672 */
5673 static int propagate_liveness(struct bpf_verifier_env *env,
5674 const struct bpf_verifier_state *vstate,
5675 struct bpf_verifier_state *vparent)
5676 {
5677 int i, frame, err = 0;
5678 struct bpf_func_state *state, *parent;
5679
5680 if (vparent->curframe != vstate->curframe) {
5681 WARN(1, "propagate_live: parent frame %d current frame %d\n",
5682 vparent->curframe, vstate->curframe);
5683 return -EFAULT;
5684 }
5685 /* Propagate read liveness of registers... */
5686 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
5687 /* We don't need to worry about FP liveness because it's read-only */
5688 for (i = 0; i < BPF_REG_FP; i++) {
5689 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
5690 continue;
5691 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
5692 err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
5693 &vparent->frame[vstate->curframe]->regs[i]);
5694 if (err)
5695 return err;
5696 }
5697 }
5698
5699 /* ... and stack slots */
5700 for (frame = 0; frame <= vstate->curframe; frame++) {
5701 state = vstate->frame[frame];
5702 parent = vparent->frame[frame];
5703 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
5704 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
5705 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
5706 continue;
5707 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
5708 mark_reg_read(env, &state->stack[i].spilled_ptr,
5709 &parent->stack[i].spilled_ptr);
5710 }
5711 }
5712 return err;
5713 }
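/* Example: suppose the search is pruned at insn 3 because an
 * equivalent explored state exists there:
 *   3: r0 = r6
 *   4: exit
 * The explored continuation read r6, so r6 carries REG_LIVE_READ in
 * the explored state. propagate_liveness() copies that read mark into
 * the current (pruned) state, so it flows up the current parentage
 * chain and earlier writes to r6 on this path are not considered dead.
 */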
5714
5715 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
5716 {
5717 struct bpf_verifier_state_list *new_sl;
5718 struct bpf_verifier_state_list *sl;
5719 struct bpf_verifier_state *cur = env->cur_state, *new;
5720 int i, j, err, states_cnt = 0;
5721
5722 sl = env->explored_states[insn_idx];
5723 if (!sl)
5724 /* this 'insn_idx' instruction wasn't marked, so we will not
5725 * be doing state search here
5726 */
5727 return 0;
5728
5729 clean_live_states(env, insn_idx, cur);
5730
5731 while (sl != STATE_LIST_MARK) {
5732 if (states_equal(env, &sl->state, cur)) {
5733 /* reached equivalent register/stack state,
5734 * prune the search.
5735 * Registers read by the continuation are read by us.
5736 * If we have any write marks in env->cur_state, they
5737 * will prevent corresponding reads in the continuation
5738 * from reaching our parent (an explored_state). Our
5739 * own state will get the read marks recorded, but
5740 * they'll be immediately forgotten as we're pruning
5741 * this state and will pop a new one.
5742 */
5743 err = propagate_liveness(env, &sl->state, cur);
5744 if (err)
5745 return err;
5746 return 1;
5747 }
5748 sl = sl->next;
5749 states_cnt++;
5750 }
5751
5752 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
5753 return 0;
5754
5755 /* there were no equivalent states, remember current one.
5756 * technically the current state is not proven to be safe yet,
5757 * but it will either reach the outermost bpf_exit (which means it's safe)
5758 * or it will be rejected. Since there are no loops, we won't be
5759 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
5760 * again on the way to bpf_exit
5761 */
5762 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
5763 if (!new_sl)
5764 return -ENOMEM;
5765
5766 /* add new state to the head of linked list */
5767 new = &new_sl->state;
5768 err = copy_verifier_state(new, cur);
5769 if (err) {
5770 free_verifier_state(new, false);
5771 kfree(new_sl);
5772 return err;
5773 }
5774 new_sl->next = env->explored_states[insn_idx];
5775 env->explored_states[insn_idx] = new_sl;
5776 /* connect new state to parentage chain. Current frame needs all
5777 * registers connected. Only r6 - r9 of the callers are alive (pushed
5778 * to the stack implicitly by JITs) so in callers' frames connect just
5779 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
5780 * the state of the call instruction (with WRITTEN set), and r0 comes
5781 * from callee with its full parentage chain, anyway.
5782 */
5783 for (j = 0; j <= cur->curframe; j++)
5784 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
5785 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
5786 /* clear write marks in current state: the writes we did are not writes
5787 * our child did, so they don't screen off its reads from us.
5788 * (There are no read marks in current state, because reads always mark
5789 * their parent and current state never has children yet. Only
5790 * explored_states can get read marks.)
5791 */
5792 for (i = 0; i < BPF_REG_FP; i++)
5793 cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
5794
5795 /* all stack frames are accessible from callee, clear them all */
5796 for (j = 0; j <= cur->curframe; j++) {
5797 struct bpf_func_state *frame = cur->frame[j];
5798 struct bpf_func_state *newframe = new->frame[j];
5799
5800 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
5801 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
5802 frame->stack[i].spilled_ptr.parent =
5803 &newframe->stack[i].spilled_ptr;
5804 }
5805 }
5806 return 0;
5807 }
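/* Example of pruning (illustrative program):
 *   0: if r1 > 100 goto pc+1
 *   1: r6 = 0
 *   2: r0 = 0
 *   3: exit
 * Both paths meet at insn 2. Once one path has been verified through
 * 'exit' and its state stored at insn 2, the other path arriving there
 * with an equivalent state (r6 is never read) makes states_equal()
 * true, so is_state_visited() returns 1 and the verifier logs "safe"
 * instead of re-walking insns 2..3.
 */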
5808
5809 /* Return true if it's OK to have the same insn return a different type. */
5810 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
5811 {
5812 switch (type) {
5813 case PTR_TO_CTX:
5814 case PTR_TO_SOCKET:
5815 case PTR_TO_SOCKET_OR_NULL:
5816 return false;
5817 default:
5818 return true;
5819 }
5820 }
5821
5822 /* If an instruction was previously used with particular pointer types, then we
5823 * need to be careful to avoid cases such as the below, where it may be ok
5824 * for one branch accessing the pointer, but not ok for the other branch:
5825 *
5826 * R1 = sock_ptr
5827 * goto X;
5828 * ...
5829 * R1 = some_other_valid_ptr;
5830 * goto X;
5831 * ...
5832 * R2 = *(u32 *)(R1 + 0);
5833 */
5834 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
5835 {
5836 return src != prev && (!reg_type_mismatch_ok(src) ||
5837 !reg_type_mismatch_ok(prev));
5838 }
5839
5840 static int do_check(struct bpf_verifier_env *env)
5841 {
5842 struct bpf_verifier_state *state;
5843 struct bpf_insn *insns = env->prog->insnsi;
5844 struct bpf_reg_state *regs;
5845 int insn_cnt = env->prog->len, i;
5846 int insn_processed = 0;
5847 bool do_print_state = false;
5848
5849 env->prev_linfo = NULL;
5850
5851 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
5852 if (!state)
5853 return -ENOMEM;
5854 state->curframe = 0;
5855 state->speculative = false;
5856 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
5857 if (!state->frame[0]) {
5858 kfree(state);
5859 return -ENOMEM;
5860 }
5861 env->cur_state = state;
5862 init_func_state(env, state->frame[0],
5863 BPF_MAIN_FUNC /* callsite */,
5864 0 /* frameno */,
5865 0 /* subprogno, zero == main subprog */);
5866
5867 for (;;) {
5868 struct bpf_insn *insn;
5869 u8 class;
5870 int err;
5871
5872 if (env->insn_idx >= insn_cnt) {
5873 verbose(env, "invalid insn idx %d insn_cnt %d\n",
5874 env->insn_idx, insn_cnt);
5875 return -EFAULT;
5876 }
5877
5878 insn = &insns[env->insn_idx];
5879 class = BPF_CLASS(insn->code);
5880
5881 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
5882 verbose(env,
5883 "BPF program is too large. Processed %d insn\n",
5884 insn_processed);
5885 return -E2BIG;
5886 }
5887
5888 err = is_state_visited(env, env->insn_idx);
5889 if (err < 0)
5890 return err;
5891 if (err == 1) {
5892 /* found equivalent state, can prune the search */
5893 if (env->log.level) {
5894 if (do_print_state)
5895 verbose(env, "\nfrom %d to %d%s: safe\n",
5896 env->prev_insn_idx, env->insn_idx,
5897 env->cur_state->speculative ?
5898 " (speculative execution)" : "");
5899 else
5900 verbose(env, "%d: safe\n", env->insn_idx);
5901 }
5902 goto process_bpf_exit;
5903 }
5904
5905 if (signal_pending(current))
5906 return -EAGAIN;
5907
5908 if (need_resched())
5909 cond_resched();
5910
5911 if (env->log.level > 1 || (env->log.level && do_print_state)) {
5912 if (env->log.level > 1)
5913 verbose(env, "%d:", env->insn_idx);
5914 else
5915 verbose(env, "\nfrom %d to %d%s:",
5916 env->prev_insn_idx, env->insn_idx,
5917 env->cur_state->speculative ?
5918 " (speculative execution)" : "");
5919 print_verifier_state(env, state->frame[state->curframe]);
5920 do_print_state = false;
5921 }
5922
5923 if (env->log.level) {
5924 const struct bpf_insn_cbs cbs = {
5925 .cb_print = verbose,
5926 .private_data = env,
5927 };
5928
5929 verbose_linfo(env, env->insn_idx, "; ");
5930 verbose(env, "%d: ", env->insn_idx);
5931 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
5932 }
5933
5934 if (bpf_prog_is_dev_bound(env->prog->aux)) {
5935 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
5936 env->prev_insn_idx);
5937 if (err)
5938 return err;
5939 }
5940
5941 regs = cur_regs(env);
5942 env->insn_aux_data[env->insn_idx].seen = true;
5943
5944 if (class == BPF_ALU || class == BPF_ALU64) {
5945 err = check_alu_op(env, insn);
5946 if (err)
5947 return err;
5948
5949 } else if (class == BPF_LDX) {
5950 enum bpf_reg_type *prev_src_type, src_reg_type;
5951
5952 /* check for reserved fields is already done */
5953
5954 /* check src operand */
5955 err = check_reg_arg(env, insn->src_reg, SRC_OP);
5956 if (err)
5957 return err;
5958
5959 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5960 if (err)
5961 return err;
5962
5963 src_reg_type = regs[insn->src_reg].type;
5964
5965 /* check that memory (src_reg + off) is readable,
5966 * the state of dst_reg will be updated by this func
5967 */
5968 err = check_mem_access(env, env->insn_idx, insn->src_reg,
5969 insn->off, BPF_SIZE(insn->code),
5970 BPF_READ, insn->dst_reg, false);
5971 if (err)
5972 return err;
5973
5974 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
5975
5976 if (*prev_src_type == NOT_INIT) {
5977 /* saw a valid insn
5978 * dst_reg = *(u32 *)(src_reg + off)
5979 * save type to validate intersecting paths
5980 */
5981 *prev_src_type = src_reg_type;
5982
5983 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
5984 * An abuser program is trying to use the same insn
5985 * dst_reg = *(u32*) (src_reg + off)
5986 * with different pointer types:
5987 * src_reg == ctx in one branch and
5988 * src_reg == stack|map in some other branch.
5989 * Reject it.
5990 */
5991 verbose(env, "same insn cannot be used with different pointers\n");
5992 return -EINVAL;
5993 }
5994
5995 } else if (class == BPF_STX) {
5996 enum bpf_reg_type *prev_dst_type, dst_reg_type;
5997
5998 if (BPF_MODE(insn->code) == BPF_XADD) {
5999 err = check_xadd(env, env->insn_idx, insn);
6000 if (err)
6001 return err;
6002 env->insn_idx++;
6003 continue;
6004 }
6005
6006 /* check src1 operand */
6007 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6008 if (err)
6009 return err;
6010 /* check src2 operand */
6011 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6012 if (err)
6013 return err;
6014
6015 dst_reg_type = regs[insn->dst_reg].type;
6016
6017 /* check that memory (dst_reg + off) is writeable */
6018 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
6019 insn->off, BPF_SIZE(insn->code),
6020 BPF_WRITE, insn->src_reg, false);
6021 if (err)
6022 return err;
6023
6024 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
6025
6026 if (*prev_dst_type == NOT_INIT) {
6027 *prev_dst_type = dst_reg_type;
6028 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
6029 verbose(env, "same insn cannot be used with different pointers\n");
6030 return -EINVAL;
6031 }
6032
6033 } else if (class == BPF_ST) {
6034 if (BPF_MODE(insn->code) != BPF_MEM ||
6035 insn->src_reg != BPF_REG_0) {
6036 verbose(env, "BPF_ST uses reserved fields\n");
6037 return -EINVAL;
6038 }
6039 /* check src operand */
6040 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6041 if (err)
6042 return err;
6043
6044 if (is_ctx_reg(env, insn->dst_reg)) {
6045 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
6046 insn->dst_reg,
6047 reg_type_str[reg_state(env, insn->dst_reg)->type]);
6048 return -EACCES;
6049 }
6050
6051 /* check that memory (dst_reg + off) is writeable */
6052 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
6053 insn->off, BPF_SIZE(insn->code),
6054 BPF_WRITE, -1, false);
6055 if (err)
6056 return err;
6057
6058 } else if (class == BPF_JMP) {
6059 u8 opcode = BPF_OP(insn->code);
6060
6061 if (opcode == BPF_CALL) {
6062 if (BPF_SRC(insn->code) != BPF_K ||
6063 insn->off != 0 ||
6064 (insn->src_reg != BPF_REG_0 &&
6065 insn->src_reg != BPF_PSEUDO_CALL) ||
6066 insn->dst_reg != BPF_REG_0) {
6067 verbose(env, "BPF_CALL uses reserved fields\n");
6068 return -EINVAL;
6069 }
6070
6071 if (insn->src_reg == BPF_PSEUDO_CALL)
6072 err = check_func_call(env, insn, &env->insn_idx);
6073 else
6074 err = check_helper_call(env, insn->imm, env->insn_idx);
6075 if (err)
6076 return err;
6077
6078 } else if (opcode == BPF_JA) {
6079 if (BPF_SRC(insn->code) != BPF_K ||
6080 insn->imm != 0 ||
6081 insn->src_reg != BPF_REG_0 ||
6082 insn->dst_reg != BPF_REG_0) {
6083 verbose(env, "BPF_JA uses reserved fields\n");
6084 return -EINVAL;
6085 }
6086
6087 env->insn_idx += insn->off + 1;
6088 continue;
6089
6090 } else if (opcode == BPF_EXIT) {
6091 if (BPF_SRC(insn->code) != BPF_K ||
6092 insn->imm != 0 ||
6093 insn->src_reg != BPF_REG_0 ||
6094 insn->dst_reg != BPF_REG_0) {
6095 verbose(env, "BPF_EXIT uses reserved fields\n");
6096 return -EINVAL;
6097 }
6098
6099 if (state->curframe) {
6100 /* exit from nested function */
6101 env->prev_insn_idx = env->insn_idx;
6102 err = prepare_func_exit(env, &env->insn_idx);
6103 if (err)
6104 return err;
6105 do_print_state = true;
6106 continue;
6107 }
6108
6109 err = check_reference_leak(env);
6110 if (err)
6111 return err;
6112
6113 * eBPF calling convention is such that R0 is used
6114 * to return the value from the eBPF program.
6115 * Make sure that it's readable at this time
6116 * of bpf_exit, which means that the program wrote
6117 * something into it earlier
6118 */
6119 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
6120 if (err)
6121 return err;
6122
6123 if (is_pointer_value(env, BPF_REG_0)) {
6124 verbose(env, "R0 leaks addr as return value\n");
6125 return -EACCES;
6126 }
6127
6128 err = check_return_code(env);
6129 if (err)
6130 return err;
6131 process_bpf_exit:
6132 err = pop_stack(env, &env->prev_insn_idx,
6133 &env->insn_idx);
6134 if (err < 0) {
6135 if (err != -ENOENT)
6136 return err;
6137 break;
6138 } else {
6139 do_print_state = true;
6140 continue;
6141 }
6142 } else {
6143 err = check_cond_jmp_op(env, insn, &env->insn_idx);
6144 if (err)
6145 return err;
6146 }
6147 } else if (class == BPF_LD) {
6148 u8 mode = BPF_MODE(insn->code);
6149
6150 if (mode == BPF_ABS || mode == BPF_IND) {
6151 err = check_ld_abs(env, insn);
6152 if (err)
6153 return err;
6154
6155 } else if (mode == BPF_IMM) {
6156 err = check_ld_imm(env, insn);
6157 if (err)
6158 return err;
6159
6160 env->insn_idx++;
6161 env->insn_aux_data[env->insn_idx].seen = true;
6162 } else {
6163 verbose(env, "invalid BPF_LD mode\n");
6164 return -EINVAL;
6165 }
6166 } else {
6167 verbose(env, "unknown insn class %d\n", class);
6168 return -EINVAL;
6169 }
6170
6171 env->insn_idx++;
6172 }
6173
6174 verbose(env, "processed %d insns (limit %d), stack depth ",
6175 insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
6176 for (i = 0; i < env->subprog_cnt; i++) {
6177 u32 depth = env->subprog_info[i].stack_depth;
6178
6179 verbose(env, "%d", depth);
6180 if (i + 1 < env->subprog_cnt)
6181 verbose(env, "+");
6182 }
6183 verbose(env, "\n");
6184 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
6185 return 0;
6186 }
6187
6188 static int check_map_prealloc(struct bpf_map *map)
6189 {
6190 return (map->map_type != BPF_MAP_TYPE_HASH &&
6191 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6192 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
6193 !(map->map_flags & BPF_F_NO_PREALLOC);
6194 }
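/* Example: a BPF_MAP_TYPE_HASH created without BPF_F_NO_PREALLOC has
 * its elements preallocated and passes this check; the same map
 * created with BPF_F_NO_PREALLOC is rejected below for
 * BPF_PROG_TYPE_PERF_EVENT programs, since element allocation could
 * then happen in NMI context.
 */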
6195
6196 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
6197 struct bpf_map *map,
6198 struct bpf_prog *prog)
6199
6200 {
6201 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
6202 * preallocated hash maps, since doing memory allocation
6203 * in overflow_handler can crash depending on where nmi got
6204 * triggered.
6205 */
6206 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
6207 if (!check_map_prealloc(map)) {
6208 verbose(env, "perf_event programs can only use preallocated hash map\n");
6209 return -EINVAL;
6210 }
6211 if (map->inner_map_meta &&
6212 !check_map_prealloc(map->inner_map_meta)) {
6213 verbose(env, "perf_event programs can only use preallocated inner hash map\n");
6214 return -EINVAL;
6215 }
6216 }
6217
6218 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
6219 !bpf_offload_prog_map_match(prog, map)) {
6220 verbose(env, "offload device mismatch between prog and map\n");
6221 return -EINVAL;
6222 }
6223
6224 return 0;
6225 }
6226
6227 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
6228 {
6229 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
6230 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
6231 }
6232
6233 /* look for pseudo eBPF instructions that access map FDs and
6234 * replace them with actual map pointers
6235 */
6236 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
6237 {
6238 struct bpf_insn *insn = env->prog->insnsi;
6239 int insn_cnt = env->prog->len;
6240 int i, j, err;
6241
6242 err = bpf_prog_calc_tag(env->prog);
6243 if (err)
6244 return err;
6245
6246 for (i = 0; i < insn_cnt; i++, insn++) {
6247 if (BPF_CLASS(insn->code) == BPF_LDX &&
6248 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
6249 verbose(env, "BPF_LDX uses reserved fields\n");
6250 return -EINVAL;
6251 }
6252
6253 if (BPF_CLASS(insn->code) == BPF_STX &&
6254 ((BPF_MODE(insn->code) != BPF_MEM &&
6255 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
6256 verbose(env, "BPF_STX uses reserved fields\n");
6257 return -EINVAL;
6258 }
6259
6260 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
6261 struct bpf_map *map;
6262 struct fd f;
6263
6264 if (i == insn_cnt - 1 || insn[1].code != 0 ||
6265 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
6266 insn[1].off != 0) {
6267 verbose(env, "invalid bpf_ld_imm64 insn\n");
6268 return -EINVAL;
6269 }
6270
6271 if (insn->src_reg == 0)
6272 /* valid generic load 64-bit imm */
6273 goto next_insn;
6274
6275 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
6276 verbose(env,
6277 "unrecognized bpf_ld_imm64 insn\n");
6278 return -EINVAL;
6279 }
6280
6281 f = fdget(insn->imm);
6282 map = __bpf_map_get(f);
6283 if (IS_ERR(map)) {
6284 verbose(env, "fd %d is not pointing to valid bpf_map\n",
6285 insn->imm);
6286 return PTR_ERR(map);
6287 }
6288
6289 err = check_map_prog_compatibility(env, map, env->prog);
6290 if (err) {
6291 fdput(f);
6292 return err;
6293 }
6294
6295 /* store map pointer inside BPF_LD_IMM64 instruction */
6296 insn[0].imm = (u32) (unsigned long) map;
6297 insn[1].imm = ((u64) (unsigned long) map) >> 32;
6298
6299 /* check whether we recorded this map already */
6300 for (j = 0; j < env->used_map_cnt; j++)
6301 if (env->used_maps[j] == map) {
6302 fdput(f);
6303 goto next_insn;
6304 }
6305
6306 if (env->used_map_cnt >= MAX_USED_MAPS) {
6307 fdput(f);
6308 return -E2BIG;
6309 }
6310
6311 /* hold the map. If the program is rejected by the verifier,
6312 * the map will be released by release_maps() or it
6313 * will be used by the valid program until it's unloaded
6314 * and all maps are released in free_used_maps()
6315 */
6316 map = bpf_map_inc(map, false);
6317 if (IS_ERR(map)) {
6318 fdput(f);
6319 return PTR_ERR(map);
6320 }
6321 env->used_maps[env->used_map_cnt++] = map;
6322
6323 if (bpf_map_is_cgroup_storage(map) &&
6324 bpf_cgroup_storage_assign(env->prog, map)) {
6325 verbose(env, "only one cgroup storage of each type is allowed\n");
6326 fdput(f);
6327 return -EBUSY;
6328 }
6329
6330 fdput(f);
6331 next_insn:
6332 insn++;
6333 i++;
6334 continue;
6335 }
6336
6337 /* Basic sanity check before we invest more work here. */
6338 if (!bpf_opcode_in_insntable(insn->code)) {
6339 verbose(env, "unknown opcode %02x\n", insn->code);
6340 return -EINVAL;
6341 }
6342 }
6343
6344 /* now all pseudo BPF_LD_IMM64 instructions load valid
6345 * 'struct bpf_map *' into a register instead of user map_fd.
6346 * These pointers will be used later by the verifier to validate map access.
6347 */
6348 return 0;
6349 }
6350
6351 /* drop refcnt of maps used by the rejected program */
6352 static void release_maps(struct bpf_verifier_env *env)
6353 {
6354 enum bpf_cgroup_storage_type stype;
6355 int i;
6356
6357 for_each_cgroup_storage_type(stype) {
6358 if (!env->prog->aux->cgroup_storage[stype])
6359 continue;
6360 bpf_cgroup_storage_release(env->prog,
6361 env->prog->aux->cgroup_storage[stype]);
6362 }
6363
6364 for (i = 0; i < env->used_map_cnt; i++)
6365 bpf_map_put(env->used_maps[i]);
6366 }
6367
6368 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
6369 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
6370 {
6371 struct bpf_insn *insn = env->prog->insnsi;
6372 int insn_cnt = env->prog->len;
6373 int i;
6374
6375 for (i = 0; i < insn_cnt; i++, insn++)
6376 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
6377 insn->src_reg = 0;
6378 }
6379
6380 /* single env->prog->insnsi[off] instruction was replaced with the range
6381 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
6382 * [0, off) and [off, end) to new locations, so the patched range stays zero
6383 */
6384 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
6385 u32 off, u32 cnt)
6386 {
6387 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
6388 int i;
6389
6390 if (cnt == 1)
6391 return 0;
6392 new_data = vzalloc(array_size(prog_len,
6393 sizeof(struct bpf_insn_aux_data)));
6394 if (!new_data)
6395 return -ENOMEM;
6396 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
6397 memcpy(new_data + off + cnt - 1, old_data + off,
6398 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
6399 for (i = off; i < off + cnt - 1; i++)
6400 new_data[i].seen = true;
6401 env->insn_aux_data = new_data;
6402 vfree(old_data);
6403 return 0;
6404 }
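/* Example: replacing the single insn at off = 5 with a 3-insn patch
 * (cnt = 3): aux data for insns 0..4 is copied as-is, the old aux data
 * from insn 5 onwards lands at index 7 (off + cnt - 1) so the replaced
 * insn keeps its own aux entry, and the two fresh slots 5..6 are
 * marked 'seen'.
 */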
6405
6406 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
6407 {
6408 int i;
6409
6410 if (len == 1)
6411 return;
6412 /* NOTE: fake 'exit' subprog should be updated as well. */
6413 for (i = 0; i <= env->subprog_cnt; i++) {
6414 if (env->subprog_info[i].start <= off)
6415 continue;
6416 env->subprog_info[i].start += len - 1;
6417 }
6418 }
6419
6420 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
6421 const struct bpf_insn *patch, u32 len)
6422 {
6423 struct bpf_prog *new_prog;
6424
6425 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
6426 if (!new_prog)
6427 return NULL;
6428 if (adjust_insn_aux_data(env, new_prog->len, off, len))
6429 return NULL;
6430 adjust_subprog_starts(env, off, len);
6431 return new_prog;
6432 }
6433
6434 /* The verifier does more data flow analysis than llvm and will not
6435 * explore branches that are dead at run time. Malicious programs can
6436 * have dead code too. Therefore replace all dead at-run-time code
6437 * with 'ja -1'.
6438 *
6439 * Plain nops would not be enough: if they sat at the end of the
6440 * program and, through another bug, we managed to jump there, we
6441 * would execute beyond program memory. Returning exception
6442 * code also wouldn't work since we can have subprogs where the dead
6443 * code could be located.
6444 */
6445 static void sanitize_dead_code(struct bpf_verifier_env *env)
6446 {
6447 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
6448 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
6449 struct bpf_insn *insn = env->prog->insnsi;
6450 const int insn_cnt = env->prog->len;
6451 int i;
6452
6453 for (i = 0; i < insn_cnt; i++) {
6454 if (aux_data[i].seen)
6455 continue;
6456 memcpy(insn + i, &trap, sizeof(trap));
6457 }
6458 }
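/* Example: if insn 2 below was never marked 'seen':
 *   0: r0 = 0         becomes      0: r0 = 0
 *   1: exit                        1: exit
 *   2: r6 = r1                     2: goto -1
 * The 'goto -1' trap jumps to itself, so even a stray jump into the
 * dead region cannot execute unverified instructions or run off the
 * end of the program.
 */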
6459
6460 /* convert load instructions that access fields of a context type into a
6461 * sequence of instructions that access fields of the underlying structure:
6462 * struct __sk_buff -> struct sk_buff
6463 * struct bpf_sock_ops -> struct sock
6464 */
6465 static int convert_ctx_accesses(struct bpf_verifier_env *env)
6466 {
6467 const struct bpf_verifier_ops *ops = env->ops;
6468 int i, cnt, size, ctx_field_size, delta = 0;
6469 const int insn_cnt = env->prog->len;
6470 struct bpf_insn insn_buf[16], *insn;
6471 u32 target_size, size_default, off;
6472 struct bpf_prog *new_prog;
6473 enum bpf_access_type type;
6474 bool is_narrower_load;
6475
6476 if (ops->gen_prologue || env->seen_direct_write) {
6477 if (!ops->gen_prologue) {
6478 verbose(env, "bpf verifier is misconfigured\n");
6479 return -EINVAL;
6480 }
6481 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
6482 env->prog);
6483 if (cnt >= ARRAY_SIZE(insn_buf)) {
6484 verbose(env, "bpf verifier is misconfigured\n");
6485 return -EINVAL;
6486 } else if (cnt) {
6487 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
6488 if (!new_prog)
6489 return -ENOMEM;
6490
6491 env->prog = new_prog;
6492 delta += cnt - 1;
6493 }
6494 }
6495
6496 if (bpf_prog_is_dev_bound(env->prog->aux))
6497 return 0;
6498
6499 insn = env->prog->insnsi + delta;
6500
6501 for (i = 0; i < insn_cnt; i++, insn++) {
6502 bpf_convert_ctx_access_t convert_ctx_access;
6503
6504 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
6505 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
6506 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
6507 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
6508 type = BPF_READ;
6509 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
6510 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
6511 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
6512 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
6513 type = BPF_WRITE;
6514 else
6515 continue;
6516
6517 if (type == BPF_WRITE &&
6518 env->insn_aux_data[i + delta].sanitize_stack_off) {
6519 struct bpf_insn patch[] = {
6520 /* Sanitize suspicious stack slot with zero.
6521 * There are no memory dependencies for this store,
6522 * since it's only using frame pointer and immediate
6523 * constant of zero
6524 */
6525 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
6526 env->insn_aux_data[i + delta].sanitize_stack_off,
6527 0),
6528 /* the original STX instruction will immediately
6529 * overwrite the same stack slot with appropriate value
6530 */
6531 *insn,
6532 };
6533
6534 cnt = ARRAY_SIZE(patch);
6535 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
6536 if (!new_prog)
6537 return -ENOMEM;
6538
6539 delta += cnt - 1;
6540 env->prog = new_prog;
6541 insn = new_prog->insnsi + i + delta;
6542 continue;
6543 }
6544
6545 switch (env->insn_aux_data[i + delta].ptr_type) {
6546 case PTR_TO_CTX:
6547 if (!ops->convert_ctx_access)
6548 continue;
6549 convert_ctx_access = ops->convert_ctx_access;
6550 break;
6551 case PTR_TO_SOCKET:
6552 convert_ctx_access = bpf_sock_convert_ctx_access;
6553 break;
6554 default:
6555 continue;
6556 }
6557
6558 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
6559 size = BPF_LDST_BYTES(insn);
6560
6561 /* If the read access is a narrower load of the field,
6562 * convert to a 4/8-byte load, to minimize program type specific
6563 * convert_ctx_access changes. If conversion is successful,
6564 * we will apply proper mask to the result.
6565 */
6566 is_narrower_load = size < ctx_field_size;
6567 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
6568 off = insn->off;
6569 if (is_narrower_load) {
6570 u8 size_code;
6571
6572 if (type == BPF_WRITE) {
6573 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
6574 return -EINVAL;
6575 }
6576
6577 size_code = BPF_H;
6578 if (ctx_field_size == 4)
6579 size_code = BPF_W;
6580 else if (ctx_field_size == 8)
6581 size_code = BPF_DW;
6582
6583 insn->off = off & ~(size_default - 1);
6584 insn->code = BPF_LDX | BPF_MEM | size_code;
6585 }
6586
6587 target_size = 0;
6588 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
6589 &target_size);
6590 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
6591 (ctx_field_size && !target_size)) {
6592 verbose(env, "bpf verifier is misconfigured\n");
6593 return -EINVAL;
6594 }
6595
6596 if (is_narrower_load && size < target_size) {
6597 u8 shift = (off & (size_default - 1)) * 8;
6598
6599 if (ctx_field_size <= 4) {
6600 if (shift)
6601 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
6602 insn->dst_reg,
6603 shift);
6604 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
6605 (1 << size * 8) - 1);
6606 } else {
6607 if (shift)
6608 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
6609 insn->dst_reg,
6610 shift);
6611 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
6612 (1 << size * 8) - 1);
6613 }
6614 }
6615
6616 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6617 if (!new_prog)
6618 return -ENOMEM;
6619
6620 delta += cnt - 1;
6621
6622 /* keep walking new program and skip insns we just inserted */
6623 env->prog = new_prog;
6624 insn = new_prog->insnsi + i + delta;
6625 }
6626
6627 return 0;
6628 }
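/* Example of the narrow-load rewrite above (assuming a little-endian
 * layout and an aligned 4-byte field at 'off'): a 1-byte read
 *   r0 = *(u8 *)(r1 + off + 1)
 * is widened to the full field and then adjusted:
 *   r0 = *(u32 *)(r1 + off)    (emitted via convert_ctx_access())
 *   r0 >>= 8                   (shift = ((off + 1) & 3) * 8)
 *   r0 &= 0xff                 (mask back to the 1-byte access size)
 */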
6629
6630 static int jit_subprogs(struct bpf_verifier_env *env)
6631 {
6632 struct bpf_prog *prog = env->prog, **func, *tmp;
6633 int i, j, subprog_start, subprog_end = 0, len, subprog;
6634 struct bpf_insn *insn;
6635 void *old_bpf_func;
6636 int err;
6637
6638 if (env->subprog_cnt <= 1)
6639 return 0;
6640
6641 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6642 if (insn->code != (BPF_JMP | BPF_CALL) ||
6643 insn->src_reg != BPF_PSEUDO_CALL)
6644 continue;
6645 /* Upon error here we cannot fall back to interpreter but
6646 * need a hard reject of the program. Thus -EFAULT is
6647 * propagated in any case.
6648 */
6649 subprog = find_subprog(env, i + insn->imm + 1);
6650 if (subprog < 0) {
6651 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
6652 i + insn->imm + 1);
6653 return -EFAULT;
6654 }
6655 /* temporarily remember subprog id inside insn instead of
6656 * aux_data, since next loop will split up all insns into funcs
6657 */
6658 insn->off = subprog;
6659 /* remember original imm in case JIT fails and fallback
6660 * to interpreter will be needed
6661 */
6662 env->insn_aux_data[i].call_imm = insn->imm;
6663 /* point imm to __bpf_call_base+1 from JITs point of view */
6664 insn->imm = 1;
6665 }
6666
6667 err = bpf_prog_alloc_jited_linfo(prog);
6668 if (err)
6669 goto out_undo_insn;
6670
6671 err = -ENOMEM;
6672 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
6673 if (!func)
6674 goto out_undo_insn;
6675
6676 for (i = 0; i < env->subprog_cnt; i++) {
6677 subprog_start = subprog_end;
6678 subprog_end = env->subprog_info[i + 1].start;
6679
6680 len = subprog_end - subprog_start;
6681 func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
6682 if (!func[i])
6683 goto out_free;
6684 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
6685 len * sizeof(struct bpf_insn));
6686 func[i]->type = prog->type;
6687 func[i]->len = len;
6688 if (bpf_prog_calc_tag(func[i]))
6689 goto out_free;
6690 func[i]->is_func = 1;
6691 func[i]->aux->func_idx = i;
6692 /* the btf and func_info will be freed only at prog->aux */
6693 func[i]->aux->btf = prog->aux->btf;
6694 func[i]->aux->func_info = prog->aux->func_info;
6695
6696 /* Use bpf_prog_F_tag to indicate functions in stack traces.
6697 * Long term, we would need debug info to populate names
6698 */
6699 func[i]->aux->name[0] = 'F';
6700 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
6701 func[i]->jit_requested = 1;
6702 func[i]->aux->linfo = prog->aux->linfo;
6703 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
6704 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
6705 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
6706 func[i] = bpf_int_jit_compile(func[i]);
6707 if (!func[i]->jited) {
6708 err = -ENOTSUPP;
6709 goto out_free;
6710 }
6711 cond_resched();
6712 }
6713 /* at this point all bpf functions were successfully JITed
6714 * now populate all bpf_calls with correct addresses and
6715 * run last pass of JIT
6716 */
6717 for (i = 0; i < env->subprog_cnt; i++) {
6718 insn = func[i]->insnsi;
6719 for (j = 0; j < func[i]->len; j++, insn++) {
6720 if (insn->code != (BPF_JMP | BPF_CALL) ||
6721 insn->src_reg != BPF_PSEUDO_CALL)
6722 continue;
6723 subprog = insn->off;
6724 insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
6725 func[subprog]->bpf_func -
6726 __bpf_call_base;
6727 }
6728
6729 /* we use the aux data to keep a list of the start addresses
6730 * of the JITed images for each function in the program
6731 *
6732 * for some architectures, such as powerpc64, the imm field
6733 * might not be large enough to hold the offset of the start
6734 * address of the callee's JITed image from __bpf_call_base
6735 *
6736 * in such cases, we can lookup the start address of a callee
6737 * by using its subprog id, available from the off field of
6738 * the call instruction, as an index for this list
6739 */
6740 func[i]->aux->func = func;
6741 func[i]->aux->func_cnt = env->subprog_cnt;
6742 }
6743 for (i = 0; i < env->subprog_cnt; i++) {
6744 old_bpf_func = func[i]->bpf_func;
6745 tmp = bpf_int_jit_compile(func[i]);
6746 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
6747 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
6748 err = -ENOTSUPP;
6749 goto out_free;
6750 }
6751 cond_resched();
6752 }
6753
6754 /* finally lock prog and jit images for all functions and
6755 * populate kallsyms
6756 */
6757 for (i = 0; i < env->subprog_cnt; i++) {
6758 bpf_prog_lock_ro(func[i]);
6759 bpf_prog_kallsyms_add(func[i]);
6760 }
6761
6762 /* Last step: make the now-unused interpreter insns from the main
6763 * prog consistent for later dump requests, so they can
6764 * look the same as if they had only ever been interpreted.
6765 */
6766 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6767 if (insn->code != (BPF_JMP | BPF_CALL) ||
6768 insn->src_reg != BPF_PSEUDO_CALL)
6769 continue;
6770 insn->off = env->insn_aux_data[i].call_imm;
6771 subprog = find_subprog(env, i + insn->off + 1);
6772 insn->imm = subprog;
6773 }
6774
6775 prog->jited = 1;
6776 prog->bpf_func = func[0]->bpf_func;
6777 prog->aux->func = func;
6778 prog->aux->func_cnt = env->subprog_cnt;
6779 bpf_prog_free_unused_jited_linfo(prog);
6780 return 0;
6781 out_free:
6782 for (i = 0; i < env->subprog_cnt; i++)
6783 if (func[i])
6784 bpf_jit_free(func[i]);
6785 kfree(func);
6786 out_undo_insn:
6787 /* cleanup main prog to be interpreted */
6788 prog->jit_requested = 0;
6789 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6790 if (insn->code != (BPF_JMP | BPF_CALL) ||
6791 insn->src_reg != BPF_PSEUDO_CALL)
6792 continue;
6793 insn->off = 0;
6794 insn->imm = env->insn_aux_data[i].call_imm;
6795 }
6796 bpf_prog_free_jited_linfo(prog);
6797 return err;
6798 }
6799
6800 static int fixup_call_args(struct bpf_verifier_env *env)
6801 {
6802 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
6803 struct bpf_prog *prog = env->prog;
6804 struct bpf_insn *insn = prog->insnsi;
6805 int i, depth;
6806 #endif
6807 int err = 0;
6808
6809 if (env->prog->jit_requested &&
6810 !bpf_prog_is_dev_bound(env->prog->aux)) {
6811 err = jit_subprogs(env);
6812 if (err == 0)
6813 return 0;
6814 if (err == -EFAULT)
6815 return err;
6816 }
6817 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
6818 for (i = 0; i < prog->len; i++, insn++) {
6819 if (insn->code != (BPF_JMP | BPF_CALL) ||
6820 insn->src_reg != BPF_PSEUDO_CALL)
6821 continue;
6822 depth = get_callee_stack_depth(env, insn, i);
6823 if (depth < 0)
6824 return depth;
6825 bpf_patch_call_args(insn, depth);
6826 }
6827 err = 0;
6828 #endif
6829 return err;
6830 }
6831
6832 /* fixup insn->imm field of bpf_call instructions
6833 * and inline eligible helpers as an explicit sequence of BPF instructions
6834 *
6835 * this function is called after eBPF program passed verification
6836 */
6837 static int fixup_bpf_calls(struct bpf_verifier_env *env)
6838 {
6839 struct bpf_prog *prog = env->prog;
6840 struct bpf_insn *insn = prog->insnsi;
6841 const struct bpf_func_proto *fn;
6842 const int insn_cnt = prog->len;
6843 const struct bpf_map_ops *ops;
6844 struct bpf_insn_aux_data *aux;
6845 struct bpf_insn insn_buf[16];
6846 struct bpf_prog *new_prog;
6847 struct bpf_map *map_ptr;
6848 int i, cnt, delta = 0;
6849
6850 for (i = 0; i < insn_cnt; i++, insn++) {
6851 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
6852 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6853 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
6854 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6855 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
6856 struct bpf_insn mask_and_div[] = {
6857 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6858 /* Rx div 0 -> 0 */
6859 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
6860 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
6861 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6862 *insn,
6863 };
6864 struct bpf_insn mask_and_mod[] = {
6865 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6866 /* Rx mod 0 -> Rx */
6867 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
6868 *insn,
6869 };
6870 struct bpf_insn *patchlet;
6871
6872 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6873 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6874 patchlet = mask_and_div + (is64 ? 1 : 0);
6875 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
6876 } else {
6877 patchlet = mask_and_mod + (is64 ? 1 : 0);
6878 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
6879 }
6880
6881 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
6882 if (!new_prog)
6883 return -ENOMEM;
6884
6885 delta += cnt - 1;
6886 env->prog = prog = new_prog;
6887 insn = new_prog->insnsi + i + delta;
6888 continue;
6889 }
6890
6891 if (BPF_CLASS(insn->code) == BPF_LD &&
6892 (BPF_MODE(insn->code) == BPF_ABS ||
6893 BPF_MODE(insn->code) == BPF_IND)) {
6894 cnt = env->ops->gen_ld_abs(insn, insn_buf);
6895 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
6896 verbose(env, "bpf verifier is misconfigured\n");
6897 return -EINVAL;
6898 }
6899
6900 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6901 if (!new_prog)
6902 return -ENOMEM;
6903
6904 delta += cnt - 1;
6905 env->prog = prog = new_prog;
6906 insn = new_prog->insnsi + i + delta;
6907 continue;
6908 }
6909
6910 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
6911 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
6912 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
6913 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
6914 struct bpf_insn insn_buf[16];
6915 struct bpf_insn *patch = &insn_buf[0];
6916 bool issrc, isneg;
6917 u32 off_reg;
6918
6919 aux = &env->insn_aux_data[i + delta];
6920 if (!aux->alu_state)
6921 continue;
6922
6923 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
6924 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
6925 BPF_ALU_SANITIZE_SRC;
6926
6927 off_reg = issrc ? insn->src_reg : insn->dst_reg;
6928 if (isneg)
6929 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6930 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
6931 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
6932 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
6933 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
6934 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
6935 if (issrc) {
6936 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
6937 off_reg);
6938 insn->src_reg = BPF_REG_AX;
6939 } else {
6940 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
6941 BPF_REG_AX);
6942 }
6943 if (isneg)
6944 insn->code = insn->code == code_add ?
6945 code_sub : code_add;
6946 *patch++ = *insn;
6947 if (issrc && isneg)
6948 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6949 cnt = patch - insn_buf;
6950
6951 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6952 if (!new_prog)
6953 return -ENOMEM;
6954
6955 delta += cnt - 1;
6956 env->prog = prog = new_prog;
6957 insn = new_prog->insnsi + i + delta;
6958 continue;
6959 }
6960
6961 if (insn->code != (BPF_JMP | BPF_CALL))
6962 continue;
6963 if (insn->src_reg == BPF_PSEUDO_CALL)
6964 continue;
6965
6966 if (insn->imm == BPF_FUNC_get_route_realm)
6967 prog->dst_needed = 1;
6968 if (insn->imm == BPF_FUNC_get_prandom_u32)
6969 bpf_user_rnd_init_once();
6970 if (insn->imm == BPF_FUNC_override_return)
6971 prog->kprobe_override = 1;
6972 if (insn->imm == BPF_FUNC_tail_call) {
6973 /* If we tail call into other programs, we
6974 * cannot make any assumptions since they can
6975 * be replaced dynamically during runtime in
6976 * the program array.
6977 */
6978 prog->cb_access = 1;
6979 env->prog->aux->stack_depth = MAX_BPF_STACK;
6980 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
6981
6982 /* mark bpf_tail_call as different opcode to avoid
6983 * conditional branch in the interpreter for every normal
6984 * call and to prevent accidental JITing by JIT compiler
6985 * that doesn't support bpf_tail_call yet
6986 */
6987 insn->imm = 0;
6988 insn->code = BPF_JMP | BPF_TAIL_CALL;
6989
6990 aux = &env->insn_aux_data[i + delta];
6991 if (!bpf_map_ptr_unpriv(aux))
6992 continue;
6993
6994 /* instead of changing every JIT dealing with tail_call
6995 * emit two extra insns:
6996 * if (index >= max_entries) goto out;
6997 * index &= array->index_mask;
6998 * to avoid out-of-bounds cpu speculation
6999 */
7000 if (bpf_map_ptr_poisoned(aux)) {
7001 verbose(env, "tail_call abusing map_ptr\n");
7002 return -EINVAL;
7003 }
7004
7005 map_ptr = BPF_MAP_PTR(aux->map_state);
7006 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
7007 map_ptr->max_entries, 2);
7008 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
7009 container_of(map_ptr,
7010 struct bpf_array,
7011 map)->index_mask);
7012 insn_buf[2] = *insn;
7013 cnt = 3;
7014 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
7015 if (!new_prog)
7016 return -ENOMEM;
7017
7018 delta += cnt - 1;
7019 env->prog = prog = new_prog;
7020 insn = new_prog->insnsi + i + delta;
7021 continue;
7022 }
7023
7024 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
7025 * and other inlining handlers are currently limited to 64 bit
7026 * only.
7027 */
7028 if (prog->jit_requested && BITS_PER_LONG == 64 &&
7029 (insn->imm == BPF_FUNC_map_lookup_elem ||
7030 insn->imm == BPF_FUNC_map_update_elem ||
7031 insn->imm == BPF_FUNC_map_delete_elem ||
7032 insn->imm == BPF_FUNC_map_push_elem ||
7033 insn->imm == BPF_FUNC_map_pop_elem ||
7034 insn->imm == BPF_FUNC_map_peek_elem)) {
7035 aux = &env->insn_aux_data[i + delta];
7036 if (bpf_map_ptr_poisoned(aux))
7037 goto patch_call_imm;
7038
7039 map_ptr = BPF_MAP_PTR(aux->map_state);
7040 ops = map_ptr->ops;
7041 if (insn->imm == BPF_FUNC_map_lookup_elem &&
7042 ops->map_gen_lookup) {
7043 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
7044 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
7045 verbose(env, "bpf verifier is misconfigured\n");
7046 return -EINVAL;
7047 }
7048
7049 new_prog = bpf_patch_insn_data(env, i + delta,
7050 insn_buf, cnt);
7051 if (!new_prog)
7052 return -ENOMEM;
7053
7054 delta += cnt - 1;
7055 env->prog = prog = new_prog;
7056 insn = new_prog->insnsi + i + delta;
7057 continue;
7058 }

			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (int (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (int (*)(struct bpf_map *map, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));

			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
					    __bpf_call_base;
				continue;
			}

			goto patch_call_imm;
		}

patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}
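
/* For orientation: a helper prototype is a plain struct pointing at a real
 * kernel function, and at run time the interpreter resolves the patched imm
 * back to an address as __bpf_call_base + insn->imm. A simplified sketch of
 * such a prototype, loosely modeled on kernel/bpf/helpers.c (not verbatim):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */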

static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				free_verifier_state(&sl->state, false);
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
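
/* The layout torn down above, informally: explored_states has one slot per
 * instruction, each holding a singly linked list of states found safe at
 * that instruction, terminated by the STATE_LIST_MARK sentinel rather than
 * NULL (a sketch, assuming the slot was marked as a pruning point):
 *
 *	explored_states[insn_idx] -> sl -> sl -> STATE_LIST_MARK
 */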

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
	      union bpf_attr __user *uattr)
{
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int ret = -EINVAL;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
				   (*prog)->len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];

	/* grab the mutex to protect a few globals used by the verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf)
			goto err_unlock;
	}
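
	/* From user space these fields arrive via the BPF_PROG_LOAD
	 * command; a minimal sketch, assuming a raw bpf(2) syscall and
	 * attribute names from include/uapi/linux/bpf.h:
	 *
	 *	char vlog[16384];
	 *	union bpf_attr attr = {};
	 *
	 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	 *	attr.insns     = (__u64)(unsigned long)insns;
	 *	attr.insn_cnt  = insn_cnt;
	 *	attr.license   = (__u64)(unsigned long)"GPL";
	 *	attr.log_level = 1;
	 *	attr.log_buf   = (__u64)(unsigned long)vlog;
	 *	attr.log_size  = sizeof(vlog);
	 *
	 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	 */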

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (ret == 0)
		sanitize_dead_code(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	if (ret == 0)
		ret = fixup_call_args(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	if (ret == 0)
		adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
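
/* Typical entry into the verifier, as a sketch: bpf_prog_load() in
 * kernel/bpf/syscall.c calls this function once the program and its
 * attributes have been copied in from user space, roughly:
 *
 *	err = bpf_check(&prog, attr, uattr);
 *	if (err < 0)
 *		goto free_used_maps;
 */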