]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - kernel/bpf/verifier.c
selftests/bpf: Restore original comm in test_overhead
[mirror_ubuntu-hirsute-kernel.git] / kernel / bpf / verifier.c
CommitLineData
5b497af4 1// SPDX-License-Identifier: GPL-2.0-only
51580e79 2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
969bf05e 3 * Copyright (c) 2016 Facebook
fd978bf7 4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
51580e79 5 */
838e9690 6#include <uapi/linux/btf.h>
51580e79
AS
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/bpf.h>
838e9690 11#include <linux/btf.h>
58e2af8b 12#include <linux/bpf_verifier.h>
51580e79
AS
13#include <linux/filter.h>
14#include <net/netlink.h>
15#include <linux/file.h>
16#include <linux/vmalloc.h>
ebb676da 17#include <linux/stringify.h>
cc8b0b92
AS
18#include <linux/bsearch.h>
19#include <linux/sort.h>
c195651e 20#include <linux/perf_event.h>
d9762e84 21#include <linux/ctype.h>
51580e79 22
f4ac7e0b
JK
23#include "disasm.h"
24
00176a34 25static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
91cc1a99 26#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
00176a34
JK
27 [_id] = & _name ## _verifier_ops,
28#define BPF_MAP_TYPE(_id, _ops)
29#include <linux/bpf_types.h>
30#undef BPF_PROG_TYPE
31#undef BPF_MAP_TYPE
32};
33
51580e79
AS
34/* bpf_check() is a static code analyzer that walks eBPF program
35 * instruction by instruction and updates register/stack state.
36 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
37 *
38 * The first pass is depth-first-search to check that the program is a DAG.
39 * It rejects the following programs:
40 * - larger than BPF_MAXINSNS insns
41 * - if loop is present (detected via back-edge)
42 * - unreachable insns exist (shouldn't be a forest. program = one function)
43 * - out of bounds or malformed jumps
44 * The second pass is all possible path descent from the 1st insn.
45 * Since it's analyzing all pathes through the program, the length of the
eba38a96 46 * analysis is limited to 64k insn, which may be hit even if total number of
51580e79
AS
47 * insn is less then 4K, but there are too many branches that change stack/regs.
48 * Number of 'branches to be analyzed' is limited to 1k
49 *
50 * On entry to each instruction, each register has a type, and the instruction
51 * changes the types of the registers depending on instruction semantics.
52 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
53 * copied to R1.
54 *
55 * All registers are 64-bit.
56 * R0 - return register
57 * R1-R5 argument passing registers
58 * R6-R9 callee saved registers
59 * R10 - frame pointer read-only
60 *
61 * At the start of BPF program the register R1 contains a pointer to bpf_context
62 * and has type PTR_TO_CTX.
63 *
64 * Verifier tracks arithmetic operations on pointers in case:
65 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
66 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
67 * 1st insn copies R10 (which has FRAME_PTR) type into R1
68 * and 2nd arithmetic instruction is pattern matched to recognize
69 * that it wants to construct a pointer to some element within stack.
70 * So after 2nd insn, the register R1 has type PTR_TO_STACK
71 * (and -20 constant is saved for further stack bounds checking).
72 * Meaning that this reg is a pointer to stack plus known immediate constant.
73 *
f1174f77 74 * Most of the time the registers have SCALAR_VALUE type, which
51580e79 75 * means the register has some value, but it's not a valid pointer.
f1174f77 76 * (like pointer plus pointer becomes SCALAR_VALUE type)
51580e79
AS
77 *
78 * When verifier sees load or store instructions the type of base register
c64b7983
JS
79 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
80 * four pointer types recognized by check_mem_access() function.
51580e79
AS
81 *
82 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
83 * and the range of [ptr, ptr + map's value_size) is accessible.
84 *
85 * registers used to pass values to function calls are checked against
86 * function argument constraints.
87 *
88 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
89 * It means that the register type passed to this function must be
90 * PTR_TO_STACK and it will be used inside the function as
91 * 'pointer to map element key'
92 *
93 * For example the argument constraints for bpf_map_lookup_elem():
94 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
95 * .arg1_type = ARG_CONST_MAP_PTR,
96 * .arg2_type = ARG_PTR_TO_MAP_KEY,
97 *
98 * ret_type says that this function returns 'pointer to map elem value or null'
99 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
100 * 2nd argument should be a pointer to stack, which will be used inside
101 * the helper function as a pointer to map element key.
102 *
103 * On the kernel side the helper function looks like:
104 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105 * {
106 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
107 * void *key = (void *) (unsigned long) r2;
108 * void *value;
109 *
110 * here kernel can access 'key' and 'map' pointers safely, knowing that
111 * [key, key + map->key_size) bytes are valid and were initialized on
112 * the stack of eBPF program.
113 * }
114 *
115 * Corresponding eBPF program may look like:
116 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
117 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
118 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
119 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
120 * here verifier looks at prototype of map_lookup_elem() and sees:
121 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
122 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
123 *
124 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
125 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
126 * and were initialized prior to this call.
127 * If it's ok, then verifier allows this BPF_CALL insn and looks at
128 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
129 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
130 * returns ether pointer to map value or NULL.
131 *
132 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
133 * insn, the register holding that pointer in the true branch changes state to
134 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
135 * branch. See check_cond_jmp_op().
136 *
137 * After the call R0 is set to return type of the function and registers R1-R5
138 * are set to NOT_INIT to indicate that they are no longer readable.
fd978bf7
JS
139 *
140 * The following reference types represent a potential reference to a kernel
141 * resource which, after first being allocated, must be checked and freed by
142 * the BPF program:
143 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
144 *
145 * When the verifier sees a helper call return a reference type, it allocates a
146 * pointer id for the reference and stores it in the current function state.
147 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
148 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
149 * passes through a NULL-check conditional. For the branch wherein the state is
150 * changed to CONST_IMM, the verifier releases the reference.
6acc9b43
JS
151 *
152 * For each helper function that allocates a reference, such as
153 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
154 * bpf_sk_release(). When a reference type passes into the release function,
155 * the verifier also releases the reference. If any unchecked or unreleased
156 * reference remains at the end of the program, the verifier rejects it.
51580e79
AS
157 */
158
17a52670 159/* verifier_state + insn_idx are pushed to stack when branch is encountered */
58e2af8b 160struct bpf_verifier_stack_elem {
17a52670
AS
161 /* verifer state is 'st'
162 * before processing instruction 'insn_idx'
163 * and after processing instruction 'prev_insn_idx'
164 */
58e2af8b 165 struct bpf_verifier_state st;
17a52670
AS
166 int insn_idx;
167 int prev_insn_idx;
58e2af8b 168 struct bpf_verifier_stack_elem *next;
cbd35700
AS
169};
170
b285fcb7 171#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
ceefbc96 172#define BPF_COMPLEXITY_LIMIT_STATES 64
07016151 173
d2e4c1e6
DB
174#define BPF_MAP_KEY_POISON (1ULL << 63)
175#define BPF_MAP_KEY_SEEN (1ULL << 62)
176
c93552c4
DB
177#define BPF_MAP_PTR_UNPRIV 1UL
178#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
179 POISON_POINTER_DELTA))
180#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
181
182static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
183{
d2e4c1e6 184 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
c93552c4
DB
185}
186
187static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
188{
d2e4c1e6 189 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
c93552c4
DB
190}
191
192static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
193 const struct bpf_map *map, bool unpriv)
194{
195 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
196 unpriv |= bpf_map_ptr_unpriv(aux);
d2e4c1e6
DB
197 aux->map_ptr_state = (unsigned long)map |
198 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
199}
200
201static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
202{
203 return aux->map_key_state & BPF_MAP_KEY_POISON;
204}
205
206static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
207{
208 return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
209}
210
211static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
212{
213 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
214}
215
216static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
217{
218 bool poisoned = bpf_map_key_poisoned(aux);
219
220 aux->map_key_state = state | BPF_MAP_KEY_SEEN |
221 (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
c93552c4 222}
fad73a1a 223
33ff9823
DB
224struct bpf_call_arg_meta {
225 struct bpf_map *map_ptr;
435faee1 226 bool raw_mode;
36bbef52 227 bool pkt_access;
435faee1
DB
228 int regno;
229 int access_size;
849fa506
YS
230 s64 msize_smax_value;
231 u64 msize_umax_value;
1b986589 232 int ref_obj_id;
d83525ca 233 int func_id;
a7658e1a 234 u32 btf_id;
33ff9823
DB
235};
236
8580ac94
AS
237struct btf *btf_vmlinux;
238
cbd35700
AS
239static DEFINE_MUTEX(bpf_verifier_lock);
240
d9762e84
MKL
241static const struct bpf_line_info *
242find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
243{
244 const struct bpf_line_info *linfo;
245 const struct bpf_prog *prog;
246 u32 i, nr_linfo;
247
248 prog = env->prog;
249 nr_linfo = prog->aux->nr_linfo;
250
251 if (!nr_linfo || insn_off >= prog->len)
252 return NULL;
253
254 linfo = prog->aux->linfo;
255 for (i = 1; i < nr_linfo; i++)
256 if (insn_off < linfo[i].insn_off)
257 break;
258
259 return &linfo[i - 1];
260}
261
77d2e05a
MKL
262void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
263 va_list args)
cbd35700 264{
a2a7d570 265 unsigned int n;
cbd35700 266
a2a7d570 267 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
a2a7d570
JK
268
269 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
270 "verifier log line truncated - local buffer too short\n");
271
272 n = min(log->len_total - log->len_used - 1, n);
273 log->kbuf[n] = '\0';
274
8580ac94
AS
275 if (log->level == BPF_LOG_KERNEL) {
276 pr_err("BPF:%s\n", log->kbuf);
277 return;
278 }
a2a7d570
JK
279 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
280 log->len_used += n;
281 else
282 log->ubuf = NULL;
cbd35700 283}
abe08840
JO
284
285/* log_level controls verbosity level of eBPF verifier.
286 * bpf_verifier_log_write() is used to dump the verification trace to the log,
287 * so the user can figure out what's wrong with the program
430e68d1 288 */
abe08840
JO
289__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
290 const char *fmt, ...)
291{
292 va_list args;
293
77d2e05a
MKL
294 if (!bpf_verifier_log_needed(&env->log))
295 return;
296
abe08840 297 va_start(args, fmt);
77d2e05a 298 bpf_verifier_vlog(&env->log, fmt, args);
abe08840
JO
299 va_end(args);
300}
301EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
302
303__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
304{
77d2e05a 305 struct bpf_verifier_env *env = private_data;
abe08840
JO
306 va_list args;
307
77d2e05a
MKL
308 if (!bpf_verifier_log_needed(&env->log))
309 return;
310
abe08840 311 va_start(args, fmt);
77d2e05a 312 bpf_verifier_vlog(&env->log, fmt, args);
abe08840
JO
313 va_end(args);
314}
cbd35700 315
9e15db66
AS
316__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
317 const char *fmt, ...)
318{
319 va_list args;
320
321 if (!bpf_verifier_log_needed(log))
322 return;
323
324 va_start(args, fmt);
325 bpf_verifier_vlog(log, fmt, args);
326 va_end(args);
327}
328
d9762e84
MKL
329static const char *ltrim(const char *s)
330{
331 while (isspace(*s))
332 s++;
333
334 return s;
335}
336
337__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
338 u32 insn_off,
339 const char *prefix_fmt, ...)
340{
341 const struct bpf_line_info *linfo;
342
343 if (!bpf_verifier_log_needed(&env->log))
344 return;
345
346 linfo = find_linfo(env, insn_off);
347 if (!linfo || linfo == env->prev_linfo)
348 return;
349
350 if (prefix_fmt) {
351 va_list args;
352
353 va_start(args, prefix_fmt);
354 bpf_verifier_vlog(&env->log, prefix_fmt, args);
355 va_end(args);
356 }
357
358 verbose(env, "%s\n",
359 ltrim(btf_name_by_offset(env->prog->aux->btf,
360 linfo->line_off)));
361
362 env->prev_linfo = linfo;
363}
364
de8f3a83
DB
365static bool type_is_pkt_pointer(enum bpf_reg_type type)
366{
367 return type == PTR_TO_PACKET ||
368 type == PTR_TO_PACKET_META;
369}
370
46f8bc92
MKL
371static bool type_is_sk_pointer(enum bpf_reg_type type)
372{
373 return type == PTR_TO_SOCKET ||
655a51e5 374 type == PTR_TO_SOCK_COMMON ||
fada7fdc
JL
375 type == PTR_TO_TCP_SOCK ||
376 type == PTR_TO_XDP_SOCK;
46f8bc92
MKL
377}
378
840b9615
JS
379static bool reg_type_may_be_null(enum bpf_reg_type type)
380{
fd978bf7 381 return type == PTR_TO_MAP_VALUE_OR_NULL ||
46f8bc92 382 type == PTR_TO_SOCKET_OR_NULL ||
655a51e5
MKL
383 type == PTR_TO_SOCK_COMMON_OR_NULL ||
384 type == PTR_TO_TCP_SOCK_OR_NULL;
fd978bf7
JS
385}
386
d83525ca
AS
387static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
388{
389 return reg->type == PTR_TO_MAP_VALUE &&
390 map_value_has_spin_lock(reg->map_ptr);
391}
392
cba368c1
MKL
393static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
394{
395 return type == PTR_TO_SOCKET ||
396 type == PTR_TO_SOCKET_OR_NULL ||
397 type == PTR_TO_TCP_SOCK ||
398 type == PTR_TO_TCP_SOCK_OR_NULL;
399}
400
1b986589 401static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
fd978bf7 402{
1b986589 403 return type == ARG_PTR_TO_SOCK_COMMON;
fd978bf7
JS
404}
405
406/* Determine whether the function releases some resources allocated by another
407 * function call. The first reference type argument will be assumed to be
408 * released by release_reference().
409 */
410static bool is_release_function(enum bpf_func_id func_id)
411{
6acc9b43 412 return func_id == BPF_FUNC_sk_release;
840b9615
JS
413}
414
46f8bc92
MKL
415static bool is_acquire_function(enum bpf_func_id func_id)
416{
417 return func_id == BPF_FUNC_sk_lookup_tcp ||
edbf8c01
LB
418 func_id == BPF_FUNC_sk_lookup_udp ||
419 func_id == BPF_FUNC_skc_lookup_tcp;
46f8bc92
MKL
420}
421
1b986589
MKL
422static bool is_ptr_cast_function(enum bpf_func_id func_id)
423{
424 return func_id == BPF_FUNC_tcp_sock ||
425 func_id == BPF_FUNC_sk_fullsock;
426}
427
17a52670
AS
428/* string representation of 'enum bpf_reg_type' */
429static const char * const reg_type_str[] = {
430 [NOT_INIT] = "?",
f1174f77 431 [SCALAR_VALUE] = "inv",
17a52670
AS
432 [PTR_TO_CTX] = "ctx",
433 [CONST_PTR_TO_MAP] = "map_ptr",
434 [PTR_TO_MAP_VALUE] = "map_value",
435 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
17a52670 436 [PTR_TO_STACK] = "fp",
969bf05e 437 [PTR_TO_PACKET] = "pkt",
de8f3a83 438 [PTR_TO_PACKET_META] = "pkt_meta",
969bf05e 439 [PTR_TO_PACKET_END] = "pkt_end",
d58e468b 440 [PTR_TO_FLOW_KEYS] = "flow_keys",
c64b7983
JS
441 [PTR_TO_SOCKET] = "sock",
442 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
46f8bc92
MKL
443 [PTR_TO_SOCK_COMMON] = "sock_common",
444 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
655a51e5
MKL
445 [PTR_TO_TCP_SOCK] = "tcp_sock",
446 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
9df1c28b 447 [PTR_TO_TP_BUFFER] = "tp_buffer",
fada7fdc 448 [PTR_TO_XDP_SOCK] = "xdp_sock",
9e15db66 449 [PTR_TO_BTF_ID] = "ptr_",
17a52670
AS
450};
451
8efea21d
EC
452static char slot_type_char[] = {
453 [STACK_INVALID] = '?',
454 [STACK_SPILL] = 'r',
455 [STACK_MISC] = 'm',
456 [STACK_ZERO] = '0',
457};
458
4e92024a
AS
459static void print_liveness(struct bpf_verifier_env *env,
460 enum bpf_reg_liveness live)
461{
9242b5f5 462 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
4e92024a
AS
463 verbose(env, "_");
464 if (live & REG_LIVE_READ)
465 verbose(env, "r");
466 if (live & REG_LIVE_WRITTEN)
467 verbose(env, "w");
9242b5f5
AS
468 if (live & REG_LIVE_DONE)
469 verbose(env, "D");
4e92024a
AS
470}
471
f4d7e40a
AS
472static struct bpf_func_state *func(struct bpf_verifier_env *env,
473 const struct bpf_reg_state *reg)
474{
475 struct bpf_verifier_state *cur = env->cur_state;
476
477 return cur->frame[reg->frameno];
478}
479
9e15db66
AS
480const char *kernel_type_name(u32 id)
481{
482 return btf_name_by_offset(btf_vmlinux,
483 btf_type_by_id(btf_vmlinux, id)->name_off);
484}
485
61bd5218 486static void print_verifier_state(struct bpf_verifier_env *env,
f4d7e40a 487 const struct bpf_func_state *state)
17a52670 488{
f4d7e40a 489 const struct bpf_reg_state *reg;
17a52670
AS
490 enum bpf_reg_type t;
491 int i;
492
f4d7e40a
AS
493 if (state->frameno)
494 verbose(env, " frame%d:", state->frameno);
17a52670 495 for (i = 0; i < MAX_BPF_REG; i++) {
1a0dc1ac
AS
496 reg = &state->regs[i];
497 t = reg->type;
17a52670
AS
498 if (t == NOT_INIT)
499 continue;
4e92024a
AS
500 verbose(env, " R%d", i);
501 print_liveness(env, reg->live);
502 verbose(env, "=%s", reg_type_str[t]);
b5dc0163
AS
503 if (t == SCALAR_VALUE && reg->precise)
504 verbose(env, "P");
f1174f77
EC
505 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
506 tnum_is_const(reg->var_off)) {
507 /* reg->off should be 0 for SCALAR_VALUE */
61bd5218 508 verbose(env, "%lld", reg->var_off.value + reg->off);
f1174f77 509 } else {
9e15db66
AS
510 if (t == PTR_TO_BTF_ID)
511 verbose(env, "%s", kernel_type_name(reg->btf_id));
cba368c1
MKL
512 verbose(env, "(id=%d", reg->id);
513 if (reg_type_may_be_refcounted_or_null(t))
514 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
f1174f77 515 if (t != SCALAR_VALUE)
61bd5218 516 verbose(env, ",off=%d", reg->off);
de8f3a83 517 if (type_is_pkt_pointer(t))
61bd5218 518 verbose(env, ",r=%d", reg->range);
f1174f77
EC
519 else if (t == CONST_PTR_TO_MAP ||
520 t == PTR_TO_MAP_VALUE ||
521 t == PTR_TO_MAP_VALUE_OR_NULL)
61bd5218 522 verbose(env, ",ks=%d,vs=%d",
f1174f77
EC
523 reg->map_ptr->key_size,
524 reg->map_ptr->value_size);
7d1238f2
EC
525 if (tnum_is_const(reg->var_off)) {
526 /* Typically an immediate SCALAR_VALUE, but
527 * could be a pointer whose offset is too big
528 * for reg->off
529 */
61bd5218 530 verbose(env, ",imm=%llx", reg->var_off.value);
7d1238f2
EC
531 } else {
532 if (reg->smin_value != reg->umin_value &&
533 reg->smin_value != S64_MIN)
61bd5218 534 verbose(env, ",smin_value=%lld",
7d1238f2
EC
535 (long long)reg->smin_value);
536 if (reg->smax_value != reg->umax_value &&
537 reg->smax_value != S64_MAX)
61bd5218 538 verbose(env, ",smax_value=%lld",
7d1238f2
EC
539 (long long)reg->smax_value);
540 if (reg->umin_value != 0)
61bd5218 541 verbose(env, ",umin_value=%llu",
7d1238f2
EC
542 (unsigned long long)reg->umin_value);
543 if (reg->umax_value != U64_MAX)
61bd5218 544 verbose(env, ",umax_value=%llu",
7d1238f2
EC
545 (unsigned long long)reg->umax_value);
546 if (!tnum_is_unknown(reg->var_off)) {
547 char tn_buf[48];
f1174f77 548
7d1238f2 549 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 550 verbose(env, ",var_off=%s", tn_buf);
7d1238f2 551 }
f1174f77 552 }
61bd5218 553 verbose(env, ")");
f1174f77 554 }
17a52670 555 }
638f5b90 556 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
8efea21d
EC
557 char types_buf[BPF_REG_SIZE + 1];
558 bool valid = false;
559 int j;
560
561 for (j = 0; j < BPF_REG_SIZE; j++) {
562 if (state->stack[i].slot_type[j] != STACK_INVALID)
563 valid = true;
564 types_buf[j] = slot_type_char[
565 state->stack[i].slot_type[j]];
566 }
567 types_buf[BPF_REG_SIZE] = 0;
568 if (!valid)
569 continue;
570 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
571 print_liveness(env, state->stack[i].spilled_ptr.live);
b5dc0163
AS
572 if (state->stack[i].slot_type[0] == STACK_SPILL) {
573 reg = &state->stack[i].spilled_ptr;
574 t = reg->type;
575 verbose(env, "=%s", reg_type_str[t]);
576 if (t == SCALAR_VALUE && reg->precise)
577 verbose(env, "P");
578 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
579 verbose(env, "%lld", reg->var_off.value + reg->off);
580 } else {
8efea21d 581 verbose(env, "=%s", types_buf);
b5dc0163 582 }
17a52670 583 }
fd978bf7
JS
584 if (state->acquired_refs && state->refs[0].id) {
585 verbose(env, " refs=%d", state->refs[0].id);
586 for (i = 1; i < state->acquired_refs; i++)
587 if (state->refs[i].id)
588 verbose(env, ",%d", state->refs[i].id);
589 }
61bd5218 590 verbose(env, "\n");
17a52670
AS
591}
592
84dbf350
JS
593#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
594static int copy_##NAME##_state(struct bpf_func_state *dst, \
595 const struct bpf_func_state *src) \
596{ \
597 if (!src->FIELD) \
598 return 0; \
599 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
600 /* internal bug, make state invalid to reject the program */ \
601 memset(dst, 0, sizeof(*dst)); \
602 return -EFAULT; \
603 } \
604 memcpy(dst->FIELD, src->FIELD, \
605 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
606 return 0; \
638f5b90 607}
fd978bf7
JS
608/* copy_reference_state() */
609COPY_STATE_FN(reference, acquired_refs, refs, 1)
84dbf350
JS
610/* copy_stack_state() */
611COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
612#undef COPY_STATE_FN
613
614#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
615static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
616 bool copy_old) \
617{ \
618 u32 old_size = state->COUNT; \
619 struct bpf_##NAME##_state *new_##FIELD; \
620 int slot = size / SIZE; \
621 \
622 if (size <= old_size || !size) { \
623 if (copy_old) \
624 return 0; \
625 state->COUNT = slot * SIZE; \
626 if (!size && old_size) { \
627 kfree(state->FIELD); \
628 state->FIELD = NULL; \
629 } \
630 return 0; \
631 } \
632 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
633 GFP_KERNEL); \
634 if (!new_##FIELD) \
635 return -ENOMEM; \
636 if (copy_old) { \
637 if (state->FIELD) \
638 memcpy(new_##FIELD, state->FIELD, \
639 sizeof(*new_##FIELD) * (old_size / SIZE)); \
640 memset(new_##FIELD + old_size / SIZE, 0, \
641 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
642 } \
643 state->COUNT = slot * SIZE; \
644 kfree(state->FIELD); \
645 state->FIELD = new_##FIELD; \
646 return 0; \
647}
fd978bf7
JS
648/* realloc_reference_state() */
649REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
84dbf350
JS
650/* realloc_stack_state() */
651REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
652#undef REALLOC_STATE_FN
638f5b90
AS
653
654/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
655 * make it consume minimal amount of memory. check_stack_write() access from
f4d7e40a 656 * the program calls into realloc_func_state() to grow the stack size.
84dbf350
JS
657 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
658 * which realloc_stack_state() copies over. It points to previous
659 * bpf_verifier_state which is never reallocated.
638f5b90 660 */
fd978bf7
JS
661static int realloc_func_state(struct bpf_func_state *state, int stack_size,
662 int refs_size, bool copy_old)
638f5b90 663{
fd978bf7
JS
664 int err = realloc_reference_state(state, refs_size, copy_old);
665 if (err)
666 return err;
667 return realloc_stack_state(state, stack_size, copy_old);
668}
669
670/* Acquire a pointer id from the env and update the state->refs to include
671 * this new pointer reference.
672 * On success, returns a valid pointer id to associate with the register
673 * On failure, returns a negative errno.
638f5b90 674 */
fd978bf7 675static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
638f5b90 676{
fd978bf7
JS
677 struct bpf_func_state *state = cur_func(env);
678 int new_ofs = state->acquired_refs;
679 int id, err;
680
681 err = realloc_reference_state(state, state->acquired_refs + 1, true);
682 if (err)
683 return err;
684 id = ++env->id_gen;
685 state->refs[new_ofs].id = id;
686 state->refs[new_ofs].insn_idx = insn_idx;
638f5b90 687
fd978bf7
JS
688 return id;
689}
690
691/* release function corresponding to acquire_reference_state(). Idempotent. */
46f8bc92 692static int release_reference_state(struct bpf_func_state *state, int ptr_id)
fd978bf7
JS
693{
694 int i, last_idx;
695
fd978bf7
JS
696 last_idx = state->acquired_refs - 1;
697 for (i = 0; i < state->acquired_refs; i++) {
698 if (state->refs[i].id == ptr_id) {
699 if (last_idx && i != last_idx)
700 memcpy(&state->refs[i], &state->refs[last_idx],
701 sizeof(*state->refs));
702 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
703 state->acquired_refs--;
638f5b90 704 return 0;
638f5b90 705 }
638f5b90 706 }
46f8bc92 707 return -EINVAL;
fd978bf7
JS
708}
709
710static int transfer_reference_state(struct bpf_func_state *dst,
711 struct bpf_func_state *src)
712{
713 int err = realloc_reference_state(dst, src->acquired_refs, false);
714 if (err)
715 return err;
716 err = copy_reference_state(dst, src);
717 if (err)
718 return err;
638f5b90
AS
719 return 0;
720}
721
f4d7e40a
AS
722static void free_func_state(struct bpf_func_state *state)
723{
5896351e
AS
724 if (!state)
725 return;
fd978bf7 726 kfree(state->refs);
f4d7e40a
AS
727 kfree(state->stack);
728 kfree(state);
729}
730
b5dc0163
AS
731static void clear_jmp_history(struct bpf_verifier_state *state)
732{
733 kfree(state->jmp_history);
734 state->jmp_history = NULL;
735 state->jmp_history_cnt = 0;
736}
737
1969db47
AS
738static void free_verifier_state(struct bpf_verifier_state *state,
739 bool free_self)
638f5b90 740{
f4d7e40a
AS
741 int i;
742
743 for (i = 0; i <= state->curframe; i++) {
744 free_func_state(state->frame[i]);
745 state->frame[i] = NULL;
746 }
b5dc0163 747 clear_jmp_history(state);
1969db47
AS
748 if (free_self)
749 kfree(state);
638f5b90
AS
750}
751
752/* copy verifier state from src to dst growing dst stack space
753 * when necessary to accommodate larger src stack
754 */
f4d7e40a
AS
755static int copy_func_state(struct bpf_func_state *dst,
756 const struct bpf_func_state *src)
638f5b90
AS
757{
758 int err;
759
fd978bf7
JS
760 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
761 false);
762 if (err)
763 return err;
764 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
765 err = copy_reference_state(dst, src);
638f5b90
AS
766 if (err)
767 return err;
638f5b90
AS
768 return copy_stack_state(dst, src);
769}
770
f4d7e40a
AS
771static int copy_verifier_state(struct bpf_verifier_state *dst_state,
772 const struct bpf_verifier_state *src)
773{
774 struct bpf_func_state *dst;
b5dc0163 775 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
f4d7e40a
AS
776 int i, err;
777
b5dc0163
AS
778 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
779 kfree(dst_state->jmp_history);
780 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
781 if (!dst_state->jmp_history)
782 return -ENOMEM;
783 }
784 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
785 dst_state->jmp_history_cnt = src->jmp_history_cnt;
786
f4d7e40a
AS
787 /* if dst has more stack frames then src frame, free them */
788 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
789 free_func_state(dst_state->frame[i]);
790 dst_state->frame[i] = NULL;
791 }
979d63d5 792 dst_state->speculative = src->speculative;
f4d7e40a 793 dst_state->curframe = src->curframe;
d83525ca 794 dst_state->active_spin_lock = src->active_spin_lock;
2589726d
AS
795 dst_state->branches = src->branches;
796 dst_state->parent = src->parent;
b5dc0163
AS
797 dst_state->first_insn_idx = src->first_insn_idx;
798 dst_state->last_insn_idx = src->last_insn_idx;
f4d7e40a
AS
799 for (i = 0; i <= src->curframe; i++) {
800 dst = dst_state->frame[i];
801 if (!dst) {
802 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
803 if (!dst)
804 return -ENOMEM;
805 dst_state->frame[i] = dst;
806 }
807 err = copy_func_state(dst, src->frame[i]);
808 if (err)
809 return err;
810 }
811 return 0;
812}
813
2589726d
AS
814static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
815{
816 while (st) {
817 u32 br = --st->branches;
818
819 /* WARN_ON(br > 1) technically makes sense here,
820 * but see comment in push_stack(), hence:
821 */
822 WARN_ONCE((int)br < 0,
823 "BUG update_branch_counts:branches_to_explore=%d\n",
824 br);
825 if (br)
826 break;
827 st = st->parent;
828 }
829}
830
638f5b90
AS
831static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
832 int *insn_idx)
833{
834 struct bpf_verifier_state *cur = env->cur_state;
835 struct bpf_verifier_stack_elem *elem, *head = env->head;
836 int err;
17a52670
AS
837
838 if (env->head == NULL)
638f5b90 839 return -ENOENT;
17a52670 840
638f5b90
AS
841 if (cur) {
842 err = copy_verifier_state(cur, &head->st);
843 if (err)
844 return err;
845 }
846 if (insn_idx)
847 *insn_idx = head->insn_idx;
17a52670 848 if (prev_insn_idx)
638f5b90
AS
849 *prev_insn_idx = head->prev_insn_idx;
850 elem = head->next;
1969db47 851 free_verifier_state(&head->st, false);
638f5b90 852 kfree(head);
17a52670
AS
853 env->head = elem;
854 env->stack_size--;
638f5b90 855 return 0;
17a52670
AS
856}
857
58e2af8b 858static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
979d63d5
DB
859 int insn_idx, int prev_insn_idx,
860 bool speculative)
17a52670 861{
638f5b90 862 struct bpf_verifier_state *cur = env->cur_state;
58e2af8b 863 struct bpf_verifier_stack_elem *elem;
638f5b90 864 int err;
17a52670 865
638f5b90 866 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
17a52670
AS
867 if (!elem)
868 goto err;
869
17a52670
AS
870 elem->insn_idx = insn_idx;
871 elem->prev_insn_idx = prev_insn_idx;
872 elem->next = env->head;
873 env->head = elem;
874 env->stack_size++;
1969db47
AS
875 err = copy_verifier_state(&elem->st, cur);
876 if (err)
877 goto err;
979d63d5 878 elem->st.speculative |= speculative;
b285fcb7
AS
879 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
880 verbose(env, "The sequence of %d jumps is too complex.\n",
881 env->stack_size);
17a52670
AS
882 goto err;
883 }
2589726d
AS
884 if (elem->st.parent) {
885 ++elem->st.parent->branches;
886 /* WARN_ON(branches > 2) technically makes sense here,
887 * but
888 * 1. speculative states will bump 'branches' for non-branch
889 * instructions
890 * 2. is_state_visited() heuristics may decide not to create
891 * a new state for a sequence of branches and all such current
892 * and cloned states will be pointing to a single parent state
893 * which might have large 'branches' count.
894 */
895 }
17a52670
AS
896 return &elem->st;
897err:
5896351e
AS
898 free_verifier_state(env->cur_state, true);
899 env->cur_state = NULL;
17a52670 900 /* pop all elements and return */
638f5b90 901 while (!pop_stack(env, NULL, NULL));
17a52670
AS
902 return NULL;
903}
904
905#define CALLER_SAVED_REGS 6
906static const int caller_saved[CALLER_SAVED_REGS] = {
907 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
908};
909
f54c7898
DB
910static void __mark_reg_not_init(const struct bpf_verifier_env *env,
911 struct bpf_reg_state *reg);
f1174f77 912
b03c9f9f
EC
913/* Mark the unknown part of a register (variable offset or scalar value) as
914 * known to have the value @imm.
915 */
916static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
917{
a9c676bc
AS
918 /* Clear id, off, and union(map_ptr, range) */
919 memset(((u8 *)reg) + sizeof(reg->type), 0,
920 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
b03c9f9f
EC
921 reg->var_off = tnum_const(imm);
922 reg->smin_value = (s64)imm;
923 reg->smax_value = (s64)imm;
924 reg->umin_value = imm;
925 reg->umax_value = imm;
926}
927
f1174f77
EC
928/* Mark the 'variable offset' part of a register as zero. This should be
929 * used only on registers holding a pointer type.
930 */
931static void __mark_reg_known_zero(struct bpf_reg_state *reg)
a9789ef9 932{
b03c9f9f 933 __mark_reg_known(reg, 0);
f1174f77 934}
a9789ef9 935
cc2b14d5
AS
936static void __mark_reg_const_zero(struct bpf_reg_state *reg)
937{
938 __mark_reg_known(reg, 0);
cc2b14d5
AS
939 reg->type = SCALAR_VALUE;
940}
941
61bd5218
JK
942static void mark_reg_known_zero(struct bpf_verifier_env *env,
943 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
944{
945 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 946 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
f1174f77
EC
947 /* Something bad happened, let's kill all regs */
948 for (regno = 0; regno < MAX_BPF_REG; regno++)
f54c7898 949 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
950 return;
951 }
952 __mark_reg_known_zero(regs + regno);
953}
954
de8f3a83
DB
955static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
956{
957 return type_is_pkt_pointer(reg->type);
958}
959
960static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
961{
962 return reg_is_pkt_pointer(reg) ||
963 reg->type == PTR_TO_PACKET_END;
964}
965
966/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
967static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
968 enum bpf_reg_type which)
969{
970 /* The register can already have a range from prior markings.
971 * This is fine as long as it hasn't been advanced from its
972 * origin.
973 */
974 return reg->type == which &&
975 reg->id == 0 &&
976 reg->off == 0 &&
977 tnum_equals_const(reg->var_off, 0);
978}
979
b03c9f9f
EC
980/* Attempts to improve min/max values based on var_off information */
981static void __update_reg_bounds(struct bpf_reg_state *reg)
982{
983 /* min signed is max(sign bit) | min(other bits) */
984 reg->smin_value = max_t(s64, reg->smin_value,
985 reg->var_off.value | (reg->var_off.mask & S64_MIN));
986 /* max signed is min(sign bit) | max(other bits) */
987 reg->smax_value = min_t(s64, reg->smax_value,
988 reg->var_off.value | (reg->var_off.mask & S64_MAX));
989 reg->umin_value = max(reg->umin_value, reg->var_off.value);
990 reg->umax_value = min(reg->umax_value,
991 reg->var_off.value | reg->var_off.mask);
992}
993
994/* Uses signed min/max values to inform unsigned, and vice-versa */
995static void __reg_deduce_bounds(struct bpf_reg_state *reg)
996{
997 /* Learn sign from signed bounds.
998 * If we cannot cross the sign boundary, then signed and unsigned bounds
999 * are the same, so combine. This works even in the negative case, e.g.
1000 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1001 */
1002 if (reg->smin_value >= 0 || reg->smax_value < 0) {
1003 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1004 reg->umin_value);
1005 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1006 reg->umax_value);
1007 return;
1008 }
1009 /* Learn sign from unsigned bounds. Signed bounds cross the sign
1010 * boundary, so we must be careful.
1011 */
1012 if ((s64)reg->umax_value >= 0) {
1013 /* Positive. We can't learn anything from the smin, but smax
1014 * is positive, hence safe.
1015 */
1016 reg->smin_value = reg->umin_value;
1017 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1018 reg->umax_value);
1019 } else if ((s64)reg->umin_value < 0) {
1020 /* Negative. We can't learn anything from the smax, but smin
1021 * is negative, hence safe.
1022 */
1023 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1024 reg->umin_value);
1025 reg->smax_value = reg->umax_value;
1026 }
1027}
1028
1029/* Attempts to improve var_off based on unsigned min/max information */
1030static void __reg_bound_offset(struct bpf_reg_state *reg)
1031{
1032 reg->var_off = tnum_intersect(reg->var_off,
1033 tnum_range(reg->umin_value,
1034 reg->umax_value));
1035}
1036
581738a6
YS
1037static void __reg_bound_offset32(struct bpf_reg_state *reg)
1038{
1039 u64 mask = 0xffffFFFF;
1040 struct tnum range = tnum_range(reg->umin_value & mask,
1041 reg->umax_value & mask);
1042 struct tnum lo32 = tnum_cast(reg->var_off, 4);
1043 struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
1044
1045 reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
1046}
1047
b03c9f9f
EC
1048/* Reset the min/max bounds of a register */
1049static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1050{
1051 reg->smin_value = S64_MIN;
1052 reg->smax_value = S64_MAX;
1053 reg->umin_value = 0;
1054 reg->umax_value = U64_MAX;
1055}
1056
f1174f77 1057/* Mark a register as having a completely unknown (scalar) value. */
f54c7898
DB
1058static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1059 struct bpf_reg_state *reg)
f1174f77 1060{
a9c676bc
AS
1061 /*
1062 * Clear type, id, off, and union(map_ptr, range) and
1063 * padding between 'type' and union
1064 */
1065 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
f1174f77 1066 reg->type = SCALAR_VALUE;
f1174f77 1067 reg->var_off = tnum_unknown;
f4d7e40a 1068 reg->frameno = 0;
f54c7898
DB
1069 reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1070 true : false;
b03c9f9f 1071 __mark_reg_unbounded(reg);
f1174f77
EC
1072}
1073
61bd5218
JK
1074static void mark_reg_unknown(struct bpf_verifier_env *env,
1075 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1076{
1077 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1078 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
19ceb417
AS
1079 /* Something bad happened, let's kill all regs except FP */
1080 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 1081 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1082 return;
1083 }
f54c7898 1084 __mark_reg_unknown(env, regs + regno);
f1174f77
EC
1085}
1086
f54c7898
DB
1087static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1088 struct bpf_reg_state *reg)
f1174f77 1089{
f54c7898 1090 __mark_reg_unknown(env, reg);
f1174f77
EC
1091 reg->type = NOT_INIT;
1092}
1093
61bd5218
JK
1094static void mark_reg_not_init(struct bpf_verifier_env *env,
1095 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1096{
1097 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1098 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
19ceb417
AS
1099 /* Something bad happened, let's kill all regs except FP */
1100 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 1101 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1102 return;
1103 }
f54c7898 1104 __mark_reg_not_init(env, regs + regno);
a9789ef9
DB
1105}
1106
5327ed3d 1107#define DEF_NOT_SUBREG (0)
61bd5218 1108static void init_reg_state(struct bpf_verifier_env *env,
f4d7e40a 1109 struct bpf_func_state *state)
17a52670 1110{
f4d7e40a 1111 struct bpf_reg_state *regs = state->regs;
17a52670
AS
1112 int i;
1113
dc503a8a 1114 for (i = 0; i < MAX_BPF_REG; i++) {
61bd5218 1115 mark_reg_not_init(env, regs, i);
dc503a8a 1116 regs[i].live = REG_LIVE_NONE;
679c782d 1117 regs[i].parent = NULL;
5327ed3d 1118 regs[i].subreg_def = DEF_NOT_SUBREG;
dc503a8a 1119 }
17a52670
AS
1120
1121 /* frame pointer */
f1174f77 1122 regs[BPF_REG_FP].type = PTR_TO_STACK;
61bd5218 1123 mark_reg_known_zero(env, regs, BPF_REG_FP);
f4d7e40a 1124 regs[BPF_REG_FP].frameno = state->frameno;
17a52670
AS
1125
1126 /* 1st arg to a function */
1127 regs[BPF_REG_1].type = PTR_TO_CTX;
61bd5218 1128 mark_reg_known_zero(env, regs, BPF_REG_1);
6760bf2d
DB
1129}
1130
f4d7e40a
AS
1131#define BPF_MAIN_FUNC (-1)
1132static void init_func_state(struct bpf_verifier_env *env,
1133 struct bpf_func_state *state,
1134 int callsite, int frameno, int subprogno)
1135{
1136 state->callsite = callsite;
1137 state->frameno = frameno;
1138 state->subprogno = subprogno;
1139 init_reg_state(env, state);
1140}
1141
17a52670
AS
1142enum reg_arg_type {
1143 SRC_OP, /* register is used as source operand */
1144 DST_OP, /* register is used as destination operand */
1145 DST_OP_NO_MARK /* same as above, check only, don't mark */
1146};
1147
cc8b0b92
AS
1148static int cmp_subprogs(const void *a, const void *b)
1149{
9c8105bd
JW
1150 return ((struct bpf_subprog_info *)a)->start -
1151 ((struct bpf_subprog_info *)b)->start;
cc8b0b92
AS
1152}
1153
1154static int find_subprog(struct bpf_verifier_env *env, int off)
1155{
9c8105bd 1156 struct bpf_subprog_info *p;
cc8b0b92 1157
9c8105bd
JW
1158 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1159 sizeof(env->subprog_info[0]), cmp_subprogs);
cc8b0b92
AS
1160 if (!p)
1161 return -ENOENT;
9c8105bd 1162 return p - env->subprog_info;
cc8b0b92
AS
1163
1164}
1165
1166static int add_subprog(struct bpf_verifier_env *env, int off)
1167{
1168 int insn_cnt = env->prog->len;
1169 int ret;
1170
1171 if (off >= insn_cnt || off < 0) {
1172 verbose(env, "call to invalid destination\n");
1173 return -EINVAL;
1174 }
1175 ret = find_subprog(env, off);
1176 if (ret >= 0)
1177 return 0;
4cb3d99c 1178 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
cc8b0b92
AS
1179 verbose(env, "too many subprograms\n");
1180 return -E2BIG;
1181 }
9c8105bd
JW
1182 env->subprog_info[env->subprog_cnt++].start = off;
1183 sort(env->subprog_info, env->subprog_cnt,
1184 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
cc8b0b92
AS
1185 return 0;
1186}
1187
1188static int check_subprogs(struct bpf_verifier_env *env)
1189{
1190 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
9c8105bd 1191 struct bpf_subprog_info *subprog = env->subprog_info;
cc8b0b92
AS
1192 struct bpf_insn *insn = env->prog->insnsi;
1193 int insn_cnt = env->prog->len;
1194
f910cefa
JW
1195 /* Add entry function. */
1196 ret = add_subprog(env, 0);
1197 if (ret < 0)
1198 return ret;
1199
cc8b0b92
AS
1200 /* determine subprog starts. The end is one before the next starts */
1201 for (i = 0; i < insn_cnt; i++) {
1202 if (insn[i].code != (BPF_JMP | BPF_CALL))
1203 continue;
1204 if (insn[i].src_reg != BPF_PSEUDO_CALL)
1205 continue;
1206 if (!env->allow_ptr_leaks) {
1207 verbose(env, "function calls to other bpf functions are allowed for root only\n");
1208 return -EPERM;
1209 }
cc8b0b92
AS
1210 ret = add_subprog(env, i + insn[i].imm + 1);
1211 if (ret < 0)
1212 return ret;
1213 }
1214
4cb3d99c
JW
1215 /* Add a fake 'exit' subprog which could simplify subprog iteration
1216 * logic. 'subprog_cnt' should not be increased.
1217 */
1218 subprog[env->subprog_cnt].start = insn_cnt;
1219
06ee7115 1220 if (env->log.level & BPF_LOG_LEVEL2)
cc8b0b92 1221 for (i = 0; i < env->subprog_cnt; i++)
9c8105bd 1222 verbose(env, "func#%d @%d\n", i, subprog[i].start);
cc8b0b92
AS
1223
1224 /* now check that all jumps are within the same subprog */
4cb3d99c
JW
1225 subprog_start = subprog[cur_subprog].start;
1226 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
1227 for (i = 0; i < insn_cnt; i++) {
1228 u8 code = insn[i].code;
1229
092ed096 1230 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
cc8b0b92
AS
1231 goto next;
1232 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1233 goto next;
1234 off = i + insn[i].off + 1;
1235 if (off < subprog_start || off >= subprog_end) {
1236 verbose(env, "jump out of range from insn %d to %d\n", i, off);
1237 return -EINVAL;
1238 }
1239next:
1240 if (i == subprog_end - 1) {
1241 /* to avoid fall-through from one subprog into another
1242 * the last insn of the subprog should be either exit
1243 * or unconditional jump back
1244 */
1245 if (code != (BPF_JMP | BPF_EXIT) &&
1246 code != (BPF_JMP | BPF_JA)) {
1247 verbose(env, "last insn is not an exit or jmp\n");
1248 return -EINVAL;
1249 }
1250 subprog_start = subprog_end;
4cb3d99c
JW
1251 cur_subprog++;
1252 if (cur_subprog < env->subprog_cnt)
9c8105bd 1253 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
1254 }
1255 }
1256 return 0;
1257}
1258
679c782d
EC
1259/* Parentage chain of this register (or stack slot) should take care of all
1260 * issues like callee-saved registers, stack slot allocation time, etc.
1261 */
f4d7e40a 1262static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 1263 const struct bpf_reg_state *state,
5327ed3d 1264 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
1265{
1266 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 1267 int cnt = 0;
dc503a8a
EC
1268
1269 while (parent) {
1270 /* if read wasn't screened by an earlier write ... */
679c782d 1271 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 1272 break;
9242b5f5
AS
1273 if (parent->live & REG_LIVE_DONE) {
1274 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1275 reg_type_str[parent->type],
1276 parent->var_off.value, parent->off);
1277 return -EFAULT;
1278 }
5327ed3d
JW
1279 /* The first condition is more likely to be true than the
1280 * second, checked it first.
1281 */
1282 if ((parent->live & REG_LIVE_READ) == flag ||
1283 parent->live & REG_LIVE_READ64)
25af32da
AS
1284 /* The parentage chain never changes and
1285 * this parent was already marked as LIVE_READ.
1286 * There is no need to keep walking the chain again and
1287 * keep re-marking all parents as LIVE_READ.
1288 * This case happens when the same register is read
1289 * multiple times without writes into it in-between.
5327ed3d
JW
1290 * Also, if parent has the stronger REG_LIVE_READ64 set,
1291 * then no need to set the weak REG_LIVE_READ32.
25af32da
AS
1292 */
1293 break;
dc503a8a 1294 /* ... then we depend on parent's value */
5327ed3d
JW
1295 parent->live |= flag;
1296 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
1297 if (flag == REG_LIVE_READ64)
1298 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
1299 state = parent;
1300 parent = state->parent;
f4d7e40a 1301 writes = true;
06ee7115 1302 cnt++;
dc503a8a 1303 }
06ee7115
AS
1304
1305 if (env->longest_mark_read_walk < cnt)
1306 env->longest_mark_read_walk = cnt;
f4d7e40a 1307 return 0;
dc503a8a
EC
1308}
1309
5327ed3d
JW
1310/* This function is supposed to be used by the following 32-bit optimization
1311 * code only. It returns TRUE if the source or destination register operates
1312 * on 64-bit, otherwise return FALSE.
1313 */
1314static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1315 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1316{
1317 u8 code, class, op;
1318
1319 code = insn->code;
1320 class = BPF_CLASS(code);
1321 op = BPF_OP(code);
1322 if (class == BPF_JMP) {
1323 /* BPF_EXIT for "main" will reach here. Return TRUE
1324 * conservatively.
1325 */
1326 if (op == BPF_EXIT)
1327 return true;
1328 if (op == BPF_CALL) {
1329 /* BPF to BPF call will reach here because of marking
1330 * caller saved clobber with DST_OP_NO_MARK for which we
1331 * don't care the register def because they are anyway
1332 * marked as NOT_INIT already.
1333 */
1334 if (insn->src_reg == BPF_PSEUDO_CALL)
1335 return false;
1336 /* Helper call will reach here because of arg type
1337 * check, conservatively return TRUE.
1338 */
1339 if (t == SRC_OP)
1340 return true;
1341
1342 return false;
1343 }
1344 }
1345
1346 if (class == BPF_ALU64 || class == BPF_JMP ||
1347 /* BPF_END always use BPF_ALU class. */
1348 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1349 return true;
1350
1351 if (class == BPF_ALU || class == BPF_JMP32)
1352 return false;
1353
1354 if (class == BPF_LDX) {
1355 if (t != SRC_OP)
1356 return BPF_SIZE(code) == BPF_DW;
1357 /* LDX source must be ptr. */
1358 return true;
1359 }
1360
1361 if (class == BPF_STX) {
1362 if (reg->type != SCALAR_VALUE)
1363 return true;
1364 return BPF_SIZE(code) == BPF_DW;
1365 }
1366
1367 if (class == BPF_LD) {
1368 u8 mode = BPF_MODE(code);
1369
1370 /* LD_IMM64 */
1371 if (mode == BPF_IMM)
1372 return true;
1373
1374 /* Both LD_IND and LD_ABS return 32-bit data. */
1375 if (t != SRC_OP)
1376 return false;
1377
1378 /* Implicit ctx ptr. */
1379 if (regno == BPF_REG_6)
1380 return true;
1381
1382 /* Explicit source could be any width. */
1383 return true;
1384 }
1385
1386 if (class == BPF_ST)
1387 /* The only source register for BPF_ST is a ptr. */
1388 return true;
1389
1390 /* Conservatively return true at default. */
1391 return true;
1392}
1393
b325fbca
JW
1394/* Return TRUE if INSN doesn't have explicit value define. */
1395static bool insn_no_def(struct bpf_insn *insn)
1396{
1397 u8 class = BPF_CLASS(insn->code);
1398
1399 return (class == BPF_JMP || class == BPF_JMP32 ||
1400 class == BPF_STX || class == BPF_ST);
1401}
1402
1403/* Return TRUE if INSN has defined any 32-bit value explicitly. */
1404static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1405{
1406 if (insn_no_def(insn))
1407 return false;
1408
1409 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1410}
1411
5327ed3d
JW
1412static void mark_insn_zext(struct bpf_verifier_env *env,
1413 struct bpf_reg_state *reg)
1414{
1415 s32 def_idx = reg->subreg_def;
1416
1417 if (def_idx == DEF_NOT_SUBREG)
1418 return;
1419
1420 env->insn_aux_data[def_idx - 1].zext_dst = true;
1421 /* The dst will be zero extended, so won't be sub-register anymore. */
1422 reg->subreg_def = DEF_NOT_SUBREG;
1423}
1424
dc503a8a 1425static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
1426 enum reg_arg_type t)
1427{
f4d7e40a
AS
1428 struct bpf_verifier_state *vstate = env->cur_state;
1429 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 1430 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 1431 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 1432 bool rw64;
dc503a8a 1433
17a52670 1434 if (regno >= MAX_BPF_REG) {
61bd5218 1435 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
1436 return -EINVAL;
1437 }
1438
c342dc10 1439 reg = &regs[regno];
5327ed3d 1440 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
1441 if (t == SRC_OP) {
1442 /* check whether register used as source operand can be read */
c342dc10 1443 if (reg->type == NOT_INIT) {
61bd5218 1444 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
1445 return -EACCES;
1446 }
679c782d 1447 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
1448 if (regno == BPF_REG_FP)
1449 return 0;
1450
5327ed3d
JW
1451 if (rw64)
1452 mark_insn_zext(env, reg);
1453
1454 return mark_reg_read(env, reg, reg->parent,
1455 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
1456 } else {
1457 /* check whether register used as dest operand can be written to */
1458 if (regno == BPF_REG_FP) {
61bd5218 1459 verbose(env, "frame pointer is read only\n");
17a52670
AS
1460 return -EACCES;
1461 }
c342dc10 1462 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 1463 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 1464 if (t == DST_OP)
61bd5218 1465 mark_reg_unknown(env, regs, regno);
17a52670
AS
1466 }
1467 return 0;
1468}
1469
b5dc0163
AS
1470/* for any branch, call, exit record the history of jmps in the given state */
1471static int push_jmp_history(struct bpf_verifier_env *env,
1472 struct bpf_verifier_state *cur)
1473{
1474 u32 cnt = cur->jmp_history_cnt;
1475 struct bpf_idx_pair *p;
1476
1477 cnt++;
1478 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1479 if (!p)
1480 return -ENOMEM;
1481 p[cnt - 1].idx = env->insn_idx;
1482 p[cnt - 1].prev_idx = env->prev_insn_idx;
1483 cur->jmp_history = p;
1484 cur->jmp_history_cnt = cnt;
1485 return 0;
1486}
1487
1488/* Backtrack one insn at a time. If idx is not at the top of recorded
1489 * history then previous instruction came from straight line execution.
1490 */
1491static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1492 u32 *history)
1493{
1494 u32 cnt = *history;
1495
1496 if (cnt && st->jmp_history[cnt - 1].idx == i) {
1497 i = st->jmp_history[cnt - 1].prev_idx;
1498 (*history)--;
1499 } else {
1500 i--;
1501 }
1502 return i;
1503}
1504
1505/* For given verifier state backtrack_insn() is called from the last insn to
1506 * the first insn. Its purpose is to compute a bitmask of registers and
1507 * stack slots that needs precision in the parent verifier state.
1508 */
1509static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1510 u32 *reg_mask, u64 *stack_mask)
1511{
1512 const struct bpf_insn_cbs cbs = {
1513 .cb_print = verbose,
1514 .private_data = env,
1515 };
1516 struct bpf_insn *insn = env->prog->insnsi + idx;
1517 u8 class = BPF_CLASS(insn->code);
1518 u8 opcode = BPF_OP(insn->code);
1519 u8 mode = BPF_MODE(insn->code);
1520 u32 dreg = 1u << insn->dst_reg;
1521 u32 sreg = 1u << insn->src_reg;
1522 u32 spi;
1523
1524 if (insn->code == 0)
1525 return 0;
1526 if (env->log.level & BPF_LOG_LEVEL) {
1527 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1528 verbose(env, "%d: ", idx);
1529 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1530 }
1531
1532 if (class == BPF_ALU || class == BPF_ALU64) {
1533 if (!(*reg_mask & dreg))
1534 return 0;
1535 if (opcode == BPF_MOV) {
1536 if (BPF_SRC(insn->code) == BPF_X) {
1537 /* dreg = sreg
1538 * dreg needs precision after this insn
1539 * sreg needs precision before this insn
1540 */
1541 *reg_mask &= ~dreg;
1542 *reg_mask |= sreg;
1543 } else {
1544 /* dreg = K
1545 * dreg needs precision after this insn.
1546 * Corresponding register is already marked
1547 * as precise=true in this verifier state.
1548 * No further markings in parent are necessary
1549 */
1550 *reg_mask &= ~dreg;
1551 }
1552 } else {
1553 if (BPF_SRC(insn->code) == BPF_X) {
1554 /* dreg += sreg
1555 * both dreg and sreg need precision
1556 * before this insn
1557 */
1558 *reg_mask |= sreg;
1559 } /* else dreg += K
1560 * dreg still needs precision before this insn
1561 */
1562 }
1563 } else if (class == BPF_LDX) {
1564 if (!(*reg_mask & dreg))
1565 return 0;
1566 *reg_mask &= ~dreg;
1567
1568 /* scalars can only be spilled into stack w/o losing precision.
1569 * Load from any other memory can be zero extended.
1570 * The desire to keep that precision is already indicated
1571 * by 'precise' mark in corresponding register of this state.
1572 * No further tracking necessary.
1573 */
1574 if (insn->src_reg != BPF_REG_FP)
1575 return 0;
1576 if (BPF_SIZE(insn->code) != BPF_DW)
1577 return 0;
1578
1579 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1580 * that [fp - off] slot contains scalar that needs to be
1581 * tracked with precision
1582 */
1583 spi = (-insn->off - 1) / BPF_REG_SIZE;
1584 if (spi >= 64) {
1585 verbose(env, "BUG spi %d\n", spi);
1586 WARN_ONCE(1, "verifier backtracking bug");
1587 return -EFAULT;
1588 }
1589 *stack_mask |= 1ull << spi;
b3b50f05 1590 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1591 if (*reg_mask & dreg)
b3b50f05 1592 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1593 * to access memory. It means backtracking
1594 * encountered a case of pointer subtraction.
1595 */
1596 return -ENOTSUPP;
1597 /* scalars can only be spilled into stack */
1598 if (insn->dst_reg != BPF_REG_FP)
1599 return 0;
1600 if (BPF_SIZE(insn->code) != BPF_DW)
1601 return 0;
1602 spi = (-insn->off - 1) / BPF_REG_SIZE;
1603 if (spi >= 64) {
1604 verbose(env, "BUG spi %d\n", spi);
1605 WARN_ONCE(1, "verifier backtracking bug");
1606 return -EFAULT;
1607 }
1608 if (!(*stack_mask & (1ull << spi)))
1609 return 0;
1610 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1611 if (class == BPF_STX)
1612 *reg_mask |= sreg;
b5dc0163
AS
1613 } else if (class == BPF_JMP || class == BPF_JMP32) {
1614 if (opcode == BPF_CALL) {
1615 if (insn->src_reg == BPF_PSEUDO_CALL)
1616 return -ENOTSUPP;
1617 /* regular helper call sets R0 */
1618 *reg_mask &= ~1;
1619 if (*reg_mask & 0x3f) {
1620 /* if backtracing was looking for registers R1-R5
1621 * they should have been found already.
1622 */
1623 verbose(env, "BUG regs %x\n", *reg_mask);
1624 WARN_ONCE(1, "verifier backtracking bug");
1625 return -EFAULT;
1626 }
1627 } else if (opcode == BPF_EXIT) {
1628 return -ENOTSUPP;
1629 }
1630 } else if (class == BPF_LD) {
1631 if (!(*reg_mask & dreg))
1632 return 0;
1633 *reg_mask &= ~dreg;
1634 /* It's ld_imm64 or ld_abs or ld_ind.
1635 * For ld_imm64 no further tracking of precision
1636 * into parent is necessary
1637 */
1638 if (mode == BPF_IND || mode == BPF_ABS)
1639 /* to be analyzed */
1640 return -ENOTSUPP;
b5dc0163
AS
1641 }
1642 return 0;
1643}
1644
1645/* the scalar precision tracking algorithm:
1646 * . at the start all registers have precise=false.
1647 * . scalar ranges are tracked as normal through alu and jmp insns.
1648 * . once precise value of the scalar register is used in:
1649 * . ptr + scalar alu
1650 * . if (scalar cond K|scalar)
1651 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1652 * backtrack through the verifier states and mark all registers and
1653 * stack slots with spilled constants that these scalar regisers
1654 * should be precise.
1655 * . during state pruning two registers (or spilled stack slots)
1656 * are equivalent if both are not precise.
1657 *
1658 * Note the verifier cannot simply walk register parentage chain,
1659 * since many different registers and stack slots could have been
1660 * used to compute single precise scalar.
1661 *
1662 * The approach of starting with precise=true for all registers and then
1663 * backtrack to mark a register as not precise when the verifier detects
1664 * that program doesn't care about specific value (e.g., when helper
1665 * takes register as ARG_ANYTHING parameter) is not safe.
1666 *
1667 * It's ok to walk single parentage chain of the verifier states.
1668 * It's possible that this backtracking will go all the way till 1st insn.
1669 * All other branches will be explored for needing precision later.
1670 *
1671 * The backtracking needs to deal with cases like:
1672 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1673 * r9 -= r8
1674 * r5 = r9
1675 * if r5 > 0x79f goto pc+7
1676 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1677 * r5 += 1
1678 * ...
1679 * call bpf_perf_event_output#25
1680 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1681 *
1682 * and this case:
1683 * r6 = 1
1684 * call foo // uses callee's r6 inside to compute r0
1685 * r0 += r6
1686 * if r0 == 0 goto
1687 *
1688 * to track above reg_mask/stack_mask needs to be independent for each frame.
1689 *
1690 * Also if parent's curframe > frame where backtracking started,
1691 * the verifier need to mark registers in both frames, otherwise callees
1692 * may incorrectly prune callers. This is similar to
1693 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1694 *
1695 * For now backtracking falls back into conservative marking.
1696 */
1697static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1698 struct bpf_verifier_state *st)
1699{
1700 struct bpf_func_state *func;
1701 struct bpf_reg_state *reg;
1702 int i, j;
1703
1704 /* big hammer: mark all scalars precise in this path.
1705 * pop_stack may still get !precise scalars.
1706 */
1707 for (; st; st = st->parent)
1708 for (i = 0; i <= st->curframe; i++) {
1709 func = st->frame[i];
1710 for (j = 0; j < BPF_REG_FP; j++) {
1711 reg = &func->regs[j];
1712 if (reg->type != SCALAR_VALUE)
1713 continue;
1714 reg->precise = true;
1715 }
1716 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1717 if (func->stack[j].slot_type[0] != STACK_SPILL)
1718 continue;
1719 reg = &func->stack[j].spilled_ptr;
1720 if (reg->type != SCALAR_VALUE)
1721 continue;
1722 reg->precise = true;
1723 }
1724 }
1725}
1726
a3ce685d
AS
1727static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1728 int spi)
b5dc0163
AS
1729{
1730 struct bpf_verifier_state *st = env->cur_state;
1731 int first_idx = st->first_insn_idx;
1732 int last_idx = env->insn_idx;
1733 struct bpf_func_state *func;
1734 struct bpf_reg_state *reg;
a3ce685d
AS
1735 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1736 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 1737 bool skip_first = true;
a3ce685d 1738 bool new_marks = false;
b5dc0163
AS
1739 int i, err;
1740
1741 if (!env->allow_ptr_leaks)
1742 /* backtracking is root only for now */
1743 return 0;
1744
1745 func = st->frame[st->curframe];
a3ce685d
AS
1746 if (regno >= 0) {
1747 reg = &func->regs[regno];
1748 if (reg->type != SCALAR_VALUE) {
1749 WARN_ONCE(1, "backtracing misuse");
1750 return -EFAULT;
1751 }
1752 if (!reg->precise)
1753 new_marks = true;
1754 else
1755 reg_mask = 0;
1756 reg->precise = true;
b5dc0163 1757 }
b5dc0163 1758
a3ce685d
AS
1759 while (spi >= 0) {
1760 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1761 stack_mask = 0;
1762 break;
1763 }
1764 reg = &func->stack[spi].spilled_ptr;
1765 if (reg->type != SCALAR_VALUE) {
1766 stack_mask = 0;
1767 break;
1768 }
1769 if (!reg->precise)
1770 new_marks = true;
1771 else
1772 stack_mask = 0;
1773 reg->precise = true;
1774 break;
1775 }
1776
1777 if (!new_marks)
1778 return 0;
1779 if (!reg_mask && !stack_mask)
1780 return 0;
b5dc0163
AS
1781 for (;;) {
1782 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
1783 u32 history = st->jmp_history_cnt;
1784
1785 if (env->log.level & BPF_LOG_LEVEL)
1786 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1787 for (i = last_idx;;) {
1788 if (skip_first) {
1789 err = 0;
1790 skip_first = false;
1791 } else {
1792 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1793 }
1794 if (err == -ENOTSUPP) {
1795 mark_all_scalars_precise(env, st);
1796 return 0;
1797 } else if (err) {
1798 return err;
1799 }
1800 if (!reg_mask && !stack_mask)
1801 /* Found assignment(s) into tracked register in this state.
1802 * Since this state is already marked, just return.
1803 * Nothing to be tracked further in the parent state.
1804 */
1805 return 0;
1806 if (i == first_idx)
1807 break;
1808 i = get_prev_insn_idx(st, i, &history);
1809 if (i >= env->prog->len) {
1810 /* This can happen if backtracking reached insn 0
1811 * and there are still reg_mask or stack_mask
1812 * to backtrack.
1813 * It means the backtracking missed the spot where
1814 * particular register was initialized with a constant.
1815 */
1816 verbose(env, "BUG backtracking idx %d\n", i);
1817 WARN_ONCE(1, "verifier backtracking bug");
1818 return -EFAULT;
1819 }
1820 }
1821 st = st->parent;
1822 if (!st)
1823 break;
1824
a3ce685d 1825 new_marks = false;
b5dc0163
AS
1826 func = st->frame[st->curframe];
1827 bitmap_from_u64(mask, reg_mask);
1828 for_each_set_bit(i, mask, 32) {
1829 reg = &func->regs[i];
a3ce685d
AS
1830 if (reg->type != SCALAR_VALUE) {
1831 reg_mask &= ~(1u << i);
b5dc0163 1832 continue;
a3ce685d 1833 }
b5dc0163
AS
1834 if (!reg->precise)
1835 new_marks = true;
1836 reg->precise = true;
1837 }
1838
1839 bitmap_from_u64(mask, stack_mask);
1840 for_each_set_bit(i, mask, 64) {
1841 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
1842 /* the sequence of instructions:
1843 * 2: (bf) r3 = r10
1844 * 3: (7b) *(u64 *)(r3 -8) = r0
1845 * 4: (79) r4 = *(u64 *)(r10 -8)
1846 * doesn't contain jmps. It's backtracked
1847 * as a single block.
1848 * During backtracking insn 3 is not recognized as
1849 * stack access, so at the end of backtracking
1850 * stack slot fp-8 is still marked in stack_mask.
1851 * However the parent state may not have accessed
1852 * fp-8 and it's "unallocated" stack space.
1853 * In such case fallback to conservative.
b5dc0163 1854 */
2339cd6c
AS
1855 mark_all_scalars_precise(env, st);
1856 return 0;
b5dc0163
AS
1857 }
1858
a3ce685d
AS
1859 if (func->stack[i].slot_type[0] != STACK_SPILL) {
1860 stack_mask &= ~(1ull << i);
b5dc0163 1861 continue;
a3ce685d 1862 }
b5dc0163 1863 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
1864 if (reg->type != SCALAR_VALUE) {
1865 stack_mask &= ~(1ull << i);
b5dc0163 1866 continue;
a3ce685d 1867 }
b5dc0163
AS
1868 if (!reg->precise)
1869 new_marks = true;
1870 reg->precise = true;
1871 }
1872 if (env->log.level & BPF_LOG_LEVEL) {
1873 print_verifier_state(env, func);
1874 verbose(env, "parent %s regs=%x stack=%llx marks\n",
1875 new_marks ? "didn't have" : "already had",
1876 reg_mask, stack_mask);
1877 }
1878
a3ce685d
AS
1879 if (!reg_mask && !stack_mask)
1880 break;
b5dc0163
AS
1881 if (!new_marks)
1882 break;
1883
1884 last_idx = st->last_insn_idx;
1885 first_idx = st->first_insn_idx;
1886 }
1887 return 0;
1888}
1889
a3ce685d
AS
1890static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1891{
1892 return __mark_chain_precision(env, regno, -1);
1893}
1894
1895static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1896{
1897 return __mark_chain_precision(env, -1, spi);
1898}
b5dc0163 1899
1be7f75d
AS
1900static bool is_spillable_regtype(enum bpf_reg_type type)
1901{
1902 switch (type) {
1903 case PTR_TO_MAP_VALUE:
1904 case PTR_TO_MAP_VALUE_OR_NULL:
1905 case PTR_TO_STACK:
1906 case PTR_TO_CTX:
969bf05e 1907 case PTR_TO_PACKET:
de8f3a83 1908 case PTR_TO_PACKET_META:
969bf05e 1909 case PTR_TO_PACKET_END:
d58e468b 1910 case PTR_TO_FLOW_KEYS:
1be7f75d 1911 case CONST_PTR_TO_MAP:
c64b7983
JS
1912 case PTR_TO_SOCKET:
1913 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
1914 case PTR_TO_SOCK_COMMON:
1915 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
1916 case PTR_TO_TCP_SOCK:
1917 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 1918 case PTR_TO_XDP_SOCK:
1be7f75d
AS
1919 return true;
1920 default:
1921 return false;
1922 }
1923}
1924
cc2b14d5
AS
1925/* Does this register contain a constant zero? */
1926static bool register_is_null(struct bpf_reg_state *reg)
1927{
1928 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1929}
1930
f7cf25b2
AS
1931static bool register_is_const(struct bpf_reg_state *reg)
1932{
1933 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1934}
1935
1936static void save_register_state(struct bpf_func_state *state,
1937 int spi, struct bpf_reg_state *reg)
1938{
1939 int i;
1940
1941 state->stack[spi].spilled_ptr = *reg;
1942 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1943
1944 for (i = 0; i < BPF_REG_SIZE; i++)
1945 state->stack[spi].slot_type[i] = STACK_SPILL;
1946}
1947
17a52670
AS
1948/* check_stack_read/write functions track spill/fill of registers,
1949 * stack boundary and alignment are checked in check_mem_access()
1950 */
61bd5218 1951static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 1952 struct bpf_func_state *state, /* func where register points to */
af86ca4e 1953 int off, int size, int value_regno, int insn_idx)
17a52670 1954{
f4d7e40a 1955 struct bpf_func_state *cur; /* state of the current function */
638f5b90 1956 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 1957 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 1958 struct bpf_reg_state *reg = NULL;
638f5b90 1959
f4d7e40a 1960 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 1961 state->acquired_refs, true);
638f5b90
AS
1962 if (err)
1963 return err;
9c399760
AS
1964 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1965 * so it's aligned access and [off, off + size) are within stack limits
1966 */
638f5b90
AS
1967 if (!env->allow_ptr_leaks &&
1968 state->stack[spi].slot_type[0] == STACK_SPILL &&
1969 size != BPF_REG_SIZE) {
1970 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1971 return -EACCES;
1972 }
17a52670 1973
f4d7e40a 1974 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
1975 if (value_regno >= 0)
1976 reg = &cur->regs[value_regno];
17a52670 1977
f7cf25b2
AS
1978 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1979 !register_is_null(reg) && env->allow_ptr_leaks) {
b5dc0163
AS
1980 if (dst_reg != BPF_REG_FP) {
1981 /* The backtracking logic can only recognize explicit
1982 * stack slot address like [fp - 8]. Other spill of
1983 * scalar via different register has to be conervative.
1984 * Backtrack from here and mark all registers as precise
1985 * that contributed into 'reg' being a constant.
1986 */
1987 err = mark_chain_precision(env, value_regno);
1988 if (err)
1989 return err;
1990 }
f7cf25b2
AS
1991 save_register_state(state, spi, reg);
1992 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 1993 /* register containing pointer is being spilled into stack */
9c399760 1994 if (size != BPF_REG_SIZE) {
f7cf25b2 1995 verbose_linfo(env, insn_idx, "; ");
61bd5218 1996 verbose(env, "invalid size of register spill\n");
17a52670
AS
1997 return -EACCES;
1998 }
1999
f7cf25b2 2000 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
2001 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2002 return -EINVAL;
2003 }
2004
f7cf25b2
AS
2005 if (!env->allow_ptr_leaks) {
2006 bool sanitize = false;
17a52670 2007
f7cf25b2
AS
2008 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2009 register_is_const(&state->stack[spi].spilled_ptr))
2010 sanitize = true;
2011 for (i = 0; i < BPF_REG_SIZE; i++)
2012 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2013 sanitize = true;
2014 break;
2015 }
2016 if (sanitize) {
af86ca4e
AS
2017 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2018 int soff = (-spi - 1) * BPF_REG_SIZE;
2019
2020 /* detected reuse of integer stack slot with a pointer
2021 * which means either llvm is reusing stack slot or
2022 * an attacker is trying to exploit CVE-2018-3639
2023 * (speculative store bypass)
2024 * Have to sanitize that slot with preemptive
2025 * store of zero.
2026 */
2027 if (*poff && *poff != soff) {
2028 /* disallow programs where single insn stores
2029 * into two different stack slots, since verifier
2030 * cannot sanitize them
2031 */
2032 verbose(env,
2033 "insn %d cannot access two stack slots fp%d and fp%d",
2034 insn_idx, *poff, soff);
2035 return -EINVAL;
2036 }
2037 *poff = soff;
2038 }
af86ca4e 2039 }
f7cf25b2 2040 save_register_state(state, spi, reg);
9c399760 2041 } else {
cc2b14d5
AS
2042 u8 type = STACK_MISC;
2043
679c782d
EC
2044 /* regular write of data into stack destroys any spilled ptr */
2045 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2046 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2047 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2048 for (i = 0; i < BPF_REG_SIZE; i++)
2049 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2050
cc2b14d5
AS
2051 /* only mark the slot as written if all 8 bytes were written
2052 * otherwise read propagation may incorrectly stop too soon
2053 * when stack slots are partially written.
2054 * This heuristic means that read propagation will be
2055 * conservative, since it will add reg_live_read marks
2056 * to stack slots all the way to first state when programs
2057 * writes+reads less than 8 bytes
2058 */
2059 if (size == BPF_REG_SIZE)
2060 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2061
2062 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2063 if (reg && register_is_null(reg)) {
2064 /* backtracking doesn't work for STACK_ZERO yet. */
2065 err = mark_chain_precision(env, value_regno);
2066 if (err)
2067 return err;
cc2b14d5 2068 type = STACK_ZERO;
b5dc0163 2069 }
cc2b14d5 2070
0bae2d4d 2071 /* Mark slots affected by this stack write. */
9c399760 2072 for (i = 0; i < size; i++)
638f5b90 2073 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2074 type;
17a52670
AS
2075 }
2076 return 0;
2077}
2078
61bd5218 2079static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2080 struct bpf_func_state *reg_state /* func where register points to */,
2081 int off, int size, int value_regno)
17a52670 2082{
f4d7e40a
AS
2083 struct bpf_verifier_state *vstate = env->cur_state;
2084 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2085 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2086 struct bpf_reg_state *reg;
638f5b90 2087 u8 *stype;
17a52670 2088
f4d7e40a 2089 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2090 verbose(env, "invalid read from stack off %d+0 size %d\n",
2091 off, size);
2092 return -EACCES;
2093 }
f4d7e40a 2094 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2095 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2096
638f5b90 2097 if (stype[0] == STACK_SPILL) {
9c399760 2098 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2099 if (reg->type != SCALAR_VALUE) {
2100 verbose_linfo(env, env->insn_idx, "; ");
2101 verbose(env, "invalid size of register fill\n");
2102 return -EACCES;
2103 }
2104 if (value_regno >= 0) {
2105 mark_reg_unknown(env, state->regs, value_regno);
2106 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2107 }
2108 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2109 return 0;
17a52670 2110 }
9c399760 2111 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2112 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2113 verbose(env, "corrupted spill memory\n");
17a52670
AS
2114 return -EACCES;
2115 }
2116 }
2117
dc503a8a 2118 if (value_regno >= 0) {
17a52670 2119 /* restore register state from stack */
f7cf25b2 2120 state->regs[value_regno] = *reg;
2f18f62e
AS
2121 /* mark reg as written since spilled pointer state likely
2122 * has its liveness marks cleared by is_state_visited()
2123 * which resets stack/reg liveness for state transitions
2124 */
2125 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
dc503a8a 2126 }
f7cf25b2 2127 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2128 } else {
cc2b14d5
AS
2129 int zeros = 0;
2130
17a52670 2131 for (i = 0; i < size; i++) {
cc2b14d5
AS
2132 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2133 continue;
2134 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2135 zeros++;
2136 continue;
17a52670 2137 }
cc2b14d5
AS
2138 verbose(env, "invalid read from stack off %d+%d size %d\n",
2139 off, i, size);
2140 return -EACCES;
2141 }
f7cf25b2 2142 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2143 if (value_regno >= 0) {
2144 if (zeros == size) {
2145 /* any size read into register is zero extended,
2146 * so the whole register == const_zero
2147 */
2148 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2149 /* backtracking doesn't support STACK_ZERO yet,
2150 * so mark it precise here, so that later
2151 * backtracking can stop here.
2152 * Backtracking may not need this if this register
2153 * doesn't participate in pointer adjustment.
2154 * Forward propagation of precise flag is not
2155 * necessary either. This mark is only to stop
2156 * backtracking. Any register that contributed
2157 * to const 0 was marked precise before spill.
2158 */
2159 state->regs[value_regno].precise = true;
cc2b14d5
AS
2160 } else {
2161 /* have read misc data from the stack */
2162 mark_reg_unknown(env, state->regs, value_regno);
2163 }
2164 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2165 }
17a52670 2166 }
f7cf25b2 2167 return 0;
17a52670
AS
2168}
2169
e4298d25
DB
2170static int check_stack_access(struct bpf_verifier_env *env,
2171 const struct bpf_reg_state *reg,
2172 int off, int size)
2173{
2174 /* Stack accesses must be at a fixed offset, so that we
2175 * can determine what type of data were returned. See
2176 * check_stack_read().
2177 */
2178 if (!tnum_is_const(reg->var_off)) {
2179 char tn_buf[48];
2180
2181 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2182 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2183 tn_buf, off, size);
2184 return -EACCES;
2185 }
2186
2187 if (off >= 0 || off < -MAX_BPF_STACK) {
2188 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2189 return -EACCES;
2190 }
2191
2192 return 0;
2193}
2194
591fe988
DB
2195static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2196 int off, int size, enum bpf_access_type type)
2197{
2198 struct bpf_reg_state *regs = cur_regs(env);
2199 struct bpf_map *map = regs[regno].map_ptr;
2200 u32 cap = bpf_map_flags_to_cap(map);
2201
2202 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2203 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2204 map->value_size, off, size);
2205 return -EACCES;
2206 }
2207
2208 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2209 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2210 map->value_size, off, size);
2211 return -EACCES;
2212 }
2213
2214 return 0;
2215}
2216
17a52670 2217/* check read/write into map element returned by bpf_map_lookup_elem() */
f1174f77 2218static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2219 int size, bool zero_size_allowed)
17a52670 2220{
638f5b90
AS
2221 struct bpf_reg_state *regs = cur_regs(env);
2222 struct bpf_map *map = regs[regno].map_ptr;
17a52670 2223
9fd29c08
YS
2224 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2225 off + size > map->value_size) {
61bd5218 2226 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
17a52670
AS
2227 map->value_size, off, size);
2228 return -EACCES;
2229 }
2230 return 0;
2231}
2232
f1174f77
EC
2233/* check read/write into a map element with possible variable offset */
2234static int check_map_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2235 int off, int size, bool zero_size_allowed)
dbcfe5f7 2236{
f4d7e40a
AS
2237 struct bpf_verifier_state *vstate = env->cur_state;
2238 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2239 struct bpf_reg_state *reg = &state->regs[regno];
2240 int err;
2241
f1174f77
EC
2242 /* We may have adjusted the register to this map value, so we
2243 * need to try adding each of min_value and max_value to off
2244 * to make sure our theoretical access will be safe.
dbcfe5f7 2245 */
06ee7115 2246 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2247 print_verifier_state(env, state);
b7137c4e 2248
dbcfe5f7
GB
2249 /* The minimum value is only important with signed
2250 * comparisons where we can't assume the floor of a
2251 * value is 0. If we are using signed variables for our
2252 * index'es we need to make sure that whatever we use
2253 * will have a set floor within our range.
2254 */
b7137c4e
DB
2255 if (reg->smin_value < 0 &&
2256 (reg->smin_value == S64_MIN ||
2257 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2258 reg->smin_value + off < 0)) {
61bd5218 2259 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2260 regno);
2261 return -EACCES;
2262 }
9fd29c08
YS
2263 err = __check_map_access(env, regno, reg->smin_value + off, size,
2264 zero_size_allowed);
dbcfe5f7 2265 if (err) {
61bd5218
JK
2266 verbose(env, "R%d min value is outside of the array range\n",
2267 regno);
dbcfe5f7
GB
2268 return err;
2269 }
2270
b03c9f9f
EC
2271 /* If we haven't set a max value then we need to bail since we can't be
2272 * sure we won't do bad things.
2273 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2274 */
b03c9f9f 2275 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
61bd5218 2276 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
dbcfe5f7
GB
2277 regno);
2278 return -EACCES;
2279 }
9fd29c08
YS
2280 err = __check_map_access(env, regno, reg->umax_value + off, size,
2281 zero_size_allowed);
f1174f77 2282 if (err)
61bd5218
JK
2283 verbose(env, "R%d max value is outside of the array range\n",
2284 regno);
d83525ca
AS
2285
2286 if (map_value_has_spin_lock(reg->map_ptr)) {
2287 u32 lock = reg->map_ptr->spin_lock_off;
2288
2289 /* if any part of struct bpf_spin_lock can be touched by
2290 * load/store reject this program.
2291 * To check that [x1, x2) overlaps with [y1, y2)
2292 * it is sufficient to check x1 < y2 && y1 < x2.
2293 */
2294 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2295 lock < reg->umax_value + off + size) {
2296 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2297 return -EACCES;
2298 }
2299 }
f1174f77 2300 return err;
dbcfe5f7
GB
2301}
2302
969bf05e
AS
2303#define MAX_PACKET_OFF 0xffff
2304
58e2af8b 2305static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2306 const struct bpf_call_arg_meta *meta,
2307 enum bpf_access_type t)
4acf6c0b 2308{
36bbef52 2309 switch (env->prog->type) {
5d66fa7d 2310 /* Program types only with direct read access go here! */
3a0af8fd
TG
2311 case BPF_PROG_TYPE_LWT_IN:
2312 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2313 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2314 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2315 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2316 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2317 if (t == BPF_WRITE)
2318 return false;
7e57fbb2 2319 /* fallthrough */
5d66fa7d
DB
2320
2321 /* Program types with direct read + write access go here! */
36bbef52
DB
2322 case BPF_PROG_TYPE_SCHED_CLS:
2323 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2324 case BPF_PROG_TYPE_XDP:
3a0af8fd 2325 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2326 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2327 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2328 if (meta)
2329 return meta->pkt_access;
2330
2331 env->seen_direct_write = true;
4acf6c0b 2332 return true;
0d01da6a
SF
2333
2334 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2335 if (t == BPF_WRITE)
2336 env->seen_direct_write = true;
2337
2338 return true;
2339
4acf6c0b
BB
2340 default:
2341 return false;
2342 }
2343}
2344
f1174f77 2345static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2346 int off, int size, bool zero_size_allowed)
969bf05e 2347{
638f5b90 2348 struct bpf_reg_state *regs = cur_regs(env);
58e2af8b 2349 struct bpf_reg_state *reg = &regs[regno];
969bf05e 2350
9fd29c08
YS
2351 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2352 (u64)off + size > reg->range) {
61bd5218 2353 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
d91b28ed 2354 off, size, regno, reg->id, reg->off, reg->range);
969bf05e
AS
2355 return -EACCES;
2356 }
2357 return 0;
2358}
2359
f1174f77 2360static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2361 int size, bool zero_size_allowed)
f1174f77 2362{
638f5b90 2363 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2364 struct bpf_reg_state *reg = &regs[regno];
2365 int err;
2366
2367 /* We may have added a variable offset to the packet pointer; but any
2368 * reg->range we have comes after that. We are only checking the fixed
2369 * offset.
2370 */
2371
2372 /* We don't allow negative numbers, because we aren't tracking enough
2373 * detail to prove they're safe.
2374 */
b03c9f9f 2375 if (reg->smin_value < 0) {
61bd5218 2376 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2377 regno);
2378 return -EACCES;
2379 }
9fd29c08 2380 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
f1174f77 2381 if (err) {
61bd5218 2382 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2383 return err;
2384 }
e647815a
JW
2385
2386 /* __check_packet_access has made sure "off + size - 1" is within u16.
2387 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2388 * otherwise find_good_pkt_pointers would have refused to set range info
2389 * that __check_packet_access would have rejected this pkt access.
2390 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2391 */
2392 env->prog->aux->max_pkt_offset =
2393 max_t(u32, env->prog->aux->max_pkt_offset,
2394 off + reg->umax_value + size - 1);
2395
f1174f77
EC
2396 return err;
2397}
2398
2399/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2400static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2401 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2402 u32 *btf_id)
17a52670 2403{
f96da094
DB
2404 struct bpf_insn_access_aux info = {
2405 .reg_type = *reg_type,
9e15db66 2406 .log = &env->log,
f96da094 2407 };
31fd8581 2408
4f9218aa 2409 if (env->ops->is_valid_access &&
5e43f899 2410 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2411 /* A non zero info.ctx_field_size indicates that this field is a
2412 * candidate for later verifier transformation to load the whole
2413 * field and then apply a mask when accessed with a narrower
2414 * access than actual ctx access size. A zero info.ctx_field_size
2415 * will only allow for whole field access and rejects any other
2416 * type of narrower access.
31fd8581 2417 */
23994631 2418 *reg_type = info.reg_type;
31fd8581 2419
9e15db66
AS
2420 if (*reg_type == PTR_TO_BTF_ID)
2421 *btf_id = info.btf_id;
2422 else
2423 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2424 /* remember the offset of last byte accessed in ctx */
2425 if (env->prog->aux->max_ctx_offset < off + size)
2426 env->prog->aux->max_ctx_offset = off + size;
17a52670 2427 return 0;
32bbe007 2428 }
17a52670 2429
61bd5218 2430 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2431 return -EACCES;
2432}
2433
d58e468b
PP
2434static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2435 int size)
2436{
2437 if (size < 0 || off < 0 ||
2438 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2439 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2440 off, size);
2441 return -EACCES;
2442 }
2443 return 0;
2444}
2445
5f456649
MKL
2446static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2447 u32 regno, int off, int size,
2448 enum bpf_access_type t)
c64b7983
JS
2449{
2450 struct bpf_reg_state *regs = cur_regs(env);
2451 struct bpf_reg_state *reg = &regs[regno];
5f456649 2452 struct bpf_insn_access_aux info = {};
46f8bc92 2453 bool valid;
c64b7983
JS
2454
2455 if (reg->smin_value < 0) {
2456 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2457 regno);
2458 return -EACCES;
2459 }
2460
46f8bc92
MKL
2461 switch (reg->type) {
2462 case PTR_TO_SOCK_COMMON:
2463 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2464 break;
2465 case PTR_TO_SOCKET:
2466 valid = bpf_sock_is_valid_access(off, size, t, &info);
2467 break;
655a51e5
MKL
2468 case PTR_TO_TCP_SOCK:
2469 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2470 break;
fada7fdc
JL
2471 case PTR_TO_XDP_SOCK:
2472 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2473 break;
46f8bc92
MKL
2474 default:
2475 valid = false;
c64b7983
JS
2476 }
2477
5f456649 2478
46f8bc92
MKL
2479 if (valid) {
2480 env->insn_aux_data[insn_idx].ctx_field_size =
2481 info.ctx_field_size;
2482 return 0;
2483 }
2484
2485 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2486 regno, reg_type_str[reg->type], off, size);
2487
2488 return -EACCES;
c64b7983
JS
2489}
2490
4cabc5b1
DB
2491static bool __is_pointer_value(bool allow_ptr_leaks,
2492 const struct bpf_reg_state *reg)
1be7f75d 2493{
4cabc5b1 2494 if (allow_ptr_leaks)
1be7f75d
AS
2495 return false;
2496
f1174f77 2497 return reg->type != SCALAR_VALUE;
1be7f75d
AS
2498}
2499
2a159c6f
DB
2500static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2501{
2502 return cur_regs(env) + regno;
2503}
2504
4cabc5b1
DB
2505static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2506{
2a159c6f 2507 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2508}
2509
f37a8cb8
DB
2510static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2511{
2a159c6f 2512 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2513
46f8bc92
MKL
2514 return reg->type == PTR_TO_CTX;
2515}
2516
2517static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2518{
2519 const struct bpf_reg_state *reg = reg_state(env, regno);
2520
2521 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2522}
2523
ca369602
DB
2524static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2525{
2a159c6f 2526 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2527
2528 return type_is_pkt_pointer(reg->type);
2529}
2530
4b5defde
DB
2531static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2532{
2533 const struct bpf_reg_state *reg = reg_state(env, regno);
2534
2535 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2536 return reg->type == PTR_TO_FLOW_KEYS;
2537}
2538
61bd5218
JK
2539static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2540 const struct bpf_reg_state *reg,
d1174416 2541 int off, int size, bool strict)
969bf05e 2542{
f1174f77 2543 struct tnum reg_off;
e07b98d9 2544 int ip_align;
d1174416
DM
2545
2546 /* Byte size accesses are always allowed. */
2547 if (!strict || size == 1)
2548 return 0;
2549
e4eda884
DM
2550 /* For platforms that do not have a Kconfig enabling
2551 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2552 * NET_IP_ALIGN is universally set to '2'. And on platforms
2553 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2554 * to this code only in strict mode where we want to emulate
2555 * the NET_IP_ALIGN==2 checking. Therefore use an
2556 * unconditional IP align value of '2'.
e07b98d9 2557 */
e4eda884 2558 ip_align = 2;
f1174f77
EC
2559
2560 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2561 if (!tnum_is_aligned(reg_off, size)) {
2562 char tn_buf[48];
2563
2564 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2565 verbose(env,
2566 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2567 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2568 return -EACCES;
2569 }
79adffcd 2570
969bf05e
AS
2571 return 0;
2572}
2573
61bd5218
JK
2574static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2575 const struct bpf_reg_state *reg,
f1174f77
EC
2576 const char *pointer_desc,
2577 int off, int size, bool strict)
79adffcd 2578{
f1174f77
EC
2579 struct tnum reg_off;
2580
2581 /* Byte size accesses are always allowed. */
2582 if (!strict || size == 1)
2583 return 0;
2584
2585 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2586 if (!tnum_is_aligned(reg_off, size)) {
2587 char tn_buf[48];
2588
2589 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2590 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2591 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2592 return -EACCES;
2593 }
2594
969bf05e
AS
2595 return 0;
2596}
2597
e07b98d9 2598static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2599 const struct bpf_reg_state *reg, int off,
2600 int size, bool strict_alignment_once)
79adffcd 2601{
ca369602 2602 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2603 const char *pointer_desc = "";
d1174416 2604
79adffcd
DB
2605 switch (reg->type) {
2606 case PTR_TO_PACKET:
de8f3a83
DB
2607 case PTR_TO_PACKET_META:
2608 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2609 * right in front, treat it the very same way.
2610 */
61bd5218 2611 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2612 case PTR_TO_FLOW_KEYS:
2613 pointer_desc = "flow keys ";
2614 break;
f1174f77
EC
2615 case PTR_TO_MAP_VALUE:
2616 pointer_desc = "value ";
2617 break;
2618 case PTR_TO_CTX:
2619 pointer_desc = "context ";
2620 break;
2621 case PTR_TO_STACK:
2622 pointer_desc = "stack ";
a5ec6ae1
JH
2623 /* The stack spill tracking logic in check_stack_write()
2624 * and check_stack_read() relies on stack accesses being
2625 * aligned.
2626 */
2627 strict = true;
f1174f77 2628 break;
c64b7983
JS
2629 case PTR_TO_SOCKET:
2630 pointer_desc = "sock ";
2631 break;
46f8bc92
MKL
2632 case PTR_TO_SOCK_COMMON:
2633 pointer_desc = "sock_common ";
2634 break;
655a51e5
MKL
2635 case PTR_TO_TCP_SOCK:
2636 pointer_desc = "tcp_sock ";
2637 break;
fada7fdc
JL
2638 case PTR_TO_XDP_SOCK:
2639 pointer_desc = "xdp_sock ";
2640 break;
79adffcd 2641 default:
f1174f77 2642 break;
79adffcd 2643 }
61bd5218
JK
2644 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2645 strict);
79adffcd
DB
2646}
2647
f4d7e40a
AS
2648static int update_stack_depth(struct bpf_verifier_env *env,
2649 const struct bpf_func_state *func,
2650 int off)
2651{
9c8105bd 2652 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2653
2654 if (stack >= -off)
2655 return 0;
2656
2657 /* update known max for given subprogram */
9c8105bd 2658 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2659 return 0;
2660}
f4d7e40a 2661
70a87ffe
AS
2662/* starting from main bpf function walk all instructions of the function
2663 * and recursively walk all callees that given function can call.
2664 * Ignore jump and exit insns.
2665 * Since recursion is prevented by check_cfg() this algorithm
2666 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2667 */
2668static int check_max_stack_depth(struct bpf_verifier_env *env)
2669{
9c8105bd
JW
2670 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2671 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 2672 struct bpf_insn *insn = env->prog->insnsi;
70a87ffe
AS
2673 int ret_insn[MAX_CALL_FRAMES];
2674 int ret_prog[MAX_CALL_FRAMES];
f4d7e40a 2675
70a87ffe
AS
2676process_func:
2677 /* round up to 32-bytes, since this is granularity
2678 * of interpreter stack size
2679 */
9c8105bd 2680 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 2681 if (depth > MAX_BPF_STACK) {
f4d7e40a 2682 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 2683 frame + 1, depth);
f4d7e40a
AS
2684 return -EACCES;
2685 }
70a87ffe 2686continue_func:
4cb3d99c 2687 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
2688 for (; i < subprog_end; i++) {
2689 if (insn[i].code != (BPF_JMP | BPF_CALL))
2690 continue;
2691 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2692 continue;
2693 /* remember insn and function to return to */
2694 ret_insn[frame] = i + 1;
9c8105bd 2695 ret_prog[frame] = idx;
70a87ffe
AS
2696
2697 /* find the callee */
2698 i = i + insn[i].imm + 1;
9c8105bd
JW
2699 idx = find_subprog(env, i);
2700 if (idx < 0) {
70a87ffe
AS
2701 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2702 i);
2703 return -EFAULT;
2704 }
70a87ffe
AS
2705 frame++;
2706 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
2707 verbose(env, "the call stack of %d frames is too deep !\n",
2708 frame);
2709 return -E2BIG;
70a87ffe
AS
2710 }
2711 goto process_func;
2712 }
2713 /* end of for() loop means the last insn of the 'subprog'
2714 * was reached. Doesn't matter whether it was JA or EXIT
2715 */
2716 if (frame == 0)
2717 return 0;
9c8105bd 2718 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
2719 frame--;
2720 i = ret_insn[frame];
9c8105bd 2721 idx = ret_prog[frame];
70a87ffe 2722 goto continue_func;
f4d7e40a
AS
2723}
2724
19d28fbd 2725#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
2726static int get_callee_stack_depth(struct bpf_verifier_env *env,
2727 const struct bpf_insn *insn, int idx)
2728{
2729 int start = idx + insn->imm + 1, subprog;
2730
2731 subprog = find_subprog(env, start);
2732 if (subprog < 0) {
2733 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2734 start);
2735 return -EFAULT;
2736 }
9c8105bd 2737 return env->subprog_info[subprog].stack_depth;
1ea47e01 2738}
19d28fbd 2739#endif
1ea47e01 2740
58990d1f
DB
2741static int check_ctx_reg(struct bpf_verifier_env *env,
2742 const struct bpf_reg_state *reg, int regno)
2743{
2744 /* Access to ctx or passing it to a helper is only allowed in
2745 * its original, unmodified form.
2746 */
2747
2748 if (reg->off) {
2749 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2750 regno, reg->off);
2751 return -EACCES;
2752 }
2753
2754 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2755 char tn_buf[48];
2756
2757 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2758 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2759 return -EACCES;
2760 }
2761
2762 return 0;
2763}
2764
9df1c28b
MM
2765static int check_tp_buffer_access(struct bpf_verifier_env *env,
2766 const struct bpf_reg_state *reg,
2767 int regno, int off, int size)
2768{
2769 if (off < 0) {
2770 verbose(env,
2771 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2772 regno, off, size);
2773 return -EACCES;
2774 }
2775 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2776 char tn_buf[48];
2777
2778 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2779 verbose(env,
2780 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2781 regno, off, tn_buf);
2782 return -EACCES;
2783 }
2784 if (off + size > env->prog->aux->max_tp_access)
2785 env->prog->aux->max_tp_access = off + size;
2786
2787 return 0;
2788}
2789
2790
0c17d1d2
JH
2791/* truncate register to smaller size (in bytes)
2792 * must be called with size < BPF_REG_SIZE
2793 */
2794static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2795{
2796 u64 mask;
2797
2798 /* clear high bits in bit representation */
2799 reg->var_off = tnum_cast(reg->var_off, size);
2800
2801 /* fix arithmetic bounds */
2802 mask = ((u64)1 << (size * 8)) - 1;
2803 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2804 reg->umin_value &= mask;
2805 reg->umax_value &= mask;
2806 } else {
2807 reg->umin_value = 0;
2808 reg->umax_value = mask;
2809 }
2810 reg->smin_value = reg->umin_value;
2811 reg->smax_value = reg->umax_value;
2812}
2813
a23740ec
AN
2814static bool bpf_map_is_rdonly(const struct bpf_map *map)
2815{
2816 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2817}
2818
2819static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2820{
2821 void *ptr;
2822 u64 addr;
2823 int err;
2824
2825 err = map->ops->map_direct_value_addr(map, &addr, off);
2826 if (err)
2827 return err;
2dedd7d2 2828 ptr = (void *)(long)addr + off;
a23740ec
AN
2829
2830 switch (size) {
2831 case sizeof(u8):
2832 *val = (u64)*(u8 *)ptr;
2833 break;
2834 case sizeof(u16):
2835 *val = (u64)*(u16 *)ptr;
2836 break;
2837 case sizeof(u32):
2838 *val = (u64)*(u32 *)ptr;
2839 break;
2840 case sizeof(u64):
2841 *val = *(u64 *)ptr;
2842 break;
2843 default:
2844 return -EINVAL;
2845 }
2846 return 0;
2847}
2848
9e15db66
AS
2849static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2850 struct bpf_reg_state *regs,
2851 int regno, int off, int size,
2852 enum bpf_access_type atype,
2853 int value_regno)
2854{
2855 struct bpf_reg_state *reg = regs + regno;
2856 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2857 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2858 u32 btf_id;
2859 int ret;
2860
2861 if (atype != BPF_READ) {
2862 verbose(env, "only read is supported\n");
2863 return -EACCES;
2864 }
2865
2866 if (off < 0) {
2867 verbose(env,
2868 "R%d is ptr_%s invalid negative access: off=%d\n",
2869 regno, tname, off);
2870 return -EACCES;
2871 }
2872 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2873 char tn_buf[48];
2874
2875 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2876 verbose(env,
2877 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2878 regno, tname, off, tn_buf);
2879 return -EACCES;
2880 }
2881
2882 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
2883 if (ret < 0)
2884 return ret;
2885
2886 if (ret == SCALAR_VALUE) {
2887 mark_reg_unknown(env, regs, value_regno);
2888 return 0;
2889 }
2890 mark_reg_known_zero(env, regs, value_regno);
2891 regs[value_regno].type = PTR_TO_BTF_ID;
2892 regs[value_regno].btf_id = btf_id;
2893 return 0;
2894}
2895
17a52670
AS
2896/* check whether memory at (regno + off) is accessible for t = (read | write)
2897 * if t==write, value_regno is a register which value is stored into memory
2898 * if t==read, value_regno is a register which will receive the value from memory
2899 * if t==write && value_regno==-1, some unknown value is stored into memory
2900 * if t==read && value_regno==-1, don't care what we read from memory
2901 */
ca369602
DB
2902static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2903 int off, int bpf_size, enum bpf_access_type t,
2904 int value_regno, bool strict_alignment_once)
17a52670 2905{
638f5b90
AS
2906 struct bpf_reg_state *regs = cur_regs(env);
2907 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 2908 struct bpf_func_state *state;
17a52670
AS
2909 int size, err = 0;
2910
2911 size = bpf_size_to_bytes(bpf_size);
2912 if (size < 0)
2913 return size;
2914
f1174f77 2915 /* alignment checks will add in reg->off themselves */
ca369602 2916 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
2917 if (err)
2918 return err;
17a52670 2919
f1174f77
EC
2920 /* for access checks, reg->off is just part of off */
2921 off += reg->off;
2922
2923 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
2924 if (t == BPF_WRITE && value_regno >= 0 &&
2925 is_pointer_value(env, value_regno)) {
61bd5218 2926 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
2927 return -EACCES;
2928 }
591fe988
DB
2929 err = check_map_access_type(env, regno, off, size, t);
2930 if (err)
2931 return err;
9fd29c08 2932 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
2933 if (!err && t == BPF_READ && value_regno >= 0) {
2934 struct bpf_map *map = reg->map_ptr;
2935
2936 /* if map is read-only, track its contents as scalars */
2937 if (tnum_is_const(reg->var_off) &&
2938 bpf_map_is_rdonly(map) &&
2939 map->ops->map_direct_value_addr) {
2940 int map_off = off + reg->var_off.value;
2941 u64 val = 0;
2942
2943 err = bpf_map_direct_read(map, map_off, size,
2944 &val);
2945 if (err)
2946 return err;
2947
2948 regs[value_regno].type = SCALAR_VALUE;
2949 __mark_reg_known(&regs[value_regno], val);
2950 } else {
2951 mark_reg_unknown(env, regs, value_regno);
2952 }
2953 }
1a0dc1ac 2954 } else if (reg->type == PTR_TO_CTX) {
f1174f77 2955 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 2956 u32 btf_id = 0;
19de99f7 2957
1be7f75d
AS
2958 if (t == BPF_WRITE && value_regno >= 0 &&
2959 is_pointer_value(env, value_regno)) {
61bd5218 2960 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
2961 return -EACCES;
2962 }
f1174f77 2963
58990d1f
DB
2964 err = check_ctx_reg(env, reg, regno);
2965 if (err < 0)
2966 return err;
2967
9e15db66
AS
2968 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2969 if (err)
2970 verbose_linfo(env, insn_idx, "; ");
969bf05e 2971 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 2972 /* ctx access returns either a scalar, or a
de8f3a83
DB
2973 * PTR_TO_PACKET[_META,_END]. In the latter
2974 * case, we know the offset is zero.
f1174f77 2975 */
46f8bc92 2976 if (reg_type == SCALAR_VALUE) {
638f5b90 2977 mark_reg_unknown(env, regs, value_regno);
46f8bc92 2978 } else {
638f5b90 2979 mark_reg_known_zero(env, regs,
61bd5218 2980 value_regno);
46f8bc92
MKL
2981 if (reg_type_may_be_null(reg_type))
2982 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
2983 /* A load of ctx field could have different
2984 * actual load size with the one encoded in the
2985 * insn. When the dst is PTR, it is for sure not
2986 * a sub-register.
2987 */
2988 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
9e15db66
AS
2989 if (reg_type == PTR_TO_BTF_ID)
2990 regs[value_regno].btf_id = btf_id;
46f8bc92 2991 }
638f5b90 2992 regs[value_regno].type = reg_type;
969bf05e 2993 }
17a52670 2994
f1174f77 2995 } else if (reg->type == PTR_TO_STACK) {
f1174f77 2996 off += reg->var_off.value;
e4298d25
DB
2997 err = check_stack_access(env, reg, off, size);
2998 if (err)
2999 return err;
8726679a 3000
f4d7e40a
AS
3001 state = func(env, reg);
3002 err = update_stack_depth(env, state, off);
3003 if (err)
3004 return err;
8726679a 3005
638f5b90 3006 if (t == BPF_WRITE)
61bd5218 3007 err = check_stack_write(env, state, off, size,
af86ca4e 3008 value_regno, insn_idx);
638f5b90 3009 else
61bd5218
JK
3010 err = check_stack_read(env, state, off, size,
3011 value_regno);
de8f3a83 3012 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3013 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3014 verbose(env, "cannot write into packet\n");
969bf05e
AS
3015 return -EACCES;
3016 }
4acf6c0b
BB
3017 if (t == BPF_WRITE && value_regno >= 0 &&
3018 is_pointer_value(env, value_regno)) {
61bd5218
JK
3019 verbose(env, "R%d leaks addr into packet\n",
3020 value_regno);
4acf6c0b
BB
3021 return -EACCES;
3022 }
9fd29c08 3023 err = check_packet_access(env, regno, off, size, false);
969bf05e 3024 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3025 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3026 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3027 if (t == BPF_WRITE && value_regno >= 0 &&
3028 is_pointer_value(env, value_regno)) {
3029 verbose(env, "R%d leaks addr into flow keys\n",
3030 value_regno);
3031 return -EACCES;
3032 }
3033
3034 err = check_flow_keys_access(env, off, size);
3035 if (!err && t == BPF_READ && value_regno >= 0)
3036 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3037 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3038 if (t == BPF_WRITE) {
46f8bc92
MKL
3039 verbose(env, "R%d cannot write into %s\n",
3040 regno, reg_type_str[reg->type]);
c64b7983
JS
3041 return -EACCES;
3042 }
5f456649 3043 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3044 if (!err && value_regno >= 0)
3045 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3046 } else if (reg->type == PTR_TO_TP_BUFFER) {
3047 err = check_tp_buffer_access(env, reg, regno, off, size);
3048 if (!err && t == BPF_READ && value_regno >= 0)
3049 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3050 } else if (reg->type == PTR_TO_BTF_ID) {
3051 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3052 value_regno);
17a52670 3053 } else {
61bd5218
JK
3054 verbose(env, "R%d invalid mem access '%s'\n", regno,
3055 reg_type_str[reg->type]);
17a52670
AS
3056 return -EACCES;
3057 }
969bf05e 3058
f1174f77 3059 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3060 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3061 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3062 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3063 }
17a52670
AS
3064 return err;
3065}
3066
31fd8581 3067static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3068{
17a52670
AS
3069 int err;
3070
3071 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3072 insn->imm != 0) {
61bd5218 3073 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3074 return -EINVAL;
3075 }
3076
3077 /* check src1 operand */
dc503a8a 3078 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3079 if (err)
3080 return err;
3081
3082 /* check src2 operand */
dc503a8a 3083 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3084 if (err)
3085 return err;
3086
6bdf6abc 3087 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3088 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3089 return -EACCES;
3090 }
3091
ca369602 3092 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3093 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3094 is_flow_key_reg(env, insn->dst_reg) ||
3095 is_sk_reg(env, insn->dst_reg)) {
ca369602 3096 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3097 insn->dst_reg,
3098 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3099 return -EACCES;
3100 }
3101
17a52670 3102 /* check whether atomic_add can read the memory */
31fd8581 3103 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3104 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3105 if (err)
3106 return err;
3107
3108 /* check whether atomic_add can write into the same memory */
31fd8581 3109 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3110 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3111}
3112
2011fccf
AI
3113static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3114 int off, int access_size,
3115 bool zero_size_allowed)
3116{
3117 struct bpf_reg_state *reg = reg_state(env, regno);
3118
3119 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3120 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3121 if (tnum_is_const(reg->var_off)) {
3122 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3123 regno, off, access_size);
3124 } else {
3125 char tn_buf[48];
3126
3127 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3128 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3129 regno, tn_buf, access_size);
3130 }
3131 return -EACCES;
3132 }
3133 return 0;
3134}
3135
17a52670
AS
3136/* when register 'regno' is passed into function that will read 'access_size'
3137 * bytes from that pointer, make sure that it's within stack boundary
f1174f77
EC
3138 * and all elements of stack are initialized.
3139 * Unlike most pointer bounds-checking functions, this one doesn't take an
3140 * 'off' argument, so it has to add in reg->off itself.
17a52670 3141 */
58e2af8b 3142static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3143 int access_size, bool zero_size_allowed,
3144 struct bpf_call_arg_meta *meta)
17a52670 3145{
2a159c6f 3146 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3147 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3148 int err, min_off, max_off, i, j, slot, spi;
17a52670 3149
914cb781 3150 if (reg->type != PTR_TO_STACK) {
f1174f77 3151 /* Allow zero-byte read from NULL, regardless of pointer type */
8e2fe1d9 3152 if (zero_size_allowed && access_size == 0 &&
914cb781 3153 register_is_null(reg))
8e2fe1d9
DB
3154 return 0;
3155
61bd5218 3156 verbose(env, "R%d type=%s expected=%s\n", regno,
914cb781 3157 reg_type_str[reg->type],
8e2fe1d9 3158 reg_type_str[PTR_TO_STACK]);
17a52670 3159 return -EACCES;
8e2fe1d9 3160 }
17a52670 3161
2011fccf
AI
3162 if (tnum_is_const(reg->var_off)) {
3163 min_off = max_off = reg->var_off.value + reg->off;
3164 err = __check_stack_boundary(env, regno, min_off, access_size,
3165 zero_size_allowed);
3166 if (err)
3167 return err;
3168 } else {
088ec26d
AI
3169 /* Variable offset is prohibited for unprivileged mode for
3170 * simplicity since it requires corresponding support in
3171 * Spectre masking for stack ALU.
3172 * See also retrieve_ptr_limit().
3173 */
3174 if (!env->allow_ptr_leaks) {
3175 char tn_buf[48];
f1174f77 3176
088ec26d
AI
3177 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3178 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3179 regno, tn_buf);
3180 return -EACCES;
3181 }
f2bcd05e
AI
3182 /* Only initialized buffer on stack is allowed to be accessed
3183 * with variable offset. With uninitialized buffer it's hard to
3184 * guarantee that whole memory is marked as initialized on
3185 * helper return since specific bounds are unknown what may
3186 * cause uninitialized stack leaking.
3187 */
3188 if (meta && meta->raw_mode)
3189 meta = NULL;
3190
107c26a7
AI
3191 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3192 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3193 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3194 regno);
3195 return -EACCES;
3196 }
2011fccf 3197 min_off = reg->smin_value + reg->off;
107c26a7 3198 max_off = reg->smax_value + reg->off;
2011fccf
AI
3199 err = __check_stack_boundary(env, regno, min_off, access_size,
3200 zero_size_allowed);
107c26a7
AI
3201 if (err) {
3202 verbose(env, "R%d min value is outside of stack bound\n",
3203 regno);
2011fccf 3204 return err;
107c26a7 3205 }
2011fccf
AI
3206 err = __check_stack_boundary(env, regno, max_off, access_size,
3207 zero_size_allowed);
107c26a7
AI
3208 if (err) {
3209 verbose(env, "R%d max value is outside of stack bound\n",
3210 regno);
2011fccf 3211 return err;
107c26a7 3212 }
17a52670
AS
3213 }
3214
435faee1
DB
3215 if (meta && meta->raw_mode) {
3216 meta->access_size = access_size;
3217 meta->regno = regno;
3218 return 0;
3219 }
3220
2011fccf 3221 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3222 u8 *stype;
3223
2011fccf 3224 slot = -i - 1;
638f5b90 3225 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3226 if (state->allocated_stack <= slot)
3227 goto err;
3228 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3229 if (*stype == STACK_MISC)
3230 goto mark;
3231 if (*stype == STACK_ZERO) {
3232 /* helper can write anything into the stack */
3233 *stype = STACK_MISC;
3234 goto mark;
17a52670 3235 }
f7cf25b2
AS
3236 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3237 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
f54c7898 3238 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
f7cf25b2
AS
3239 for (j = 0; j < BPF_REG_SIZE; j++)
3240 state->stack[spi].slot_type[j] = STACK_MISC;
3241 goto mark;
3242 }
3243
cc2b14d5 3244err:
2011fccf
AI
3245 if (tnum_is_const(reg->var_off)) {
3246 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3247 min_off, i - min_off, access_size);
3248 } else {
3249 char tn_buf[48];
3250
3251 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3252 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3253 tn_buf, i - min_off, access_size);
3254 }
cc2b14d5
AS
3255 return -EACCES;
3256mark:
3257 /* reading any byte out of 8-byte 'spill_slot' will cause
3258 * the whole slot to be marked as 'read'
3259 */
679c782d 3260 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3261 state->stack[spi].spilled_ptr.parent,
3262 REG_LIVE_READ64);
17a52670 3263 }
2011fccf 3264 return update_stack_depth(env, state, min_off);
17a52670
AS
3265}
3266
06c1c049
GB
3267static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3268 int access_size, bool zero_size_allowed,
3269 struct bpf_call_arg_meta *meta)
3270{
638f5b90 3271 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3272
f1174f77 3273 switch (reg->type) {
06c1c049 3274 case PTR_TO_PACKET:
de8f3a83 3275 case PTR_TO_PACKET_META:
9fd29c08
YS
3276 return check_packet_access(env, regno, reg->off, access_size,
3277 zero_size_allowed);
06c1c049 3278 case PTR_TO_MAP_VALUE:
591fe988
DB
3279 if (check_map_access_type(env, regno, reg->off, access_size,
3280 meta && meta->raw_mode ? BPF_WRITE :
3281 BPF_READ))
3282 return -EACCES;
9fd29c08
YS
3283 return check_map_access(env, regno, reg->off, access_size,
3284 zero_size_allowed);
f1174f77 3285 default: /* scalar_value|ptr_to_stack or invalid ptr */
06c1c049
GB
3286 return check_stack_boundary(env, regno, access_size,
3287 zero_size_allowed, meta);
3288 }
3289}
3290
d83525ca
AS
3291/* Implementation details:
3292 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3293 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3294 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3295 * value_or_null->value transition, since the verifier only cares about
3296 * the range of access to valid map value pointer and doesn't care about actual
3297 * address of the map element.
3298 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3299 * reg->id > 0 after value_or_null->value transition. By doing so
3300 * two bpf_map_lookups will be considered two different pointers that
3301 * point to different bpf_spin_locks.
3302 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3303 * dead-locks.
3304 * Since only one bpf_spin_lock is allowed the checks are simpler than
3305 * reg_is_refcounted() logic. The verifier needs to remember only
3306 * one spin_lock instead of array of acquired_refs.
3307 * cur_state->active_spin_lock remembers which map value element got locked
3308 * and clears it after bpf_spin_unlock.
3309 */
3310static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3311 bool is_lock)
3312{
3313 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3314 struct bpf_verifier_state *cur = env->cur_state;
3315 bool is_const = tnum_is_const(reg->var_off);
3316 struct bpf_map *map = reg->map_ptr;
3317 u64 val = reg->var_off.value;
3318
3319 if (reg->type != PTR_TO_MAP_VALUE) {
3320 verbose(env, "R%d is not a pointer to map_value\n", regno);
3321 return -EINVAL;
3322 }
3323 if (!is_const) {
3324 verbose(env,
3325 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3326 regno);
3327 return -EINVAL;
3328 }
3329 if (!map->btf) {
3330 verbose(env,
3331 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3332 map->name);
3333 return -EINVAL;
3334 }
3335 if (!map_value_has_spin_lock(map)) {
3336 if (map->spin_lock_off == -E2BIG)
3337 verbose(env,
3338 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3339 map->name);
3340 else if (map->spin_lock_off == -ENOENT)
3341 verbose(env,
3342 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3343 map->name);
3344 else
3345 verbose(env,
3346 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3347 map->name);
3348 return -EINVAL;
3349 }
3350 if (map->spin_lock_off != val + reg->off) {
3351 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3352 val + reg->off);
3353 return -EINVAL;
3354 }
3355 if (is_lock) {
3356 if (cur->active_spin_lock) {
3357 verbose(env,
3358 "Locking two bpf_spin_locks are not allowed\n");
3359 return -EINVAL;
3360 }
3361 cur->active_spin_lock = reg->id;
3362 } else {
3363 if (!cur->active_spin_lock) {
3364 verbose(env, "bpf_spin_unlock without taking a lock\n");
3365 return -EINVAL;
3366 }
3367 if (cur->active_spin_lock != reg->id) {
3368 verbose(env, "bpf_spin_unlock of different lock\n");
3369 return -EINVAL;
3370 }
3371 cur->active_spin_lock = 0;
3372 }
3373 return 0;
3374}
3375
90133415
DB
3376static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3377{
3378 return type == ARG_PTR_TO_MEM ||
3379 type == ARG_PTR_TO_MEM_OR_NULL ||
3380 type == ARG_PTR_TO_UNINIT_MEM;
3381}
3382
3383static bool arg_type_is_mem_size(enum bpf_arg_type type)
3384{
3385 return type == ARG_CONST_SIZE ||
3386 type == ARG_CONST_SIZE_OR_ZERO;
3387}
3388
57c3bb72
AI
3389static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3390{
3391 return type == ARG_PTR_TO_INT ||
3392 type == ARG_PTR_TO_LONG;
3393}
3394
3395static int int_ptr_type_to_size(enum bpf_arg_type type)
3396{
3397 if (type == ARG_PTR_TO_INT)
3398 return sizeof(u32);
3399 else if (type == ARG_PTR_TO_LONG)
3400 return sizeof(u64);
3401
3402 return -EINVAL;
3403}
3404
58e2af8b 3405static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
33ff9823
DB
3406 enum bpf_arg_type arg_type,
3407 struct bpf_call_arg_meta *meta)
17a52670 3408{
638f5b90 3409 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6841de8b 3410 enum bpf_reg_type expected_type, type = reg->type;
17a52670
AS
3411 int err = 0;
3412
80f1d68c 3413 if (arg_type == ARG_DONTCARE)
17a52670
AS
3414 return 0;
3415
dc503a8a
EC
3416 err = check_reg_arg(env, regno, SRC_OP);
3417 if (err)
3418 return err;
17a52670 3419
1be7f75d
AS
3420 if (arg_type == ARG_ANYTHING) {
3421 if (is_pointer_value(env, regno)) {
61bd5218
JK
3422 verbose(env, "R%d leaks addr into helper function\n",
3423 regno);
1be7f75d
AS
3424 return -EACCES;
3425 }
80f1d68c 3426 return 0;
1be7f75d 3427 }
80f1d68c 3428
de8f3a83 3429 if (type_is_pkt_pointer(type) &&
3a0af8fd 3430 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 3431 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
3432 return -EACCES;
3433 }
3434
8e2fe1d9 3435 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2ea864c5 3436 arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3437 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3438 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
17a52670 3439 expected_type = PTR_TO_STACK;
6ac99e8f
MKL
3440 if (register_is_null(reg) &&
3441 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3442 /* final test in check_stack_boundary() */;
3443 else if (!type_is_pkt_pointer(type) &&
3444 type != PTR_TO_MAP_VALUE &&
3445 type != expected_type)
6841de8b 3446 goto err_type;
39f19ebb
AS
3447 } else if (arg_type == ARG_CONST_SIZE ||
3448 arg_type == ARG_CONST_SIZE_OR_ZERO) {
f1174f77
EC
3449 expected_type = SCALAR_VALUE;
3450 if (type != expected_type)
6841de8b 3451 goto err_type;
17a52670
AS
3452 } else if (arg_type == ARG_CONST_MAP_PTR) {
3453 expected_type = CONST_PTR_TO_MAP;
6841de8b
AS
3454 if (type != expected_type)
3455 goto err_type;
608cd71a
AS
3456 } else if (arg_type == ARG_PTR_TO_CTX) {
3457 expected_type = PTR_TO_CTX;
6841de8b
AS
3458 if (type != expected_type)
3459 goto err_type;
58990d1f
DB
3460 err = check_ctx_reg(env, reg, regno);
3461 if (err < 0)
3462 return err;
46f8bc92
MKL
3463 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3464 expected_type = PTR_TO_SOCK_COMMON;
3465 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3466 if (!type_is_sk_pointer(type))
3467 goto err_type;
1b986589
MKL
3468 if (reg->ref_obj_id) {
3469 if (meta->ref_obj_id) {
3470 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3471 regno, reg->ref_obj_id,
3472 meta->ref_obj_id);
3473 return -EFAULT;
3474 }
3475 meta->ref_obj_id = reg->ref_obj_id;
fd978bf7 3476 }
6ac99e8f
MKL
3477 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3478 expected_type = PTR_TO_SOCKET;
3479 if (type != expected_type)
3480 goto err_type;
a7658e1a
AS
3481 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3482 expected_type = PTR_TO_BTF_ID;
3483 if (type != expected_type)
3484 goto err_type;
3485 if (reg->btf_id != meta->btf_id) {
3486 verbose(env, "Helper has type %s got %s in R%d\n",
3487 kernel_type_name(meta->btf_id),
3488 kernel_type_name(reg->btf_id), regno);
3489
3490 return -EACCES;
3491 }
3492 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3493 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3494 regno);
3495 return -EACCES;
3496 }
d83525ca
AS
3497 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3498 if (meta->func_id == BPF_FUNC_spin_lock) {
3499 if (process_spin_lock(env, regno, true))
3500 return -EACCES;
3501 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3502 if (process_spin_lock(env, regno, false))
3503 return -EACCES;
3504 } else {
3505 verbose(env, "verifier internal error\n");
3506 return -EFAULT;
3507 }
90133415 3508 } else if (arg_type_is_mem_ptr(arg_type)) {
8e2fe1d9
DB
3509 expected_type = PTR_TO_STACK;
3510 /* One exception here. In case function allows for NULL to be
f1174f77 3511 * passed in as argument, it's a SCALAR_VALUE type. Final test
8e2fe1d9
DB
3512 * happens during stack boundary checking.
3513 */
914cb781 3514 if (register_is_null(reg) &&
db1ac496 3515 arg_type == ARG_PTR_TO_MEM_OR_NULL)
6841de8b 3516 /* final test in check_stack_boundary() */;
de8f3a83
DB
3517 else if (!type_is_pkt_pointer(type) &&
3518 type != PTR_TO_MAP_VALUE &&
f1174f77 3519 type != expected_type)
6841de8b 3520 goto err_type;
39f19ebb 3521 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
57c3bb72
AI
3522 } else if (arg_type_is_int_ptr(arg_type)) {
3523 expected_type = PTR_TO_STACK;
3524 if (!type_is_pkt_pointer(type) &&
3525 type != PTR_TO_MAP_VALUE &&
3526 type != expected_type)
3527 goto err_type;
17a52670 3528 } else {
61bd5218 3529 verbose(env, "unsupported arg_type %d\n", arg_type);
17a52670
AS
3530 return -EFAULT;
3531 }
3532
17a52670
AS
3533 if (arg_type == ARG_CONST_MAP_PTR) {
3534 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 3535 meta->map_ptr = reg->map_ptr;
17a52670
AS
3536 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3537 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3538 * check that [key, key + map->key_size) are within
3539 * stack limits and initialized
3540 */
33ff9823 3541 if (!meta->map_ptr) {
17a52670
AS
3542 /* in function declaration map_ptr must come before
3543 * map_key, so that it's verified and known before
3544 * we have to check map_key here. Otherwise it means
3545 * that kernel subsystem misconfigured verifier
3546 */
61bd5218 3547 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
3548 return -EACCES;
3549 }
d71962f3
PC
3550 err = check_helper_mem_access(env, regno,
3551 meta->map_ptr->key_size, false,
3552 NULL);
2ea864c5 3553 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3554 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3555 !register_is_null(reg)) ||
2ea864c5 3556 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
3557 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3558 * check [value, value + map->value_size) validity
3559 */
33ff9823 3560 if (!meta->map_ptr) {
17a52670 3561 /* kernel subsystem misconfigured verifier */
61bd5218 3562 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
3563 return -EACCES;
3564 }
2ea864c5 3565 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
3566 err = check_helper_mem_access(env, regno,
3567 meta->map_ptr->value_size, false,
2ea864c5 3568 meta);
90133415 3569 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 3570 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 3571
849fa506
YS
3572 /* remember the mem_size which may be used later
3573 * to refine return values.
3574 */
3575 meta->msize_smax_value = reg->smax_value;
3576 meta->msize_umax_value = reg->umax_value;
3577
f1174f77
EC
3578 /* The register is SCALAR_VALUE; the access check
3579 * happens using its boundaries.
06c1c049 3580 */
f1174f77 3581 if (!tnum_is_const(reg->var_off))
06c1c049
GB
3582 /* For unprivileged variable accesses, disable raw
3583 * mode so that the program is required to
3584 * initialize all the memory that the helper could
3585 * just partially fill up.
3586 */
3587 meta = NULL;
3588
b03c9f9f 3589 if (reg->smin_value < 0) {
61bd5218 3590 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
3591 regno);
3592 return -EACCES;
3593 }
06c1c049 3594
b03c9f9f 3595 if (reg->umin_value == 0) {
f1174f77
EC
3596 err = check_helper_mem_access(env, regno - 1, 0,
3597 zero_size_allowed,
3598 meta);
06c1c049
GB
3599 if (err)
3600 return err;
06c1c049 3601 }
f1174f77 3602
b03c9f9f 3603 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 3604 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
3605 regno);
3606 return -EACCES;
3607 }
3608 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 3609 reg->umax_value,
f1174f77 3610 zero_size_allowed, meta);
b5dc0163
AS
3611 if (!err)
3612 err = mark_chain_precision(env, regno);
57c3bb72
AI
3613 } else if (arg_type_is_int_ptr(arg_type)) {
3614 int size = int_ptr_type_to_size(arg_type);
3615
3616 err = check_helper_mem_access(env, regno, size, false, meta);
3617 if (err)
3618 return err;
3619 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
3620 }
3621
3622 return err;
6841de8b 3623err_type:
61bd5218 3624 verbose(env, "R%d type=%s expected=%s\n", regno,
6841de8b
AS
3625 reg_type_str[type], reg_type_str[expected_type]);
3626 return -EACCES;
17a52670
AS
3627}
3628
61bd5218
JK
3629static int check_map_func_compatibility(struct bpf_verifier_env *env,
3630 struct bpf_map *map, int func_id)
35578d79 3631{
35578d79
KX
3632 if (!map)
3633 return 0;
3634
6aff67c8
AS
3635 /* We need a two way check, first is from map perspective ... */
3636 switch (map->map_type) {
3637 case BPF_MAP_TYPE_PROG_ARRAY:
3638 if (func_id != BPF_FUNC_tail_call)
3639 goto error;
3640 break;
3641 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3642 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 3643 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 3644 func_id != BPF_FUNC_skb_output &&
908432ca 3645 func_id != BPF_FUNC_perf_event_read_value)
6aff67c8
AS
3646 goto error;
3647 break;
3648 case BPF_MAP_TYPE_STACK_TRACE:
3649 if (func_id != BPF_FUNC_get_stackid)
3650 goto error;
3651 break;
4ed8ec52 3652 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 3653 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 3654 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
3655 goto error;
3656 break;
cd339431 3657 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 3658 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
3659 if (func_id != BPF_FUNC_get_local_storage)
3660 goto error;
3661 break;
546ac1ff 3662 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 3663 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
3664 if (func_id != BPF_FUNC_redirect_map &&
3665 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
3666 goto error;
3667 break;
fbfc504a
BT
3668 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3669 * appear.
3670 */
6710e112
JDB
3671 case BPF_MAP_TYPE_CPUMAP:
3672 if (func_id != BPF_FUNC_redirect_map)
3673 goto error;
3674 break;
fada7fdc
JL
3675 case BPF_MAP_TYPE_XSKMAP:
3676 if (func_id != BPF_FUNC_redirect_map &&
3677 func_id != BPF_FUNC_map_lookup_elem)
3678 goto error;
3679 break;
56f668df 3680 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 3681 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
3682 if (func_id != BPF_FUNC_map_lookup_elem)
3683 goto error;
16a43625 3684 break;
174a79ff
JF
3685 case BPF_MAP_TYPE_SOCKMAP:
3686 if (func_id != BPF_FUNC_sk_redirect_map &&
3687 func_id != BPF_FUNC_sock_map_update &&
4f738adb
JF
3688 func_id != BPF_FUNC_map_delete_elem &&
3689 func_id != BPF_FUNC_msg_redirect_map)
174a79ff
JF
3690 goto error;
3691 break;
81110384
JF
3692 case BPF_MAP_TYPE_SOCKHASH:
3693 if (func_id != BPF_FUNC_sk_redirect_hash &&
3694 func_id != BPF_FUNC_sock_hash_update &&
3695 func_id != BPF_FUNC_map_delete_elem &&
3696 func_id != BPF_FUNC_msg_redirect_hash)
3697 goto error;
3698 break;
2dbb9b9e
MKL
3699 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3700 if (func_id != BPF_FUNC_sk_select_reuseport)
3701 goto error;
3702 break;
f1a2e44a
MV
3703 case BPF_MAP_TYPE_QUEUE:
3704 case BPF_MAP_TYPE_STACK:
3705 if (func_id != BPF_FUNC_map_peek_elem &&
3706 func_id != BPF_FUNC_map_pop_elem &&
3707 func_id != BPF_FUNC_map_push_elem)
3708 goto error;
3709 break;
6ac99e8f
MKL
3710 case BPF_MAP_TYPE_SK_STORAGE:
3711 if (func_id != BPF_FUNC_sk_storage_get &&
3712 func_id != BPF_FUNC_sk_storage_delete)
3713 goto error;
3714 break;
6aff67c8
AS
3715 default:
3716 break;
3717 }
3718
3719 /* ... and second from the function itself. */
3720 switch (func_id) {
3721 case BPF_FUNC_tail_call:
3722 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3723 goto error;
f910cefa 3724 if (env->subprog_cnt > 1) {
f4d7e40a
AS
3725 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3726 return -EINVAL;
3727 }
6aff67c8
AS
3728 break;
3729 case BPF_FUNC_perf_event_read:
3730 case BPF_FUNC_perf_event_output:
908432ca 3731 case BPF_FUNC_perf_event_read_value:
a7658e1a 3732 case BPF_FUNC_skb_output:
6aff67c8
AS
3733 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3734 goto error;
3735 break;
3736 case BPF_FUNC_get_stackid:
3737 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3738 goto error;
3739 break;
60d20f91 3740 case BPF_FUNC_current_task_under_cgroup:
747ea55e 3741 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
3742 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3743 goto error;
3744 break;
97f91a7c 3745 case BPF_FUNC_redirect_map:
9c270af3 3746 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 3747 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
3748 map->map_type != BPF_MAP_TYPE_CPUMAP &&
3749 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
3750 goto error;
3751 break;
174a79ff 3752 case BPF_FUNC_sk_redirect_map:
4f738adb 3753 case BPF_FUNC_msg_redirect_map:
81110384 3754 case BPF_FUNC_sock_map_update:
174a79ff
JF
3755 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3756 goto error;
3757 break;
81110384
JF
3758 case BPF_FUNC_sk_redirect_hash:
3759 case BPF_FUNC_msg_redirect_hash:
3760 case BPF_FUNC_sock_hash_update:
3761 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
3762 goto error;
3763 break;
cd339431 3764 case BPF_FUNC_get_local_storage:
b741f163
RG
3765 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3766 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
3767 goto error;
3768 break;
2dbb9b9e
MKL
3769 case BPF_FUNC_sk_select_reuseport:
3770 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
3771 goto error;
3772 break;
f1a2e44a
MV
3773 case BPF_FUNC_map_peek_elem:
3774 case BPF_FUNC_map_pop_elem:
3775 case BPF_FUNC_map_push_elem:
3776 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3777 map->map_type != BPF_MAP_TYPE_STACK)
3778 goto error;
3779 break;
6ac99e8f
MKL
3780 case BPF_FUNC_sk_storage_get:
3781 case BPF_FUNC_sk_storage_delete:
3782 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3783 goto error;
3784 break;
6aff67c8
AS
3785 default:
3786 break;
35578d79
KX
3787 }
3788
3789 return 0;
6aff67c8 3790error:
61bd5218 3791 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 3792 map->map_type, func_id_name(func_id), func_id);
6aff67c8 3793 return -EINVAL;
35578d79
KX
3794}
3795
90133415 3796static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
3797{
3798 int count = 0;
3799
39f19ebb 3800 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3801 count++;
39f19ebb 3802 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3803 count++;
39f19ebb 3804 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3805 count++;
39f19ebb 3806 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3807 count++;
39f19ebb 3808 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
3809 count++;
3810
90133415
DB
3811 /* We only support one arg being in raw mode at the moment,
3812 * which is sufficient for the helper functions we have
3813 * right now.
3814 */
3815 return count <= 1;
3816}
3817
3818static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3819 enum bpf_arg_type arg_next)
3820{
3821 return (arg_type_is_mem_ptr(arg_curr) &&
3822 !arg_type_is_mem_size(arg_next)) ||
3823 (!arg_type_is_mem_ptr(arg_curr) &&
3824 arg_type_is_mem_size(arg_next));
3825}
3826
3827static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3828{
3829 /* bpf_xxx(..., buf, len) call will access 'len'
3830 * bytes from memory 'buf'. Both arg types need
3831 * to be paired, so make sure there's no buggy
3832 * helper function specification.
3833 */
3834 if (arg_type_is_mem_size(fn->arg1_type) ||
3835 arg_type_is_mem_ptr(fn->arg5_type) ||
3836 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3837 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3838 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3839 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3840 return false;
3841
3842 return true;
3843}
3844
1b986589 3845static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
3846{
3847 int count = 0;
3848
1b986589 3849 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 3850 count++;
1b986589 3851 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 3852 count++;
1b986589 3853 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 3854 count++;
1b986589 3855 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 3856 count++;
1b986589 3857 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
3858 count++;
3859
1b986589
MKL
3860 /* A reference acquiring function cannot acquire
3861 * another refcounted ptr.
3862 */
3863 if (is_acquire_function(func_id) && count)
3864 return false;
3865
fd978bf7
JS
3866 /* We only support one arg being unreferenced at the moment,
3867 * which is sufficient for the helper functions we have right now.
3868 */
3869 return count <= 1;
3870}
3871
1b986589 3872static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
3873{
3874 return check_raw_mode_ok(fn) &&
fd978bf7 3875 check_arg_pair_ok(fn) &&
1b986589 3876 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
3877}
3878
de8f3a83
DB
3879/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3880 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 3881 */
f4d7e40a
AS
3882static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3883 struct bpf_func_state *state)
969bf05e 3884{
58e2af8b 3885 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
3886 int i;
3887
3888 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 3889 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 3890 mark_reg_unknown(env, regs, i);
969bf05e 3891
f3709f69
JS
3892 bpf_for_each_spilled_reg(i, state, reg) {
3893 if (!reg)
969bf05e 3894 continue;
de8f3a83 3895 if (reg_is_pkt_pointer_any(reg))
f54c7898 3896 __mark_reg_unknown(env, reg);
969bf05e
AS
3897 }
3898}
3899
f4d7e40a
AS
3900static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3901{
3902 struct bpf_verifier_state *vstate = env->cur_state;
3903 int i;
3904
3905 for (i = 0; i <= vstate->curframe; i++)
3906 __clear_all_pkt_pointers(env, vstate->frame[i]);
3907}
3908
fd978bf7 3909static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
3910 struct bpf_func_state *state,
3911 int ref_obj_id)
fd978bf7
JS
3912{
3913 struct bpf_reg_state *regs = state->regs, *reg;
3914 int i;
3915
3916 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 3917 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
3918 mark_reg_unknown(env, regs, i);
3919
3920 bpf_for_each_spilled_reg(i, state, reg) {
3921 if (!reg)
3922 continue;
1b986589 3923 if (reg->ref_obj_id == ref_obj_id)
f54c7898 3924 __mark_reg_unknown(env, reg);
fd978bf7
JS
3925 }
3926}
3927
3928/* The pointer with the specified id has released its reference to kernel
3929 * resources. Identify all copies of the same pointer and clear the reference.
3930 */
3931static int release_reference(struct bpf_verifier_env *env,
1b986589 3932 int ref_obj_id)
fd978bf7
JS
3933{
3934 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 3935 int err;
fd978bf7
JS
3936 int i;
3937
1b986589
MKL
3938 err = release_reference_state(cur_func(env), ref_obj_id);
3939 if (err)
3940 return err;
3941
fd978bf7 3942 for (i = 0; i <= vstate->curframe; i++)
1b986589 3943 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 3944
1b986589 3945 return 0;
fd978bf7
JS
3946}
3947
f4d7e40a
AS
3948static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3949 int *insn_idx)
3950{
3951 struct bpf_verifier_state *state = env->cur_state;
3952 struct bpf_func_state *caller, *callee;
fd978bf7 3953 int i, err, subprog, target_insn;
f4d7e40a 3954
aada9ce6 3955 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 3956 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 3957 state->curframe + 2);
f4d7e40a
AS
3958 return -E2BIG;
3959 }
3960
3961 target_insn = *insn_idx + insn->imm;
3962 subprog = find_subprog(env, target_insn + 1);
3963 if (subprog < 0) {
3964 verbose(env, "verifier bug. No program starts at insn %d\n",
3965 target_insn + 1);
3966 return -EFAULT;
3967 }
3968
3969 caller = state->frame[state->curframe];
3970 if (state->frame[state->curframe + 1]) {
3971 verbose(env, "verifier bug. Frame %d already allocated\n",
3972 state->curframe + 1);
3973 return -EFAULT;
3974 }
3975
3976 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
3977 if (!callee)
3978 return -ENOMEM;
3979 state->frame[state->curframe + 1] = callee;
3980
3981 /* callee cannot access r0, r6 - r9 for reading and has to write
3982 * into its own stack before reading from it.
3983 * callee can read/write into caller's stack
3984 */
3985 init_func_state(env, callee,
3986 /* remember the callsite, it will be used by bpf_exit */
3987 *insn_idx /* callsite */,
3988 state->curframe + 1 /* frameno within this callchain */,
f910cefa 3989 subprog /* subprog number within this prog */);
f4d7e40a 3990
fd978bf7
JS
3991 /* Transfer references to the callee */
3992 err = transfer_reference_state(callee, caller);
3993 if (err)
3994 return err;
3995
679c782d
EC
3996 /* copy r1 - r5 args that callee can access. The copy includes parent
3997 * pointers, which connects us up to the liveness chain
3998 */
f4d7e40a
AS
3999 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4000 callee->regs[i] = caller->regs[i];
4001
679c782d 4002 /* after the call registers r0 - r5 were scratched */
f4d7e40a
AS
4003 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4004 mark_reg_not_init(env, caller->regs, caller_saved[i]);
4005 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4006 }
4007
4008 /* only increment it after check_reg_arg() finished */
4009 state->curframe++;
4010
8c1b6e69
AS
4011 if (btf_check_func_arg_match(env, subprog))
4012 return -EINVAL;
4013
f4d7e40a
AS
4014 /* and go analyze first insn of the callee */
4015 *insn_idx = target_insn;
4016
06ee7115 4017 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4018 verbose(env, "caller:\n");
4019 print_verifier_state(env, caller);
4020 verbose(env, "callee:\n");
4021 print_verifier_state(env, callee);
4022 }
4023 return 0;
4024}
4025
4026static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4027{
4028 struct bpf_verifier_state *state = env->cur_state;
4029 struct bpf_func_state *caller, *callee;
4030 struct bpf_reg_state *r0;
fd978bf7 4031 int err;
f4d7e40a
AS
4032
4033 callee = state->frame[state->curframe];
4034 r0 = &callee->regs[BPF_REG_0];
4035 if (r0->type == PTR_TO_STACK) {
4036 /* technically it's ok to return caller's stack pointer
4037 * (or caller's caller's pointer) back to the caller,
4038 * since these pointers are valid. Only current stack
4039 * pointer will be invalid as soon as function exits,
4040 * but let's be conservative
4041 */
4042 verbose(env, "cannot return stack pointer to the caller\n");
4043 return -EINVAL;
4044 }
4045
4046 state->curframe--;
4047 caller = state->frame[state->curframe];
4048 /* return to the caller whatever r0 had in the callee */
4049 caller->regs[BPF_REG_0] = *r0;
4050
fd978bf7
JS
4051 /* Transfer references to the caller */
4052 err = transfer_reference_state(caller, callee);
4053 if (err)
4054 return err;
4055
f4d7e40a 4056 *insn_idx = callee->callsite + 1;
06ee7115 4057 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4058 verbose(env, "returning from callee:\n");
4059 print_verifier_state(env, callee);
4060 verbose(env, "to caller at %d:\n", *insn_idx);
4061 print_verifier_state(env, caller);
4062 }
4063 /* clear everything in the callee */
4064 free_func_state(callee);
4065 state->frame[state->curframe + 1] = NULL;
4066 return 0;
4067}
4068
849fa506
YS
4069static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4070 int func_id,
4071 struct bpf_call_arg_meta *meta)
4072{
4073 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4074
4075 if (ret_type != RET_INTEGER ||
4076 (func_id != BPF_FUNC_get_stack &&
4077 func_id != BPF_FUNC_probe_read_str))
4078 return;
4079
4080 ret_reg->smax_value = meta->msize_smax_value;
4081 ret_reg->umax_value = meta->msize_umax_value;
4082 __reg_deduce_bounds(ret_reg);
4083 __reg_bound_offset(ret_reg);
4084}
4085
c93552c4
DB
4086static int
4087record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4088 int func_id, int insn_idx)
4089{
4090 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4091 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4092
4093 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4094 func_id != BPF_FUNC_map_lookup_elem &&
4095 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4096 func_id != BPF_FUNC_map_delete_elem &&
4097 func_id != BPF_FUNC_map_push_elem &&
4098 func_id != BPF_FUNC_map_pop_elem &&
4099 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4100 return 0;
09772d92 4101
591fe988 4102 if (map == NULL) {
c93552c4
DB
4103 verbose(env, "kernel subsystem misconfigured verifier\n");
4104 return -EINVAL;
4105 }
4106
591fe988
DB
4107 /* In case of read-only, some additional restrictions
4108 * need to be applied in order to prevent altering the
4109 * state of the map from program side.
4110 */
4111 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4112 (func_id == BPF_FUNC_map_delete_elem ||
4113 func_id == BPF_FUNC_map_update_elem ||
4114 func_id == BPF_FUNC_map_push_elem ||
4115 func_id == BPF_FUNC_map_pop_elem)) {
4116 verbose(env, "write into map forbidden\n");
4117 return -EACCES;
4118 }
4119
d2e4c1e6 4120 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4
DB
4121 bpf_map_ptr_store(aux, meta->map_ptr,
4122 meta->map_ptr->unpriv_array);
d2e4c1e6 4123 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4
DB
4124 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4125 meta->map_ptr->unpriv_array);
4126 return 0;
4127}
4128
d2e4c1e6
DB
4129static int
4130record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4131 int func_id, int insn_idx)
4132{
4133 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4134 struct bpf_reg_state *regs = cur_regs(env), *reg;
4135 struct bpf_map *map = meta->map_ptr;
4136 struct tnum range;
4137 u64 val;
cc52d914 4138 int err;
d2e4c1e6
DB
4139
4140 if (func_id != BPF_FUNC_tail_call)
4141 return 0;
4142 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4143 verbose(env, "kernel subsystem misconfigured verifier\n");
4144 return -EINVAL;
4145 }
4146
4147 range = tnum_range(0, map->max_entries - 1);
4148 reg = &regs[BPF_REG_3];
4149
4150 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4151 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4152 return 0;
4153 }
4154
cc52d914
DB
4155 err = mark_chain_precision(env, BPF_REG_3);
4156 if (err)
4157 return err;
4158
d2e4c1e6
DB
4159 val = reg->var_off.value;
4160 if (bpf_map_key_unseen(aux))
4161 bpf_map_key_store(aux, val);
4162 else if (!bpf_map_key_poisoned(aux) &&
4163 bpf_map_key_immediate(aux) != val)
4164 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4165 return 0;
4166}
4167
fd978bf7
JS
4168static int check_reference_leak(struct bpf_verifier_env *env)
4169{
4170 struct bpf_func_state *state = cur_func(env);
4171 int i;
4172
4173 for (i = 0; i < state->acquired_refs; i++) {
4174 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4175 state->refs[i].id, state->refs[i].insn_idx);
4176 }
4177 return state->acquired_refs ? -EINVAL : 0;
4178}
4179
f4d7e40a 4180static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4181{
17a52670 4182 const struct bpf_func_proto *fn = NULL;
638f5b90 4183 struct bpf_reg_state *regs;
33ff9823 4184 struct bpf_call_arg_meta meta;
969bf05e 4185 bool changes_data;
17a52670
AS
4186 int i, err;
4187
4188 /* find function prototype */
4189 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4190 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4191 func_id);
17a52670
AS
4192 return -EINVAL;
4193 }
4194
00176a34 4195 if (env->ops->get_func_proto)
5e43f899 4196 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4197 if (!fn) {
61bd5218
JK
4198 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4199 func_id);
17a52670
AS
4200 return -EINVAL;
4201 }
4202
4203 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4204 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4205 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4206 return -EINVAL;
4207 }
4208
04514d13 4209 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4210 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4211 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4212 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4213 func_id_name(func_id), func_id);
4214 return -EINVAL;
4215 }
969bf05e 4216
33ff9823 4217 memset(&meta, 0, sizeof(meta));
36bbef52 4218 meta.pkt_access = fn->pkt_access;
33ff9823 4219
1b986589 4220 err = check_func_proto(fn, func_id);
435faee1 4221 if (err) {
61bd5218 4222 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 4223 func_id_name(func_id), func_id);
435faee1
DB
4224 return err;
4225 }
4226
d83525ca 4227 meta.func_id = func_id;
17a52670 4228 /* check args */
a7658e1a 4229 for (i = 0; i < 5; i++) {
9cc31b3a
AS
4230 err = btf_resolve_helper_id(&env->log, fn, i);
4231 if (err > 0)
4232 meta.btf_id = err;
a7658e1a
AS
4233 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4234 if (err)
4235 return err;
4236 }
17a52670 4237
c93552c4
DB
4238 err = record_func_map(env, &meta, func_id, insn_idx);
4239 if (err)
4240 return err;
4241
d2e4c1e6
DB
4242 err = record_func_key(env, &meta, func_id, insn_idx);
4243 if (err)
4244 return err;
4245
435faee1
DB
4246 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4247 * is inferred from register state.
4248 */
4249 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
4250 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4251 BPF_WRITE, -1, false);
435faee1
DB
4252 if (err)
4253 return err;
4254 }
4255
fd978bf7
JS
4256 if (func_id == BPF_FUNC_tail_call) {
4257 err = check_reference_leak(env);
4258 if (err) {
4259 verbose(env, "tail_call would lead to reference leak\n");
4260 return err;
4261 }
4262 } else if (is_release_function(func_id)) {
1b986589 4263 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
4264 if (err) {
4265 verbose(env, "func %s#%d reference has not been acquired before\n",
4266 func_id_name(func_id), func_id);
fd978bf7 4267 return err;
46f8bc92 4268 }
fd978bf7
JS
4269 }
4270
638f5b90 4271 regs = cur_regs(env);
cd339431
RG
4272
4273 /* check that flags argument in get_local_storage(map, flags) is 0,
4274 * this is required because get_local_storage() can't return an error.
4275 */
4276 if (func_id == BPF_FUNC_get_local_storage &&
4277 !register_is_null(&regs[BPF_REG_2])) {
4278 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4279 return -EINVAL;
4280 }
4281
17a52670 4282 /* reset caller saved regs */
dc503a8a 4283 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 4284 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
4285 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4286 }
17a52670 4287
5327ed3d
JW
4288 /* helper call returns 64-bit value. */
4289 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4290
dc503a8a 4291 /* update return register (already marked as written above) */
17a52670 4292 if (fn->ret_type == RET_INTEGER) {
f1174f77 4293 /* sets type to SCALAR_VALUE */
61bd5218 4294 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
4295 } else if (fn->ret_type == RET_VOID) {
4296 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
4297 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4298 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 4299 /* There is no offset yet applied, variable or fixed */
61bd5218 4300 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
4301 /* remember map_ptr, so that check_map_access()
4302 * can check 'value_size' boundary of memory access
4303 * to map element returned from bpf_map_lookup_elem()
4304 */
33ff9823 4305 if (meta.map_ptr == NULL) {
61bd5218
JK
4306 verbose(env,
4307 "kernel subsystem misconfigured verifier\n");
17a52670
AS
4308 return -EINVAL;
4309 }
33ff9823 4310 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
4311 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4312 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
4313 if (map_value_has_spin_lock(meta.map_ptr))
4314 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
4315 } else {
4316 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4317 regs[BPF_REG_0].id = ++env->id_gen;
4318 }
c64b7983
JS
4319 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4320 mark_reg_known_zero(env, regs, BPF_REG_0);
4321 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 4322 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
4323 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4324 mark_reg_known_zero(env, regs, BPF_REG_0);
4325 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4326 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
4327 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4328 mark_reg_known_zero(env, regs, BPF_REG_0);
4329 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4330 regs[BPF_REG_0].id = ++env->id_gen;
17a52670 4331 } else {
61bd5218 4332 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 4333 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
4334 return -EINVAL;
4335 }
04fd61ab 4336
0f3adc28 4337 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
4338 /* For release_reference() */
4339 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
0f3adc28
LB
4340 } else if (is_acquire_function(func_id)) {
4341 int id = acquire_reference_state(env, insn_idx);
4342
4343 if (id < 0)
4344 return id;
4345 /* For mark_ptr_or_null_reg() */
4346 regs[BPF_REG_0].id = id;
4347 /* For release_reference() */
4348 regs[BPF_REG_0].ref_obj_id = id;
4349 }
1b986589 4350
849fa506
YS
4351 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4352
61bd5218 4353 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
4354 if (err)
4355 return err;
04fd61ab 4356
c195651e
YS
4357 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4358 const char *err_str;
4359
4360#ifdef CONFIG_PERF_EVENTS
4361 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4362 err_str = "cannot get callchain buffer for func %s#%d\n";
4363#else
4364 err = -ENOTSUPP;
4365 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4366#endif
4367 if (err) {
4368 verbose(env, err_str, func_id_name(func_id), func_id);
4369 return err;
4370 }
4371
4372 env->prog->has_callchain_buf = true;
4373 }
4374
969bf05e
AS
4375 if (changes_data)
4376 clear_all_pkt_pointers(env);
4377 return 0;
4378}
4379
b03c9f9f
EC
4380static bool signed_add_overflows(s64 a, s64 b)
4381{
4382 /* Do the add in u64, where overflow is well-defined */
4383 s64 res = (s64)((u64)a + (u64)b);
4384
4385 if (b < 0)
4386 return res > a;
4387 return res < a;
4388}
4389
4390static bool signed_sub_overflows(s64 a, s64 b)
4391{
4392 /* Do the sub in u64, where overflow is well-defined */
4393 s64 res = (s64)((u64)a - (u64)b);
4394
4395 if (b < 0)
4396 return res < a;
4397 return res > a;
969bf05e
AS
4398}
4399
bb7f0f98
AS
4400static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4401 const struct bpf_reg_state *reg,
4402 enum bpf_reg_type type)
4403{
4404 bool known = tnum_is_const(reg->var_off);
4405 s64 val = reg->var_off.value;
4406 s64 smin = reg->smin_value;
4407
4408 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4409 verbose(env, "math between %s pointer and %lld is not allowed\n",
4410 reg_type_str[type], val);
4411 return false;
4412 }
4413
4414 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4415 verbose(env, "%s pointer offset %d is not allowed\n",
4416 reg_type_str[type], reg->off);
4417 return false;
4418 }
4419
4420 if (smin == S64_MIN) {
4421 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4422 reg_type_str[type]);
4423 return false;
4424 }
4425
4426 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4427 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4428 smin, reg_type_str[type]);
4429 return false;
4430 }
4431
4432 return true;
4433}
4434
979d63d5
DB
4435static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4436{
4437 return &env->insn_aux_data[env->insn_idx];
4438}
4439
4440static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4441 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4442{
4443 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4444 (opcode == BPF_SUB && !off_is_neg);
4445 u32 off;
4446
4447 switch (ptr_reg->type) {
4448 case PTR_TO_STACK:
088ec26d
AI
4449 /* Indirect variable offset stack access is prohibited in
4450 * unprivileged mode so it's not handled here.
4451 */
979d63d5
DB
4452 off = ptr_reg->off + ptr_reg->var_off.value;
4453 if (mask_to_left)
4454 *ptr_limit = MAX_BPF_STACK + off;
4455 else
4456 *ptr_limit = -off;
4457 return 0;
4458 case PTR_TO_MAP_VALUE:
4459 if (mask_to_left) {
4460 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4461 } else {
4462 off = ptr_reg->smin_value + ptr_reg->off;
4463 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4464 }
4465 return 0;
4466 default:
4467 return -EINVAL;
4468 }
4469}
4470
d3bd7413
DB
4471static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4472 const struct bpf_insn *insn)
4473{
4474 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4475}
4476
4477static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4478 u32 alu_state, u32 alu_limit)
4479{
4480 /* If we arrived here from different branches with different
4481 * state or limits to sanitize, then this won't work.
4482 */
4483 if (aux->alu_state &&
4484 (aux->alu_state != alu_state ||
4485 aux->alu_limit != alu_limit))
4486 return -EACCES;
4487
4488 /* Corresponding fixup done in fixup_bpf_calls(). */
4489 aux->alu_state = alu_state;
4490 aux->alu_limit = alu_limit;
4491 return 0;
4492}
4493
4494static int sanitize_val_alu(struct bpf_verifier_env *env,
4495 struct bpf_insn *insn)
4496{
4497 struct bpf_insn_aux_data *aux = cur_aux(env);
4498
4499 if (can_skip_alu_sanitation(env, insn))
4500 return 0;
4501
4502 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4503}
4504
979d63d5
DB
4505static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4506 struct bpf_insn *insn,
4507 const struct bpf_reg_state *ptr_reg,
4508 struct bpf_reg_state *dst_reg,
4509 bool off_is_neg)
4510{
4511 struct bpf_verifier_state *vstate = env->cur_state;
4512 struct bpf_insn_aux_data *aux = cur_aux(env);
4513 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4514 u8 opcode = BPF_OP(insn->code);
4515 u32 alu_state, alu_limit;
4516 struct bpf_reg_state tmp;
4517 bool ret;
4518
d3bd7413 4519 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
4520 return 0;
4521
4522 /* We already marked aux for masking from non-speculative
4523 * paths, thus we got here in the first place. We only care
4524 * to explore bad access from here.
4525 */
4526 if (vstate->speculative)
4527 goto do_sim;
4528
4529 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4530 alu_state |= ptr_is_dst_reg ?
4531 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4532
4533 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4534 return 0;
d3bd7413 4535 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 4536 return -EACCES;
979d63d5
DB
4537do_sim:
4538 /* Simulate and find potential out-of-bounds access under
4539 * speculative execution from truncation as a result of
4540 * masking when off was not within expected range. If off
4541 * sits in dst, then we temporarily need to move ptr there
4542 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4543 * for cases where we use K-based arithmetic in one direction
4544 * and truncated reg-based in the other in order to explore
4545 * bad access.
4546 */
4547 if (!ptr_is_dst_reg) {
4548 tmp = *dst_reg;
4549 *dst_reg = *ptr_reg;
4550 }
4551 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 4552 if (!ptr_is_dst_reg && ret)
979d63d5
DB
4553 *dst_reg = tmp;
4554 return !ret ? -EFAULT : 0;
4555}
4556
f1174f77 4557/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
4558 * Caller should also handle BPF_MOV case separately.
4559 * If we return -EACCES, caller may want to try again treating pointer as a
4560 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4561 */
4562static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4563 struct bpf_insn *insn,
4564 const struct bpf_reg_state *ptr_reg,
4565 const struct bpf_reg_state *off_reg)
969bf05e 4566{
f4d7e40a
AS
4567 struct bpf_verifier_state *vstate = env->cur_state;
4568 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4569 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 4570 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
4571 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4572 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4573 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4574 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 4575 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 4576 u8 opcode = BPF_OP(insn->code);
979d63d5 4577 int ret;
969bf05e 4578
f1174f77 4579 dst_reg = &regs[dst];
969bf05e 4580
6f16101e
DB
4581 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4582 smin_val > smax_val || umin_val > umax_val) {
4583 /* Taint dst register if offset had invalid bounds derived from
4584 * e.g. dead branches.
4585 */
f54c7898 4586 __mark_reg_unknown(env, dst_reg);
6f16101e 4587 return 0;
f1174f77
EC
4588 }
4589
4590 if (BPF_CLASS(insn->code) != BPF_ALU64) {
4591 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
82abbf8d
AS
4592 verbose(env,
4593 "R%d 32-bit pointer arithmetic prohibited\n",
4594 dst);
f1174f77 4595 return -EACCES;
969bf05e
AS
4596 }
4597
aad2eeaf
JS
4598 switch (ptr_reg->type) {
4599 case PTR_TO_MAP_VALUE_OR_NULL:
4600 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4601 dst, reg_type_str[ptr_reg->type]);
f1174f77 4602 return -EACCES;
aad2eeaf
JS
4603 case CONST_PTR_TO_MAP:
4604 case PTR_TO_PACKET_END:
c64b7983
JS
4605 case PTR_TO_SOCKET:
4606 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
4607 case PTR_TO_SOCK_COMMON:
4608 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
4609 case PTR_TO_TCP_SOCK:
4610 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 4611 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
4612 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4613 dst, reg_type_str[ptr_reg->type]);
f1174f77 4614 return -EACCES;
9d7eceed
DB
4615 case PTR_TO_MAP_VALUE:
4616 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4617 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4618 off_reg == dst_reg ? dst : src);
4619 return -EACCES;
4620 }
4621 /* fall-through */
aad2eeaf
JS
4622 default:
4623 break;
f1174f77
EC
4624 }
4625
4626 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4627 * The id may be overwritten later if we create a new variable offset.
969bf05e 4628 */
f1174f77
EC
4629 dst_reg->type = ptr_reg->type;
4630 dst_reg->id = ptr_reg->id;
969bf05e 4631
bb7f0f98
AS
4632 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4633 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4634 return -EINVAL;
4635
f1174f77
EC
4636 switch (opcode) {
4637 case BPF_ADD:
979d63d5
DB
4638 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4639 if (ret < 0) {
4640 verbose(env, "R%d tried to add from different maps or paths\n", dst);
4641 return ret;
4642 }
f1174f77
EC
4643 /* We can take a fixed offset as long as it doesn't overflow
4644 * the s32 'off' field
969bf05e 4645 */
b03c9f9f
EC
4646 if (known && (ptr_reg->off + smin_val ==
4647 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 4648 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
4649 dst_reg->smin_value = smin_ptr;
4650 dst_reg->smax_value = smax_ptr;
4651 dst_reg->umin_value = umin_ptr;
4652 dst_reg->umax_value = umax_ptr;
f1174f77 4653 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 4654 dst_reg->off = ptr_reg->off + smin_val;
0962590e 4655 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4656 break;
4657 }
f1174f77
EC
4658 /* A new variable offset is created. Note that off_reg->off
4659 * == 0, since it's a scalar.
4660 * dst_reg gets the pointer type and since some positive
4661 * integer value was added to the pointer, give it a new 'id'
4662 * if it's a PTR_TO_PACKET.
4663 * this creates a new 'base' pointer, off_reg (variable) gets
4664 * added into the variable offset, and we copy the fixed offset
4665 * from ptr_reg.
969bf05e 4666 */
b03c9f9f
EC
4667 if (signed_add_overflows(smin_ptr, smin_val) ||
4668 signed_add_overflows(smax_ptr, smax_val)) {
4669 dst_reg->smin_value = S64_MIN;
4670 dst_reg->smax_value = S64_MAX;
4671 } else {
4672 dst_reg->smin_value = smin_ptr + smin_val;
4673 dst_reg->smax_value = smax_ptr + smax_val;
4674 }
4675 if (umin_ptr + umin_val < umin_ptr ||
4676 umax_ptr + umax_val < umax_ptr) {
4677 dst_reg->umin_value = 0;
4678 dst_reg->umax_value = U64_MAX;
4679 } else {
4680 dst_reg->umin_value = umin_ptr + umin_val;
4681 dst_reg->umax_value = umax_ptr + umax_val;
4682 }
f1174f77
EC
4683 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4684 dst_reg->off = ptr_reg->off;
0962590e 4685 dst_reg->raw = ptr_reg->raw;
de8f3a83 4686 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4687 dst_reg->id = ++env->id_gen;
4688 /* something was added to pkt_ptr, set range to zero */
0962590e 4689 dst_reg->raw = 0;
f1174f77
EC
4690 }
4691 break;
4692 case BPF_SUB:
979d63d5
DB
4693 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4694 if (ret < 0) {
4695 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4696 return ret;
4697 }
f1174f77
EC
4698 if (dst_reg == off_reg) {
4699 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
4700 verbose(env, "R%d tried to subtract pointer from scalar\n",
4701 dst);
f1174f77
EC
4702 return -EACCES;
4703 }
4704 /* We don't allow subtraction from FP, because (according to
4705 * test_verifier.c test "invalid fp arithmetic", JITs might not
4706 * be able to deal with it.
969bf05e 4707 */
f1174f77 4708 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
4709 verbose(env, "R%d subtraction from stack pointer prohibited\n",
4710 dst);
f1174f77
EC
4711 return -EACCES;
4712 }
b03c9f9f
EC
4713 if (known && (ptr_reg->off - smin_val ==
4714 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 4715 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
4716 dst_reg->smin_value = smin_ptr;
4717 dst_reg->smax_value = smax_ptr;
4718 dst_reg->umin_value = umin_ptr;
4719 dst_reg->umax_value = umax_ptr;
f1174f77
EC
4720 dst_reg->var_off = ptr_reg->var_off;
4721 dst_reg->id = ptr_reg->id;
b03c9f9f 4722 dst_reg->off = ptr_reg->off - smin_val;
0962590e 4723 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4724 break;
4725 }
f1174f77
EC
4726 /* A new variable offset is created. If the subtrahend is known
4727 * nonnegative, then any reg->range we had before is still good.
969bf05e 4728 */
b03c9f9f
EC
4729 if (signed_sub_overflows(smin_ptr, smax_val) ||
4730 signed_sub_overflows(smax_ptr, smin_val)) {
4731 /* Overflow possible, we know nothing */
4732 dst_reg->smin_value = S64_MIN;
4733 dst_reg->smax_value = S64_MAX;
4734 } else {
4735 dst_reg->smin_value = smin_ptr - smax_val;
4736 dst_reg->smax_value = smax_ptr - smin_val;
4737 }
4738 if (umin_ptr < umax_val) {
4739 /* Overflow possible, we know nothing */
4740 dst_reg->umin_value = 0;
4741 dst_reg->umax_value = U64_MAX;
4742 } else {
4743 /* Cannot overflow (as long as bounds are consistent) */
4744 dst_reg->umin_value = umin_ptr - umax_val;
4745 dst_reg->umax_value = umax_ptr - umin_val;
4746 }
f1174f77
EC
4747 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4748 dst_reg->off = ptr_reg->off;
0962590e 4749 dst_reg->raw = ptr_reg->raw;
de8f3a83 4750 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4751 dst_reg->id = ++env->id_gen;
4752 /* something was added to pkt_ptr, set range to zero */
b03c9f9f 4753 if (smin_val < 0)
0962590e 4754 dst_reg->raw = 0;
43188702 4755 }
f1174f77
EC
4756 break;
4757 case BPF_AND:
4758 case BPF_OR:
4759 case BPF_XOR:
82abbf8d
AS
4760 /* bitwise ops on pointers are troublesome, prohibit. */
4761 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4762 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
4763 return -EACCES;
4764 default:
4765 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
4766 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4767 dst, bpf_alu_string[opcode >> 4]);
f1174f77 4768 return -EACCES;
43188702
JF
4769 }
4770
bb7f0f98
AS
4771 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4772 return -EINVAL;
4773
b03c9f9f
EC
4774 __update_reg_bounds(dst_reg);
4775 __reg_deduce_bounds(dst_reg);
4776 __reg_bound_offset(dst_reg);
0d6303db
DB
4777
4778 /* For unprivileged we require that resulting offset must be in bounds
4779 * in order to be able to sanitize access later on.
4780 */
e4298d25
DB
4781 if (!env->allow_ptr_leaks) {
4782 if (dst_reg->type == PTR_TO_MAP_VALUE &&
4783 check_map_access(env, dst, dst_reg->off, 1, false)) {
4784 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4785 "prohibited for !root\n", dst);
4786 return -EACCES;
4787 } else if (dst_reg->type == PTR_TO_STACK &&
4788 check_stack_access(env, dst_reg, dst_reg->off +
4789 dst_reg->var_off.value, 1)) {
4790 verbose(env, "R%d stack pointer arithmetic goes out of range, "
4791 "prohibited for !root\n", dst);
4792 return -EACCES;
4793 }
0d6303db
DB
4794 }
4795
43188702
JF
4796 return 0;
4797}
4798
468f6eaf
JH
4799/* WARNING: This function does calculations on 64-bit values, but the actual
4800 * execution may occur on 32-bit values. Therefore, things like bitshifts
4801 * need extra checks in the 32-bit case.
4802 */
f1174f77
EC
4803static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4804 struct bpf_insn *insn,
4805 struct bpf_reg_state *dst_reg,
4806 struct bpf_reg_state src_reg)
969bf05e 4807{
638f5b90 4808 struct bpf_reg_state *regs = cur_regs(env);
48461135 4809 u8 opcode = BPF_OP(insn->code);
f1174f77 4810 bool src_known, dst_known;
b03c9f9f
EC
4811 s64 smin_val, smax_val;
4812 u64 umin_val, umax_val;
468f6eaf 4813 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
4814 u32 dst = insn->dst_reg;
4815 int ret;
48461135 4816
b799207e
JH
4817 if (insn_bitness == 32) {
4818 /* Relevant for 32-bit RSH: Information can propagate towards
4819 * LSB, so it isn't sufficient to only truncate the output to
4820 * 32 bits.
4821 */
4822 coerce_reg_to_size(dst_reg, 4);
4823 coerce_reg_to_size(&src_reg, 4);
4824 }
4825
b03c9f9f
EC
4826 smin_val = src_reg.smin_value;
4827 smax_val = src_reg.smax_value;
4828 umin_val = src_reg.umin_value;
4829 umax_val = src_reg.umax_value;
f1174f77
EC
4830 src_known = tnum_is_const(src_reg.var_off);
4831 dst_known = tnum_is_const(dst_reg->var_off);
f23cc643 4832
6f16101e
DB
4833 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
4834 smin_val > smax_val || umin_val > umax_val) {
4835 /* Taint dst register if offset had invalid bounds derived from
4836 * e.g. dead branches.
4837 */
f54c7898 4838 __mark_reg_unknown(env, dst_reg);
6f16101e
DB
4839 return 0;
4840 }
4841
bb7f0f98
AS
4842 if (!src_known &&
4843 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 4844 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
4845 return 0;
4846 }
4847
48461135
JB
4848 switch (opcode) {
4849 case BPF_ADD:
d3bd7413
DB
4850 ret = sanitize_val_alu(env, insn);
4851 if (ret < 0) {
4852 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
4853 return ret;
4854 }
b03c9f9f
EC
4855 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4856 signed_add_overflows(dst_reg->smax_value, smax_val)) {
4857 dst_reg->smin_value = S64_MIN;
4858 dst_reg->smax_value = S64_MAX;
4859 } else {
4860 dst_reg->smin_value += smin_val;
4861 dst_reg->smax_value += smax_val;
4862 }
4863 if (dst_reg->umin_value + umin_val < umin_val ||
4864 dst_reg->umax_value + umax_val < umax_val) {
4865 dst_reg->umin_value = 0;
4866 dst_reg->umax_value = U64_MAX;
4867 } else {
4868 dst_reg->umin_value += umin_val;
4869 dst_reg->umax_value += umax_val;
4870 }
f1174f77 4871 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
4872 break;
4873 case BPF_SUB:
d3bd7413
DB
4874 ret = sanitize_val_alu(env, insn);
4875 if (ret < 0) {
4876 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
4877 return ret;
4878 }
b03c9f9f
EC
4879 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4880 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4881 /* Overflow possible, we know nothing */
4882 dst_reg->smin_value = S64_MIN;
4883 dst_reg->smax_value = S64_MAX;
4884 } else {
4885 dst_reg->smin_value -= smax_val;
4886 dst_reg->smax_value -= smin_val;
4887 }
4888 if (dst_reg->umin_value < umax_val) {
4889 /* Overflow possible, we know nothing */
4890 dst_reg->umin_value = 0;
4891 dst_reg->umax_value = U64_MAX;
4892 } else {
4893 /* Cannot overflow (as long as bounds are consistent) */
4894 dst_reg->umin_value -= umax_val;
4895 dst_reg->umax_value -= umin_val;
4896 }
f1174f77 4897 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
4898 break;
4899 case BPF_MUL:
b03c9f9f
EC
4900 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
4901 if (smin_val < 0 || dst_reg->smin_value < 0) {
f1174f77 4902 /* Ain't nobody got time to multiply that sign */
b03c9f9f
EC
4903 __mark_reg_unbounded(dst_reg);
4904 __update_reg_bounds(dst_reg);
f1174f77
EC
4905 break;
4906 }
b03c9f9f
EC
4907 /* Both values are positive, so we can work with unsigned and
4908 * copy the result to signed (unless it exceeds S64_MAX).
f1174f77 4909 */
b03c9f9f
EC
4910 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4911 /* Potential overflow, we know nothing */
4912 __mark_reg_unbounded(dst_reg);
4913 /* (except what we can learn from the var_off) */
4914 __update_reg_bounds(dst_reg);
4915 break;
4916 }
4917 dst_reg->umin_value *= umin_val;
4918 dst_reg->umax_value *= umax_val;
4919 if (dst_reg->umax_value > S64_MAX) {
4920 /* Overflow possible, we know nothing */
4921 dst_reg->smin_value = S64_MIN;
4922 dst_reg->smax_value = S64_MAX;
4923 } else {
4924 dst_reg->smin_value = dst_reg->umin_value;
4925 dst_reg->smax_value = dst_reg->umax_value;
4926 }
48461135
JB
4927 break;
4928 case BPF_AND:
f1174f77 4929 if (src_known && dst_known) {
b03c9f9f
EC
4930 __mark_reg_known(dst_reg, dst_reg->var_off.value &
4931 src_reg.var_off.value);
f1174f77
EC
4932 break;
4933 }
b03c9f9f
EC
4934 /* We get our minimum from the var_off, since that's inherently
4935 * bitwise. Our maximum is the minimum of the operands' maxima.
f23cc643 4936 */
f1174f77 4937 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
4938 dst_reg->umin_value = dst_reg->var_off.value;
4939 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4940 if (dst_reg->smin_value < 0 || smin_val < 0) {
4941 /* Lose signed bounds when ANDing negative numbers,
4942 * ain't nobody got time for that.
4943 */
4944 dst_reg->smin_value = S64_MIN;
4945 dst_reg->smax_value = S64_MAX;
4946 } else {
4947 /* ANDing two positives gives a positive, so safe to
4948 * cast result into s64.
4949 */
4950 dst_reg->smin_value = dst_reg->umin_value;
4951 dst_reg->smax_value = dst_reg->umax_value;
4952 }
4953 /* We may learn something more from the var_off */
4954 __update_reg_bounds(dst_reg);
f1174f77
EC
4955 break;
4956 case BPF_OR:
4957 if (src_known && dst_known) {
b03c9f9f
EC
4958 __mark_reg_known(dst_reg, dst_reg->var_off.value |
4959 src_reg.var_off.value);
f1174f77
EC
4960 break;
4961 }
b03c9f9f
EC
4962 /* We get our maximum from the var_off, and our minimum is the
4963 * maximum of the operands' minima
f1174f77
EC
4964 */
4965 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
4966 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4967 dst_reg->umax_value = dst_reg->var_off.value |
4968 dst_reg->var_off.mask;
4969 if (dst_reg->smin_value < 0 || smin_val < 0) {
4970 /* Lose signed bounds when ORing negative numbers,
4971 * ain't nobody got time for that.
4972 */
4973 dst_reg->smin_value = S64_MIN;
4974 dst_reg->smax_value = S64_MAX;
f1174f77 4975 } else {
b03c9f9f
EC
4976 /* ORing two positives gives a positive, so safe to
4977 * cast result into s64.
4978 */
4979 dst_reg->smin_value = dst_reg->umin_value;
4980 dst_reg->smax_value = dst_reg->umax_value;
f1174f77 4981 }
b03c9f9f
EC
4982 /* We may learn something more from the var_off */
4983 __update_reg_bounds(dst_reg);
48461135
JB
4984 break;
4985 case BPF_LSH:
468f6eaf
JH
4986 if (umax_val >= insn_bitness) {
4987 /* Shifts greater than 31 or 63 are undefined.
4988 * This includes shifts by a negative number.
b03c9f9f 4989 */
61bd5218 4990 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
4991 break;
4992 }
b03c9f9f
EC
4993 /* We lose all sign bit information (except what we can pick
4994 * up from var_off)
48461135 4995 */
b03c9f9f
EC
4996 dst_reg->smin_value = S64_MIN;
4997 dst_reg->smax_value = S64_MAX;
4998 /* If we might shift our top bit out, then we know nothing */
4999 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
5000 dst_reg->umin_value = 0;
5001 dst_reg->umax_value = U64_MAX;
d1174416 5002 } else {
b03c9f9f
EC
5003 dst_reg->umin_value <<= umin_val;
5004 dst_reg->umax_value <<= umax_val;
d1174416 5005 }
afbe1a5b 5006 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5007 /* We may learn something more from the var_off */
5008 __update_reg_bounds(dst_reg);
48461135
JB
5009 break;
5010 case BPF_RSH:
468f6eaf
JH
5011 if (umax_val >= insn_bitness) {
5012 /* Shifts greater than 31 or 63 are undefined.
5013 * This includes shifts by a negative number.
b03c9f9f 5014 */
61bd5218 5015 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5016 break;
5017 }
4374f256
EC
5018 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5019 * be negative, then either:
5020 * 1) src_reg might be zero, so the sign bit of the result is
5021 * unknown, so we lose our signed bounds
5022 * 2) it's known negative, thus the unsigned bounds capture the
5023 * signed bounds
5024 * 3) the signed bounds cross zero, so they tell us nothing
5025 * about the result
5026 * If the value in dst_reg is known nonnegative, then again the
5027 * unsigned bounts capture the signed bounds.
5028 * Thus, in all cases it suffices to blow away our signed bounds
5029 * and rely on inferring new ones from the unsigned bounds and
5030 * var_off of the result.
5031 */
5032 dst_reg->smin_value = S64_MIN;
5033 dst_reg->smax_value = S64_MAX;
afbe1a5b 5034 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5035 dst_reg->umin_value >>= umax_val;
5036 dst_reg->umax_value >>= umin_val;
5037 /* We may learn something more from the var_off */
5038 __update_reg_bounds(dst_reg);
48461135 5039 break;
9cbe1f5a
YS
5040 case BPF_ARSH:
5041 if (umax_val >= insn_bitness) {
5042 /* Shifts greater than 31 or 63 are undefined.
5043 * This includes shifts by a negative number.
5044 */
5045 mark_reg_unknown(env, regs, insn->dst_reg);
5046 break;
5047 }
5048
5049 /* Upon reaching here, src_known is true and
5050 * umax_val is equal to umin_val.
5051 */
5052 dst_reg->smin_value >>= umin_val;
5053 dst_reg->smax_value >>= umin_val;
5054 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
5055
5056 /* blow away the dst_reg umin_value/umax_value and rely on
5057 * dst_reg var_off to refine the result.
5058 */
5059 dst_reg->umin_value = 0;
5060 dst_reg->umax_value = U64_MAX;
5061 __update_reg_bounds(dst_reg);
5062 break;
48461135 5063 default:
61bd5218 5064 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
5065 break;
5066 }
5067
468f6eaf
JH
5068 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5069 /* 32-bit ALU ops are (32,32)->32 */
5070 coerce_reg_to_size(dst_reg, 4);
468f6eaf
JH
5071 }
5072
b03c9f9f
EC
5073 __reg_deduce_bounds(dst_reg);
5074 __reg_bound_offset(dst_reg);
f1174f77
EC
5075 return 0;
5076}
5077
5078/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5079 * and var_off.
5080 */
5081static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5082 struct bpf_insn *insn)
5083{
f4d7e40a
AS
5084 struct bpf_verifier_state *vstate = env->cur_state;
5085 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5086 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
5087 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5088 u8 opcode = BPF_OP(insn->code);
b5dc0163 5089 int err;
f1174f77
EC
5090
5091 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
5092 src_reg = NULL;
5093 if (dst_reg->type != SCALAR_VALUE)
5094 ptr_reg = dst_reg;
5095 if (BPF_SRC(insn->code) == BPF_X) {
5096 src_reg = &regs[insn->src_reg];
f1174f77
EC
5097 if (src_reg->type != SCALAR_VALUE) {
5098 if (dst_reg->type != SCALAR_VALUE) {
5099 /* Combining two pointers by any ALU op yields
82abbf8d
AS
5100 * an arbitrary scalar. Disallow all math except
5101 * pointer subtraction
f1174f77 5102 */
dd066823 5103 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
5104 mark_reg_unknown(env, regs, insn->dst_reg);
5105 return 0;
f1174f77 5106 }
82abbf8d
AS
5107 verbose(env, "R%d pointer %s pointer prohibited\n",
5108 insn->dst_reg,
5109 bpf_alu_string[opcode >> 4]);
5110 return -EACCES;
f1174f77
EC
5111 } else {
5112 /* scalar += pointer
5113 * This is legal, but we have to reverse our
5114 * src/dest handling in computing the range
5115 */
b5dc0163
AS
5116 err = mark_chain_precision(env, insn->dst_reg);
5117 if (err)
5118 return err;
82abbf8d
AS
5119 return adjust_ptr_min_max_vals(env, insn,
5120 src_reg, dst_reg);
f1174f77
EC
5121 }
5122 } else if (ptr_reg) {
5123 /* pointer += scalar */
b5dc0163
AS
5124 err = mark_chain_precision(env, insn->src_reg);
5125 if (err)
5126 return err;
82abbf8d
AS
5127 return adjust_ptr_min_max_vals(env, insn,
5128 dst_reg, src_reg);
f1174f77
EC
5129 }
5130 } else {
5131 /* Pretend the src is a reg with a known value, since we only
5132 * need to be able to read from this state.
5133 */
5134 off_reg.type = SCALAR_VALUE;
b03c9f9f 5135 __mark_reg_known(&off_reg, insn->imm);
f1174f77 5136 src_reg = &off_reg;
82abbf8d
AS
5137 if (ptr_reg) /* pointer += K */
5138 return adjust_ptr_min_max_vals(env, insn,
5139 ptr_reg, src_reg);
f1174f77
EC
5140 }
5141
5142 /* Got here implies adding two SCALAR_VALUEs */
5143 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 5144 print_verifier_state(env, state);
61bd5218 5145 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
5146 return -EINVAL;
5147 }
5148 if (WARN_ON(!src_reg)) {
f4d7e40a 5149 print_verifier_state(env, state);
61bd5218 5150 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
5151 return -EINVAL;
5152 }
5153 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
5154}
5155
17a52670 5156/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 5157static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 5158{
638f5b90 5159 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
5160 u8 opcode = BPF_OP(insn->code);
5161 int err;
5162
5163 if (opcode == BPF_END || opcode == BPF_NEG) {
5164 if (opcode == BPF_NEG) {
5165 if (BPF_SRC(insn->code) != 0 ||
5166 insn->src_reg != BPF_REG_0 ||
5167 insn->off != 0 || insn->imm != 0) {
61bd5218 5168 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
5169 return -EINVAL;
5170 }
5171 } else {
5172 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
5173 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5174 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 5175 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
5176 return -EINVAL;
5177 }
5178 }
5179
5180 /* check src operand */
dc503a8a 5181 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5182 if (err)
5183 return err;
5184
1be7f75d 5185 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 5186 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
5187 insn->dst_reg);
5188 return -EACCES;
5189 }
5190
17a52670 5191 /* check dest operand */
dc503a8a 5192 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
5193 if (err)
5194 return err;
5195
5196 } else if (opcode == BPF_MOV) {
5197
5198 if (BPF_SRC(insn->code) == BPF_X) {
5199 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5200 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5201 return -EINVAL;
5202 }
5203
5204 /* check src operand */
dc503a8a 5205 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5206 if (err)
5207 return err;
5208 } else {
5209 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5210 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5211 return -EINVAL;
5212 }
5213 }
5214
fbeb1603
AF
5215 /* check dest operand, mark as required later */
5216 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
5217 if (err)
5218 return err;
5219
5220 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
5221 struct bpf_reg_state *src_reg = regs + insn->src_reg;
5222 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
5223
17a52670
AS
5224 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5225 /* case: R1 = R2
5226 * copy register state to dest reg
5227 */
e434b8cd
JW
5228 *dst_reg = *src_reg;
5229 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5230 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 5231 } else {
f1174f77 5232 /* R1 = (u32) R2 */
1be7f75d 5233 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
5234 verbose(env,
5235 "R%d partial copy of pointer\n",
1be7f75d
AS
5236 insn->src_reg);
5237 return -EACCES;
e434b8cd
JW
5238 } else if (src_reg->type == SCALAR_VALUE) {
5239 *dst_reg = *src_reg;
5240 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5241 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
5242 } else {
5243 mark_reg_unknown(env, regs,
5244 insn->dst_reg);
1be7f75d 5245 }
e434b8cd 5246 coerce_reg_to_size(dst_reg, 4);
17a52670
AS
5247 }
5248 } else {
5249 /* case: R = imm
5250 * remember the value we stored into this reg
5251 */
fbeb1603
AF
5252 /* clear any state __mark_reg_known doesn't set */
5253 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 5254 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
5255 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5256 __mark_reg_known(regs + insn->dst_reg,
5257 insn->imm);
5258 } else {
5259 __mark_reg_known(regs + insn->dst_reg,
5260 (u32)insn->imm);
5261 }
17a52670
AS
5262 }
5263
5264 } else if (opcode > BPF_END) {
61bd5218 5265 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
5266 return -EINVAL;
5267
5268 } else { /* all other ALU ops: and, sub, xor, add, ... */
5269
17a52670
AS
5270 if (BPF_SRC(insn->code) == BPF_X) {
5271 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5272 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5273 return -EINVAL;
5274 }
5275 /* check src1 operand */
dc503a8a 5276 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5277 if (err)
5278 return err;
5279 } else {
5280 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5281 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5282 return -EINVAL;
5283 }
5284 }
5285
5286 /* check src2 operand */
dc503a8a 5287 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5288 if (err)
5289 return err;
5290
5291 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5292 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 5293 verbose(env, "div by zero\n");
17a52670
AS
5294 return -EINVAL;
5295 }
5296
229394e8
RV
5297 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5298 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5299 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5300
5301 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 5302 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
5303 return -EINVAL;
5304 }
5305 }
5306
1a0dc1ac 5307 /* check dest operand */
dc503a8a 5308 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
5309 if (err)
5310 return err;
5311
f1174f77 5312 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
5313 }
5314
5315 return 0;
5316}
5317
c6a9efa1
PC
5318static void __find_good_pkt_pointers(struct bpf_func_state *state,
5319 struct bpf_reg_state *dst_reg,
5320 enum bpf_reg_type type, u16 new_range)
5321{
5322 struct bpf_reg_state *reg;
5323 int i;
5324
5325 for (i = 0; i < MAX_BPF_REG; i++) {
5326 reg = &state->regs[i];
5327 if (reg->type == type && reg->id == dst_reg->id)
5328 /* keep the maximum range already checked */
5329 reg->range = max(reg->range, new_range);
5330 }
5331
5332 bpf_for_each_spilled_reg(i, state, reg) {
5333 if (!reg)
5334 continue;
5335 if (reg->type == type && reg->id == dst_reg->id)
5336 reg->range = max(reg->range, new_range);
5337 }
5338}
5339
f4d7e40a 5340static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 5341 struct bpf_reg_state *dst_reg,
f8ddadc4 5342 enum bpf_reg_type type,
fb2a311a 5343 bool range_right_open)
969bf05e 5344{
fb2a311a 5345 u16 new_range;
c6a9efa1 5346 int i;
2d2be8ca 5347
fb2a311a
DB
5348 if (dst_reg->off < 0 ||
5349 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
5350 /* This doesn't give us any range */
5351 return;
5352
b03c9f9f
EC
5353 if (dst_reg->umax_value > MAX_PACKET_OFF ||
5354 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
5355 /* Risk of overflow. For instance, ptr + (1<<63) may be less
5356 * than pkt_end, but that's because it's also less than pkt.
5357 */
5358 return;
5359
fb2a311a
DB
5360 new_range = dst_reg->off;
5361 if (range_right_open)
5362 new_range--;
5363
5364 /* Examples for register markings:
2d2be8ca 5365 *
fb2a311a 5366 * pkt_data in dst register:
2d2be8ca
DB
5367 *
5368 * r2 = r3;
5369 * r2 += 8;
5370 * if (r2 > pkt_end) goto <handle exception>
5371 * <access okay>
5372 *
b4e432f1
DB
5373 * r2 = r3;
5374 * r2 += 8;
5375 * if (r2 < pkt_end) goto <access okay>
5376 * <handle exception>
5377 *
2d2be8ca
DB
5378 * Where:
5379 * r2 == dst_reg, pkt_end == src_reg
5380 * r2=pkt(id=n,off=8,r=0)
5381 * r3=pkt(id=n,off=0,r=0)
5382 *
fb2a311a 5383 * pkt_data in src register:
2d2be8ca
DB
5384 *
5385 * r2 = r3;
5386 * r2 += 8;
5387 * if (pkt_end >= r2) goto <access okay>
5388 * <handle exception>
5389 *
b4e432f1
DB
5390 * r2 = r3;
5391 * r2 += 8;
5392 * if (pkt_end <= r2) goto <handle exception>
5393 * <access okay>
5394 *
2d2be8ca
DB
5395 * Where:
5396 * pkt_end == dst_reg, r2 == src_reg
5397 * r2=pkt(id=n,off=8,r=0)
5398 * r3=pkt(id=n,off=0,r=0)
5399 *
5400 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
5401 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5402 * and [r3, r3 + 8-1) respectively is safe to access depending on
5403 * the check.
969bf05e 5404 */
2d2be8ca 5405
f1174f77
EC
5406 /* If our ids match, then we must have the same max_value. And we
5407 * don't care about the other reg's fixed offset, since if it's too big
5408 * the range won't allow anything.
5409 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5410 */
c6a9efa1
PC
5411 for (i = 0; i <= vstate->curframe; i++)
5412 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5413 new_range);
969bf05e
AS
5414}
5415
4f7b3e82
AS
5416/* compute branch direction of the expression "if (reg opcode val) goto target;"
5417 * and return:
5418 * 1 - branch will be taken and "goto target" will be executed
5419 * 0 - branch will not be taken and fall-through to next insn
5420 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
5421 */
092ed096
JW
5422static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5423 bool is_jmp32)
4f7b3e82 5424{
092ed096 5425 struct bpf_reg_state reg_lo;
a72dafaf
JW
5426 s64 sval;
5427
4f7b3e82
AS
5428 if (__is_pointer_value(false, reg))
5429 return -1;
5430
092ed096
JW
5431 if (is_jmp32) {
5432 reg_lo = *reg;
5433 reg = &reg_lo;
5434 /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5435 * could truncate high bits and update umin/umax according to
5436 * information of low bits.
5437 */
5438 coerce_reg_to_size(reg, 4);
5439 /* smin/smax need special handling. For example, after coerce,
5440 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5441 * used as operand to JMP32. It is a negative number from s32's
5442 * point of view, while it is a positive number when seen as
5443 * s64. The smin/smax are kept as s64, therefore, when used with
5444 * JMP32, they need to be transformed into s32, then sign
5445 * extended back to s64.
5446 *
5447 * Also, smin/smax were copied from umin/umax. If umin/umax has
5448 * different sign bit, then min/max relationship doesn't
5449 * maintain after casting into s32, for this case, set smin/smax
5450 * to safest range.
5451 */
5452 if ((reg->umax_value ^ reg->umin_value) &
5453 (1ULL << 31)) {
5454 reg->smin_value = S32_MIN;
5455 reg->smax_value = S32_MAX;
5456 }
5457 reg->smin_value = (s64)(s32)reg->smin_value;
5458 reg->smax_value = (s64)(s32)reg->smax_value;
5459
5460 val = (u32)val;
5461 sval = (s64)(s32)val;
5462 } else {
5463 sval = (s64)val;
5464 }
a72dafaf 5465
4f7b3e82
AS
5466 switch (opcode) {
5467 case BPF_JEQ:
5468 if (tnum_is_const(reg->var_off))
5469 return !!tnum_equals_const(reg->var_off, val);
5470 break;
5471 case BPF_JNE:
5472 if (tnum_is_const(reg->var_off))
5473 return !tnum_equals_const(reg->var_off, val);
5474 break;
960ea056
JK
5475 case BPF_JSET:
5476 if ((~reg->var_off.mask & reg->var_off.value) & val)
5477 return 1;
5478 if (!((reg->var_off.mask | reg->var_off.value) & val))
5479 return 0;
5480 break;
4f7b3e82
AS
5481 case BPF_JGT:
5482 if (reg->umin_value > val)
5483 return 1;
5484 else if (reg->umax_value <= val)
5485 return 0;
5486 break;
5487 case BPF_JSGT:
a72dafaf 5488 if (reg->smin_value > sval)
4f7b3e82 5489 return 1;
a72dafaf 5490 else if (reg->smax_value < sval)
4f7b3e82
AS
5491 return 0;
5492 break;
5493 case BPF_JLT:
5494 if (reg->umax_value < val)
5495 return 1;
5496 else if (reg->umin_value >= val)
5497 return 0;
5498 break;
5499 case BPF_JSLT:
a72dafaf 5500 if (reg->smax_value < sval)
4f7b3e82 5501 return 1;
a72dafaf 5502 else if (reg->smin_value >= sval)
4f7b3e82
AS
5503 return 0;
5504 break;
5505 case BPF_JGE:
5506 if (reg->umin_value >= val)
5507 return 1;
5508 else if (reg->umax_value < val)
5509 return 0;
5510 break;
5511 case BPF_JSGE:
a72dafaf 5512 if (reg->smin_value >= sval)
4f7b3e82 5513 return 1;
a72dafaf 5514 else if (reg->smax_value < sval)
4f7b3e82
AS
5515 return 0;
5516 break;
5517 case BPF_JLE:
5518 if (reg->umax_value <= val)
5519 return 1;
5520 else if (reg->umin_value > val)
5521 return 0;
5522 break;
5523 case BPF_JSLE:
a72dafaf 5524 if (reg->smax_value <= sval)
4f7b3e82 5525 return 1;
a72dafaf 5526 else if (reg->smin_value > sval)
4f7b3e82
AS
5527 return 0;
5528 break;
5529 }
5530
5531 return -1;
5532}
5533
092ed096
JW
5534/* Generate min value of the high 32-bit from TNUM info. */
5535static u64 gen_hi_min(struct tnum var)
5536{
5537 return var.value & ~0xffffffffULL;
5538}
5539
5540/* Generate max value of the high 32-bit from TNUM info. */
5541static u64 gen_hi_max(struct tnum var)
5542{
5543 return (var.value | var.mask) & ~0xffffffffULL;
5544}
5545
5546/* Return true if VAL is compared with a s64 sign extended from s32, and they
5547 * are with the same signedness.
5548 */
5549static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5550{
5551 return ((s32)sval >= 0 &&
5552 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5553 ((s32)sval < 0 &&
5554 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5555}
5556
48461135
JB
5557/* Adjusts the register min/max values in the case that the dst_reg is the
5558 * variable register that we are working on, and src_reg is a constant or we're
5559 * simply doing a BPF_K check.
f1174f77 5560 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
5561 */
5562static void reg_set_min_max(struct bpf_reg_state *true_reg,
5563 struct bpf_reg_state *false_reg, u64 val,
092ed096 5564 u8 opcode, bool is_jmp32)
48461135 5565{
a72dafaf
JW
5566 s64 sval;
5567
f1174f77
EC
5568 /* If the dst_reg is a pointer, we can't learn anything about its
5569 * variable offset from the compare (unless src_reg were a pointer into
5570 * the same object, but we don't bother with that.
5571 * Since false_reg and true_reg have the same type by construction, we
5572 * only need to check one of them for pointerness.
5573 */
5574 if (__is_pointer_value(false, false_reg))
5575 return;
4cabc5b1 5576
092ed096
JW
5577 val = is_jmp32 ? (u32)val : val;
5578 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5579
48461135
JB
5580 switch (opcode) {
5581 case BPF_JEQ:
48461135 5582 case BPF_JNE:
a72dafaf
JW
5583 {
5584 struct bpf_reg_state *reg =
5585 opcode == BPF_JEQ ? true_reg : false_reg;
5586
5587 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
5588 * if it is true we know the value for sure. Likewise for
5589 * BPF_JNE.
48461135 5590 */
092ed096
JW
5591 if (is_jmp32) {
5592 u64 old_v = reg->var_off.value;
5593 u64 hi_mask = ~0xffffffffULL;
5594
5595 reg->var_off.value = (old_v & hi_mask) | val;
5596 reg->var_off.mask &= hi_mask;
5597 } else {
5598 __mark_reg_known(reg, val);
5599 }
48461135 5600 break;
a72dafaf 5601 }
960ea056
JK
5602 case BPF_JSET:
5603 false_reg->var_off = tnum_and(false_reg->var_off,
5604 tnum_const(~val));
5605 if (is_power_of_2(val))
5606 true_reg->var_off = tnum_or(true_reg->var_off,
5607 tnum_const(val));
5608 break;
48461135 5609 case BPF_JGE:
a72dafaf
JW
5610 case BPF_JGT:
5611 {
5612 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
5613 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
5614
092ed096
JW
5615 if (is_jmp32) {
5616 false_umax += gen_hi_max(false_reg->var_off);
5617 true_umin += gen_hi_min(true_reg->var_off);
5618 }
a72dafaf
JW
5619 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5620 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b03c9f9f 5621 break;
a72dafaf 5622 }
48461135 5623 case BPF_JSGE:
a72dafaf
JW
5624 case BPF_JSGT:
5625 {
5626 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
5627 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5628
092ed096
JW
5629 /* If the full s64 was not sign-extended from s32 then don't
5630 * deduct further info.
5631 */
5632 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5633 break;
a72dafaf
JW
5634 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5635 true_reg->smin_value = max(true_reg->smin_value, true_smin);
48461135 5636 break;
a72dafaf 5637 }
b4e432f1 5638 case BPF_JLE:
a72dafaf
JW
5639 case BPF_JLT:
5640 {
5641 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
5642 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5643
092ed096
JW
5644 if (is_jmp32) {
5645 false_umin += gen_hi_min(false_reg->var_off);
5646 true_umax += gen_hi_max(true_reg->var_off);
5647 }
a72dafaf
JW
5648 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5649 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b4e432f1 5650 break;
a72dafaf 5651 }
b4e432f1 5652 case BPF_JSLE:
a72dafaf
JW
5653 case BPF_JSLT:
5654 {
5655 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
5656 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5657
092ed096
JW
5658 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5659 break;
a72dafaf
JW
5660 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5661 true_reg->smax_value = min(true_reg->smax_value, true_smax);
b4e432f1 5662 break;
a72dafaf 5663 }
48461135
JB
5664 default:
5665 break;
5666 }
5667
b03c9f9f
EC
5668 __reg_deduce_bounds(false_reg);
5669 __reg_deduce_bounds(true_reg);
5670 /* We might have learned some bits from the bounds. */
5671 __reg_bound_offset(false_reg);
5672 __reg_bound_offset(true_reg);
581738a6
YS
5673 if (is_jmp32) {
5674 __reg_bound_offset32(false_reg);
5675 __reg_bound_offset32(true_reg);
5676 }
b03c9f9f
EC
5677 /* Intersecting with the old var_off might have improved our bounds
5678 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5679 * then new var_off is (0; 0x7f...fc) which improves our umax.
5680 */
5681 __update_reg_bounds(false_reg);
5682 __update_reg_bounds(true_reg);
48461135
JB
5683}
5684
f1174f77
EC
5685/* Same as above, but for the case that dst_reg holds a constant and src_reg is
5686 * the variable reg.
48461135
JB
5687 */
5688static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5689 struct bpf_reg_state *false_reg, u64 val,
092ed096 5690 u8 opcode, bool is_jmp32)
48461135 5691{
a72dafaf
JW
5692 s64 sval;
5693
f1174f77
EC
5694 if (__is_pointer_value(false, false_reg))
5695 return;
4cabc5b1 5696
092ed096
JW
5697 val = is_jmp32 ? (u32)val : val;
5698 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5699
48461135
JB
5700 switch (opcode) {
5701 case BPF_JEQ:
48461135 5702 case BPF_JNE:
a72dafaf
JW
5703 {
5704 struct bpf_reg_state *reg =
5705 opcode == BPF_JEQ ? true_reg : false_reg;
5706
092ed096
JW
5707 if (is_jmp32) {
5708 u64 old_v = reg->var_off.value;
5709 u64 hi_mask = ~0xffffffffULL;
5710
5711 reg->var_off.value = (old_v & hi_mask) | val;
5712 reg->var_off.mask &= hi_mask;
5713 } else {
5714 __mark_reg_known(reg, val);
5715 }
48461135 5716 break;
a72dafaf 5717 }
960ea056
JK
5718 case BPF_JSET:
5719 false_reg->var_off = tnum_and(false_reg->var_off,
5720 tnum_const(~val));
5721 if (is_power_of_2(val))
5722 true_reg->var_off = tnum_or(true_reg->var_off,
5723 tnum_const(val));
5724 break;
48461135 5725 case BPF_JGE:
a72dafaf
JW
5726 case BPF_JGT:
5727 {
5728 u64 false_umin = opcode == BPF_JGT ? val : val + 1;
5729 u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
5730
092ed096
JW
5731 if (is_jmp32) {
5732 false_umin += gen_hi_min(false_reg->var_off);
5733 true_umax += gen_hi_max(true_reg->var_off);
5734 }
a72dafaf
JW
5735 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5736 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b03c9f9f 5737 break;
a72dafaf 5738 }
48461135 5739 case BPF_JSGE:
a72dafaf
JW
5740 case BPF_JSGT:
5741 {
5742 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
5743 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5744
092ed096
JW
5745 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5746 break;
a72dafaf
JW
5747 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5748 true_reg->smax_value = min(true_reg->smax_value, true_smax);
48461135 5749 break;
a72dafaf 5750 }
b4e432f1 5751 case BPF_JLE:
a72dafaf
JW
5752 case BPF_JLT:
5753 {
5754 u64 false_umax = opcode == BPF_JLT ? val : val - 1;
5755 u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5756
092ed096
JW
5757 if (is_jmp32) {
5758 false_umax += gen_hi_max(false_reg->var_off);
5759 true_umin += gen_hi_min(true_reg->var_off);
5760 }
a72dafaf
JW
5761 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5762 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b4e432f1 5763 break;
a72dafaf 5764 }
b4e432f1 5765 case BPF_JSLE:
a72dafaf
JW
5766 case BPF_JSLT:
5767 {
5768 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
5769 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5770
092ed096
JW
5771 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5772 break;
a72dafaf
JW
5773 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5774 true_reg->smin_value = max(true_reg->smin_value, true_smin);
b4e432f1 5775 break;
a72dafaf 5776 }
48461135
JB
5777 default:
5778 break;
5779 }
5780
b03c9f9f
EC
5781 __reg_deduce_bounds(false_reg);
5782 __reg_deduce_bounds(true_reg);
5783 /* We might have learned some bits from the bounds. */
5784 __reg_bound_offset(false_reg);
5785 __reg_bound_offset(true_reg);
581738a6
YS
5786 if (is_jmp32) {
5787 __reg_bound_offset32(false_reg);
5788 __reg_bound_offset32(true_reg);
5789 }
b03c9f9f
EC
5790 /* Intersecting with the old var_off might have improved our bounds
5791 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5792 * then new var_off is (0; 0x7f...fc) which improves our umax.
5793 */
5794 __update_reg_bounds(false_reg);
5795 __update_reg_bounds(true_reg);
f1174f77
EC
5796}
5797
5798/* Regs are known to be equal, so intersect their min/max/var_off */
5799static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5800 struct bpf_reg_state *dst_reg)
5801{
b03c9f9f
EC
5802 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5803 dst_reg->umin_value);
5804 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5805 dst_reg->umax_value);
5806 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5807 dst_reg->smin_value);
5808 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5809 dst_reg->smax_value);
f1174f77
EC
5810 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5811 dst_reg->var_off);
b03c9f9f
EC
5812 /* We might have learned new bounds from the var_off. */
5813 __update_reg_bounds(src_reg);
5814 __update_reg_bounds(dst_reg);
5815 /* We might have learned something about the sign bit. */
5816 __reg_deduce_bounds(src_reg);
5817 __reg_deduce_bounds(dst_reg);
5818 /* We might have learned some bits from the bounds. */
5819 __reg_bound_offset(src_reg);
5820 __reg_bound_offset(dst_reg);
5821 /* Intersecting with the old var_off might have improved our bounds
5822 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5823 * then new var_off is (0; 0x7f...fc) which improves our umax.
5824 */
5825 __update_reg_bounds(src_reg);
5826 __update_reg_bounds(dst_reg);
f1174f77
EC
5827}
5828
5829static void reg_combine_min_max(struct bpf_reg_state *true_src,
5830 struct bpf_reg_state *true_dst,
5831 struct bpf_reg_state *false_src,
5832 struct bpf_reg_state *false_dst,
5833 u8 opcode)
5834{
5835 switch (opcode) {
5836 case BPF_JEQ:
5837 __reg_combine_min_max(true_src, true_dst);
5838 break;
5839 case BPF_JNE:
5840 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 5841 break;
4cabc5b1 5842 }
48461135
JB
5843}
5844
fd978bf7
JS
5845static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5846 struct bpf_reg_state *reg, u32 id,
840b9615 5847 bool is_null)
57a09bf0 5848{
840b9615 5849 if (reg_type_may_be_null(reg->type) && reg->id == id) {
f1174f77
EC
5850 /* Old offset (both fixed and variable parts) should
5851 * have been known-zero, because we don't allow pointer
5852 * arithmetic on pointers that might be NULL.
5853 */
b03c9f9f
EC
5854 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5855 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 5856 reg->off)) {
b03c9f9f
EC
5857 __mark_reg_known_zero(reg);
5858 reg->off = 0;
f1174f77
EC
5859 }
5860 if (is_null) {
5861 reg->type = SCALAR_VALUE;
840b9615
JS
5862 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5863 if (reg->map_ptr->inner_map_meta) {
5864 reg->type = CONST_PTR_TO_MAP;
5865 reg->map_ptr = reg->map_ptr->inner_map_meta;
fada7fdc
JL
5866 } else if (reg->map_ptr->map_type ==
5867 BPF_MAP_TYPE_XSKMAP) {
5868 reg->type = PTR_TO_XDP_SOCK;
840b9615
JS
5869 } else {
5870 reg->type = PTR_TO_MAP_VALUE;
5871 }
c64b7983
JS
5872 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
5873 reg->type = PTR_TO_SOCKET;
46f8bc92
MKL
5874 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
5875 reg->type = PTR_TO_SOCK_COMMON;
655a51e5
MKL
5876 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
5877 reg->type = PTR_TO_TCP_SOCK;
56f668df 5878 }
1b986589
MKL
5879 if (is_null) {
5880 /* We don't need id and ref_obj_id from this point
5881 * onwards anymore, thus we should better reset it,
5882 * so that state pruning has chances to take effect.
5883 */
5884 reg->id = 0;
5885 reg->ref_obj_id = 0;
5886 } else if (!reg_may_point_to_spin_lock(reg)) {
5887 /* For not-NULL ptr, reg->ref_obj_id will be reset
5888 * in release_reg_references().
5889 *
5890 * reg->id is still used by spin_lock ptr. Other
5891 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
5892 */
5893 reg->id = 0;
56f668df 5894 }
57a09bf0
TG
5895 }
5896}
5897
c6a9efa1
PC
5898static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
5899 bool is_null)
5900{
5901 struct bpf_reg_state *reg;
5902 int i;
5903
5904 for (i = 0; i < MAX_BPF_REG; i++)
5905 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
5906
5907 bpf_for_each_spilled_reg(i, state, reg) {
5908 if (!reg)
5909 continue;
5910 mark_ptr_or_null_reg(state, reg, id, is_null);
5911 }
5912}
5913
57a09bf0
TG
5914/* The logic is similar to find_good_pkt_pointers(), both could eventually
5915 * be folded together at some point.
5916 */
840b9615
JS
5917static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
5918 bool is_null)
57a09bf0 5919{
f4d7e40a 5920 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 5921 struct bpf_reg_state *regs = state->regs;
1b986589 5922 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 5923 u32 id = regs[regno].id;
c6a9efa1 5924 int i;
57a09bf0 5925
1b986589
MKL
5926 if (ref_obj_id && ref_obj_id == id && is_null)
5927 /* regs[regno] is in the " == NULL" branch.
5928 * No one could have freed the reference state before
5929 * doing the NULL check.
5930 */
5931 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 5932
c6a9efa1
PC
5933 for (i = 0; i <= vstate->curframe; i++)
5934 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
5935}
5936
5beca081
DB
5937static bool try_match_pkt_pointers(const struct bpf_insn *insn,
5938 struct bpf_reg_state *dst_reg,
5939 struct bpf_reg_state *src_reg,
5940 struct bpf_verifier_state *this_branch,
5941 struct bpf_verifier_state *other_branch)
5942{
5943 if (BPF_SRC(insn->code) != BPF_X)
5944 return false;
5945
092ed096
JW
5946 /* Pointers are always 64-bit. */
5947 if (BPF_CLASS(insn->code) == BPF_JMP32)
5948 return false;
5949
5beca081
DB
5950 switch (BPF_OP(insn->code)) {
5951 case BPF_JGT:
5952 if ((dst_reg->type == PTR_TO_PACKET &&
5953 src_reg->type == PTR_TO_PACKET_END) ||
5954 (dst_reg->type == PTR_TO_PACKET_META &&
5955 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5956 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
5957 find_good_pkt_pointers(this_branch, dst_reg,
5958 dst_reg->type, false);
5959 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5960 src_reg->type == PTR_TO_PACKET) ||
5961 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5962 src_reg->type == PTR_TO_PACKET_META)) {
5963 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
5964 find_good_pkt_pointers(other_branch, src_reg,
5965 src_reg->type, true);
5966 } else {
5967 return false;
5968 }
5969 break;
5970 case BPF_JLT:
5971 if ((dst_reg->type == PTR_TO_PACKET &&
5972 src_reg->type == PTR_TO_PACKET_END) ||
5973 (dst_reg->type == PTR_TO_PACKET_META &&
5974 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5975 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
5976 find_good_pkt_pointers(other_branch, dst_reg,
5977 dst_reg->type, true);
5978 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5979 src_reg->type == PTR_TO_PACKET) ||
5980 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5981 src_reg->type == PTR_TO_PACKET_META)) {
5982 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
5983 find_good_pkt_pointers(this_branch, src_reg,
5984 src_reg->type, false);
5985 } else {
5986 return false;
5987 }
5988 break;
5989 case BPF_JGE:
5990 if ((dst_reg->type == PTR_TO_PACKET &&
5991 src_reg->type == PTR_TO_PACKET_END) ||
5992 (dst_reg->type == PTR_TO_PACKET_META &&
5993 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5994 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
5995 find_good_pkt_pointers(this_branch, dst_reg,
5996 dst_reg->type, true);
5997 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5998 src_reg->type == PTR_TO_PACKET) ||
5999 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6000 src_reg->type == PTR_TO_PACKET_META)) {
6001 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
6002 find_good_pkt_pointers(other_branch, src_reg,
6003 src_reg->type, false);
6004 } else {
6005 return false;
6006 }
6007 break;
6008 case BPF_JLE:
6009 if ((dst_reg->type == PTR_TO_PACKET &&
6010 src_reg->type == PTR_TO_PACKET_END) ||
6011 (dst_reg->type == PTR_TO_PACKET_META &&
6012 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6013 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6014 find_good_pkt_pointers(other_branch, dst_reg,
6015 dst_reg->type, false);
6016 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6017 src_reg->type == PTR_TO_PACKET) ||
6018 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6019 src_reg->type == PTR_TO_PACKET_META)) {
6020 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6021 find_good_pkt_pointers(this_branch, src_reg,
6022 src_reg->type, true);
6023 } else {
6024 return false;
6025 }
6026 break;
6027 default:
6028 return false;
6029 }
6030
6031 return true;
6032}
6033
58e2af8b 6034static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
6035 struct bpf_insn *insn, int *insn_idx)
6036{
f4d7e40a
AS
6037 struct bpf_verifier_state *this_branch = env->cur_state;
6038 struct bpf_verifier_state *other_branch;
6039 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 6040 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 6041 u8 opcode = BPF_OP(insn->code);
092ed096 6042 bool is_jmp32;
fb8d251e 6043 int pred = -1;
17a52670
AS
6044 int err;
6045
092ed096
JW
6046 /* Only conditional jumps are expected to reach here. */
6047 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6048 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
6049 return -EINVAL;
6050 }
6051
6052 if (BPF_SRC(insn->code) == BPF_X) {
6053 if (insn->imm != 0) {
092ed096 6054 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6055 return -EINVAL;
6056 }
6057
6058 /* check src1 operand */
dc503a8a 6059 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6060 if (err)
6061 return err;
1be7f75d
AS
6062
6063 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6064 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
6065 insn->src_reg);
6066 return -EACCES;
6067 }
fb8d251e 6068 src_reg = &regs[insn->src_reg];
17a52670
AS
6069 } else {
6070 if (insn->src_reg != BPF_REG_0) {
092ed096 6071 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6072 return -EINVAL;
6073 }
6074 }
6075
6076 /* check src2 operand */
dc503a8a 6077 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6078 if (err)
6079 return err;
6080
1a0dc1ac 6081 dst_reg = &regs[insn->dst_reg];
092ed096 6082 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 6083
fb8d251e
AS
6084 if (BPF_SRC(insn->code) == BPF_K)
6085 pred = is_branch_taken(dst_reg, insn->imm,
6086 opcode, is_jmp32);
6087 else if (src_reg->type == SCALAR_VALUE &&
6088 tnum_is_const(src_reg->var_off))
6089 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6090 opcode, is_jmp32);
b5dc0163
AS
6091 if (pred >= 0) {
6092 err = mark_chain_precision(env, insn->dst_reg);
6093 if (BPF_SRC(insn->code) == BPF_X && !err)
6094 err = mark_chain_precision(env, insn->src_reg);
6095 if (err)
6096 return err;
6097 }
fb8d251e
AS
6098 if (pred == 1) {
6099 /* only follow the goto, ignore fall-through */
6100 *insn_idx += insn->off;
6101 return 0;
6102 } else if (pred == 0) {
6103 /* only follow fall-through branch, since
6104 * that's where the program will go
6105 */
6106 return 0;
17a52670
AS
6107 }
6108
979d63d5
DB
6109 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6110 false);
17a52670
AS
6111 if (!other_branch)
6112 return -EFAULT;
f4d7e40a 6113 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 6114
48461135
JB
6115 /* detect if we are comparing against a constant value so we can adjust
6116 * our min/max values for our dst register.
f1174f77
EC
6117 * this is only legit if both are scalars (or pointers to the same
6118 * object, I suppose, but we don't support that right now), because
6119 * otherwise the different base pointers mean the offsets aren't
6120 * comparable.
48461135
JB
6121 */
6122 if (BPF_SRC(insn->code) == BPF_X) {
092ed096
JW
6123 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6124 struct bpf_reg_state lo_reg0 = *dst_reg;
6125 struct bpf_reg_state lo_reg1 = *src_reg;
6126 struct bpf_reg_state *src_lo, *dst_lo;
6127
6128 dst_lo = &lo_reg0;
6129 src_lo = &lo_reg1;
6130 coerce_reg_to_size(dst_lo, 4);
6131 coerce_reg_to_size(src_lo, 4);
6132
f1174f77 6133 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
6134 src_reg->type == SCALAR_VALUE) {
6135 if (tnum_is_const(src_reg->var_off) ||
6136 (is_jmp32 && tnum_is_const(src_lo->var_off)))
f4d7e40a 6137 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096
JW
6138 dst_reg,
6139 is_jmp32
6140 ? src_lo->var_off.value
6141 : src_reg->var_off.value,
6142 opcode, is_jmp32);
6143 else if (tnum_is_const(dst_reg->var_off) ||
6144 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
f4d7e40a 6145 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096
JW
6146 src_reg,
6147 is_jmp32
6148 ? dst_lo->var_off.value
6149 : dst_reg->var_off.value,
6150 opcode, is_jmp32);
6151 else if (!is_jmp32 &&
6152 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 6153 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
6154 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6155 &other_branch_regs[insn->dst_reg],
092ed096 6156 src_reg, dst_reg, opcode);
f1174f77
EC
6157 }
6158 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 6159 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 6160 dst_reg, insn->imm, opcode, is_jmp32);
48461135
JB
6161 }
6162
092ed096
JW
6163 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6164 * NOTE: these optimizations below are related with pointer comparison
6165 * which will never be JMP32.
6166 */
6167 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 6168 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
840b9615
JS
6169 reg_type_may_be_null(dst_reg->type)) {
6170 /* Mark all identical registers in each branch as either
57a09bf0
TG
6171 * safe or unknown depending R == 0 or R != 0 conditional.
6172 */
840b9615
JS
6173 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6174 opcode == BPF_JNE);
6175 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6176 opcode == BPF_JEQ);
5beca081
DB
6177 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6178 this_branch, other_branch) &&
6179 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
6180 verbose(env, "R%d pointer comparison prohibited\n",
6181 insn->dst_reg);
1be7f75d 6182 return -EACCES;
17a52670 6183 }
06ee7115 6184 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 6185 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
6186 return 0;
6187}
6188
17a52670 6189/* verify BPF_LD_IMM64 instruction */
58e2af8b 6190static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6191{
d8eca5bb 6192 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 6193 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 6194 struct bpf_map *map;
17a52670
AS
6195 int err;
6196
6197 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 6198 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
6199 return -EINVAL;
6200 }
6201 if (insn->off != 0) {
61bd5218 6202 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
6203 return -EINVAL;
6204 }
6205
dc503a8a 6206 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
6207 if (err)
6208 return err;
6209
6b173873 6210 if (insn->src_reg == 0) {
6b173873
JK
6211 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
6212
f1174f77 6213 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 6214 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 6215 return 0;
6b173873 6216 }
17a52670 6217
d8eca5bb
DB
6218 map = env->used_maps[aux->map_index];
6219 mark_reg_known_zero(env, regs, insn->dst_reg);
6220 regs[insn->dst_reg].map_ptr = map;
6221
6222 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6223 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6224 regs[insn->dst_reg].off = aux->map_off;
6225 if (map_value_has_spin_lock(map))
6226 regs[insn->dst_reg].id = ++env->id_gen;
6227 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6228 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
6229 } else {
6230 verbose(env, "bpf verifier is misconfigured\n");
6231 return -EINVAL;
6232 }
17a52670 6233
17a52670
AS
6234 return 0;
6235}
6236
96be4325
DB
6237static bool may_access_skb(enum bpf_prog_type type)
6238{
6239 switch (type) {
6240 case BPF_PROG_TYPE_SOCKET_FILTER:
6241 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 6242 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
6243 return true;
6244 default:
6245 return false;
6246 }
6247}
6248
ddd872bc
AS
6249/* verify safety of LD_ABS|LD_IND instructions:
6250 * - they can only appear in the programs where ctx == skb
6251 * - since they are wrappers of function calls, they scratch R1-R5 registers,
6252 * preserve R6-R9, and store return value into R0
6253 *
6254 * Implicit input:
6255 * ctx == skb == R6 == CTX
6256 *
6257 * Explicit input:
6258 * SRC == any register
6259 * IMM == 32-bit immediate
6260 *
6261 * Output:
6262 * R0 - 8/16/32-bit skb data converted to cpu endianness
6263 */
58e2af8b 6264static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 6265{
638f5b90 6266 struct bpf_reg_state *regs = cur_regs(env);
ddd872bc 6267 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
6268 int i, err;
6269
24701ece 6270 if (!may_access_skb(env->prog->type)) {
61bd5218 6271 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
6272 return -EINVAL;
6273 }
6274
e0cea7ce
DB
6275 if (!env->ops->gen_ld_abs) {
6276 verbose(env, "bpf verifier is misconfigured\n");
6277 return -EINVAL;
6278 }
6279
f910cefa 6280 if (env->subprog_cnt > 1) {
f4d7e40a
AS
6281 /* when program has LD_ABS insn JITs and interpreter assume
6282 * that r1 == ctx == skb which is not the case for callees
6283 * that can have arbitrary arguments. It's problematic
6284 * for main prog as well since JITs would need to analyze
6285 * all functions in order to make proper register save/restore
6286 * decisions in the main prog. Hence disallow LD_ABS with calls
6287 */
6288 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6289 return -EINVAL;
6290 }
6291
ddd872bc 6292 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 6293 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 6294 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 6295 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
6296 return -EINVAL;
6297 }
6298
6299 /* check whether implicit source operand (register R6) is readable */
dc503a8a 6300 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
ddd872bc
AS
6301 if (err)
6302 return err;
6303
fd978bf7
JS
6304 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6305 * gen_ld_abs() may terminate the program at runtime, leading to
6306 * reference leak.
6307 */
6308 err = check_reference_leak(env);
6309 if (err) {
6310 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6311 return err;
6312 }
6313
d83525ca
AS
6314 if (env->cur_state->active_spin_lock) {
6315 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6316 return -EINVAL;
6317 }
6318
ddd872bc 6319 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
61bd5218
JK
6320 verbose(env,
6321 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
6322 return -EINVAL;
6323 }
6324
6325 if (mode == BPF_IND) {
6326 /* check explicit source operand */
dc503a8a 6327 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
6328 if (err)
6329 return err;
6330 }
6331
6332 /* reset caller saved regs to unreadable */
dc503a8a 6333 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 6334 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
6335 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6336 }
ddd872bc
AS
6337
6338 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
6339 * the value fetched from the packet.
6340 * Already marked as written above.
ddd872bc 6341 */
61bd5218 6342 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
6343 /* ld_abs load up to 32-bit skb data. */
6344 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
6345 return 0;
6346}
6347
390ee7e2
AS
6348static int check_return_code(struct bpf_verifier_env *env)
6349{
5cf1e914 6350 struct tnum enforce_attach_type_range = tnum_unknown;
390ee7e2
AS
6351 struct bpf_reg_state *reg;
6352 struct tnum range = tnum_range(0, 1);
6353
6354 switch (env->prog->type) {
983695fa
DB
6355 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6356 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6357 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6358 range = tnum_range(1, 1);
ed4ed404 6359 break;
390ee7e2 6360 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 6361 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6362 range = tnum_range(0, 3);
6363 enforce_attach_type_range = tnum_range(2, 3);
6364 }
ed4ed404 6365 break;
390ee7e2
AS
6366 case BPF_PROG_TYPE_CGROUP_SOCK:
6367 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 6368 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 6369 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 6370 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 6371 break;
15ab09bd
AS
6372 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6373 if (!env->prog->aux->attach_btf_id)
6374 return 0;
6375 range = tnum_const(0);
6376 break;
390ee7e2
AS
6377 default:
6378 return 0;
6379 }
6380
638f5b90 6381 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 6382 if (reg->type != SCALAR_VALUE) {
61bd5218 6383 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
390ee7e2
AS
6384 reg_type_str[reg->type]);
6385 return -EINVAL;
6386 }
6387
6388 if (!tnum_in(range, reg->var_off)) {
5cf1e914 6389 char tn_buf[48];
6390
61bd5218 6391 verbose(env, "At program exit the register R0 ");
390ee7e2 6392 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 6393 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 6394 verbose(env, "has value %s", tn_buf);
390ee7e2 6395 } else {
61bd5218 6396 verbose(env, "has unknown scalar value");
390ee7e2 6397 }
5cf1e914 6398 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 6399 verbose(env, " should have been in %s\n", tn_buf);
390ee7e2
AS
6400 return -EINVAL;
6401 }
5cf1e914 6402
6403 if (!tnum_is_unknown(enforce_attach_type_range) &&
6404 tnum_in(enforce_attach_type_range, reg->var_off))
6405 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
6406 return 0;
6407}
6408
475fb78f
AS
6409/* non-recursive DFS pseudo code
6410 * 1 procedure DFS-iterative(G,v):
6411 * 2 label v as discovered
6412 * 3 let S be a stack
6413 * 4 S.push(v)
6414 * 5 while S is not empty
6415 * 6 t <- S.pop()
6416 * 7 if t is what we're looking for:
6417 * 8 return t
6418 * 9 for all edges e in G.adjacentEdges(t) do
6419 * 10 if edge e is already labelled
6420 * 11 continue with the next edge
6421 * 12 w <- G.adjacentVertex(t,e)
6422 * 13 if vertex w is not discovered and not explored
6423 * 14 label e as tree-edge
6424 * 15 label w as discovered
6425 * 16 S.push(w)
6426 * 17 continue at 5
6427 * 18 else if vertex w is discovered
6428 * 19 label e as back-edge
6429 * 20 else
6430 * 21 // vertex w is explored
6431 * 22 label e as forward- or cross-edge
6432 * 23 label t as explored
6433 * 24 S.pop()
6434 *
6435 * convention:
6436 * 0x10 - discovered
6437 * 0x11 - discovered and fall-through edge labelled
6438 * 0x12 - discovered and fall-through and branch edges labelled
6439 * 0x20 - explored
6440 */
6441
6442enum {
6443 DISCOVERED = 0x10,
6444 EXPLORED = 0x20,
6445 FALLTHROUGH = 1,
6446 BRANCH = 2,
6447};
6448
dc2a4ebc
AS
6449static u32 state_htab_size(struct bpf_verifier_env *env)
6450{
6451 return env->prog->len;
6452}
6453
5d839021
AS
6454static struct bpf_verifier_state_list **explored_state(
6455 struct bpf_verifier_env *env,
6456 int idx)
6457{
dc2a4ebc
AS
6458 struct bpf_verifier_state *cur = env->cur_state;
6459 struct bpf_func_state *state = cur->frame[cur->curframe];
6460
6461 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
6462}
6463
6464static void init_explored_state(struct bpf_verifier_env *env, int idx)
6465{
a8f500af 6466 env->insn_aux_data[idx].prune_point = true;
5d839021 6467}
f1bca824 6468
475fb78f
AS
6469/* t, w, e - match pseudo-code above:
6470 * t - index of current instruction
6471 * w - next instruction
6472 * e - edge
6473 */
2589726d
AS
6474static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6475 bool loop_ok)
475fb78f 6476{
7df737e9
AS
6477 int *insn_stack = env->cfg.insn_stack;
6478 int *insn_state = env->cfg.insn_state;
6479
475fb78f
AS
6480 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6481 return 0;
6482
6483 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6484 return 0;
6485
6486 if (w < 0 || w >= env->prog->len) {
d9762e84 6487 verbose_linfo(env, t, "%d: ", t);
61bd5218 6488 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
6489 return -EINVAL;
6490 }
6491
f1bca824
AS
6492 if (e == BRANCH)
6493 /* mark branch target for state pruning */
5d839021 6494 init_explored_state(env, w);
f1bca824 6495
475fb78f
AS
6496 if (insn_state[w] == 0) {
6497 /* tree-edge */
6498 insn_state[t] = DISCOVERED | e;
6499 insn_state[w] = DISCOVERED;
7df737e9 6500 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 6501 return -E2BIG;
7df737e9 6502 insn_stack[env->cfg.cur_stack++] = w;
475fb78f
AS
6503 return 1;
6504 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2589726d
AS
6505 if (loop_ok && env->allow_ptr_leaks)
6506 return 0;
d9762e84
MKL
6507 verbose_linfo(env, t, "%d: ", t);
6508 verbose_linfo(env, w, "%d: ", w);
61bd5218 6509 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
6510 return -EINVAL;
6511 } else if (insn_state[w] == EXPLORED) {
6512 /* forward- or cross-edge */
6513 insn_state[t] = DISCOVERED | e;
6514 } else {
61bd5218 6515 verbose(env, "insn state internal bug\n");
475fb78f
AS
6516 return -EFAULT;
6517 }
6518 return 0;
6519}
6520
6521/* non-recursive depth-first-search to detect loops in BPF program
6522 * loop == back-edge in directed graph
6523 */
58e2af8b 6524static int check_cfg(struct bpf_verifier_env *env)
475fb78f
AS
6525{
6526 struct bpf_insn *insns = env->prog->insnsi;
6527 int insn_cnt = env->prog->len;
7df737e9 6528 int *insn_stack, *insn_state;
475fb78f
AS
6529 int ret = 0;
6530 int i, t;
6531
7df737e9 6532 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
6533 if (!insn_state)
6534 return -ENOMEM;
6535
7df737e9 6536 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 6537 if (!insn_stack) {
71dde681 6538 kvfree(insn_state);
475fb78f
AS
6539 return -ENOMEM;
6540 }
6541
6542 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6543 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 6544 env->cfg.cur_stack = 1;
475fb78f
AS
6545
6546peek_stack:
7df737e9 6547 if (env->cfg.cur_stack == 0)
475fb78f 6548 goto check_state;
7df737e9 6549 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 6550
092ed096
JW
6551 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6552 BPF_CLASS(insns[t].code) == BPF_JMP32) {
475fb78f
AS
6553 u8 opcode = BPF_OP(insns[t].code);
6554
6555 if (opcode == BPF_EXIT) {
6556 goto mark_explored;
6557 } else if (opcode == BPF_CALL) {
2589726d 6558 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6559 if (ret == 1)
6560 goto peek_stack;
6561 else if (ret < 0)
6562 goto err_free;
07016151 6563 if (t + 1 < insn_cnt)
5d839021 6564 init_explored_state(env, t + 1);
cc8b0b92 6565 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 6566 init_explored_state(env, t);
2589726d
AS
6567 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6568 env, false);
cc8b0b92
AS
6569 if (ret == 1)
6570 goto peek_stack;
6571 else if (ret < 0)
6572 goto err_free;
6573 }
475fb78f
AS
6574 } else if (opcode == BPF_JA) {
6575 if (BPF_SRC(insns[t].code) != BPF_K) {
6576 ret = -EINVAL;
6577 goto err_free;
6578 }
6579 /* unconditional jump with single edge */
6580 ret = push_insn(t, t + insns[t].off + 1,
2589726d 6581 FALLTHROUGH, env, true);
475fb78f
AS
6582 if (ret == 1)
6583 goto peek_stack;
6584 else if (ret < 0)
6585 goto err_free;
b5dc0163
AS
6586 /* unconditional jmp is not a good pruning point,
6587 * but it's marked, since backtracking needs
6588 * to record jmp history in is_state_visited().
6589 */
6590 init_explored_state(env, t + insns[t].off + 1);
f1bca824
AS
6591 /* tell verifier to check for equivalent states
6592 * after every call and jump
6593 */
c3de6317 6594 if (t + 1 < insn_cnt)
5d839021 6595 init_explored_state(env, t + 1);
475fb78f
AS
6596 } else {
6597 /* conditional jump with two edges */
5d839021 6598 init_explored_state(env, t);
2589726d 6599 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
475fb78f
AS
6600 if (ret == 1)
6601 goto peek_stack;
6602 else if (ret < 0)
6603 goto err_free;
6604
2589726d 6605 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
475fb78f
AS
6606 if (ret == 1)
6607 goto peek_stack;
6608 else if (ret < 0)
6609 goto err_free;
6610 }
6611 } else {
6612 /* all other non-branch instructions with single
6613 * fall-through edge
6614 */
2589726d 6615 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6616 if (ret == 1)
6617 goto peek_stack;
6618 else if (ret < 0)
6619 goto err_free;
6620 }
6621
6622mark_explored:
6623 insn_state[t] = EXPLORED;
7df737e9 6624 if (env->cfg.cur_stack-- <= 0) {
61bd5218 6625 verbose(env, "pop stack internal bug\n");
475fb78f
AS
6626 ret = -EFAULT;
6627 goto err_free;
6628 }
6629 goto peek_stack;
6630
6631check_state:
6632 for (i = 0; i < insn_cnt; i++) {
6633 if (insn_state[i] != EXPLORED) {
61bd5218 6634 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
6635 ret = -EINVAL;
6636 goto err_free;
6637 }
6638 }
6639 ret = 0; /* cfg looks good */
6640
6641err_free:
71dde681
AS
6642 kvfree(insn_state);
6643 kvfree(insn_stack);
7df737e9 6644 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
6645 return ret;
6646}
6647
838e9690
YS
6648/* The minimum supported BTF func info size */
6649#define MIN_BPF_FUNCINFO_SIZE 8
6650#define MAX_FUNCINFO_REC_SIZE 252
6651
c454a46b
MKL
6652static int check_btf_func(struct bpf_verifier_env *env,
6653 const union bpf_attr *attr,
6654 union bpf_attr __user *uattr)
838e9690 6655{
d0b2818e 6656 u32 i, nfuncs, urec_size, min_size;
838e9690 6657 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 6658 struct bpf_func_info *krecord;
8c1b6e69 6659 struct bpf_func_info_aux *info_aux = NULL;
838e9690 6660 const struct btf_type *type;
c454a46b
MKL
6661 struct bpf_prog *prog;
6662 const struct btf *btf;
838e9690 6663 void __user *urecord;
d0b2818e 6664 u32 prev_offset = 0;
838e9690
YS
6665 int ret = 0;
6666
6667 nfuncs = attr->func_info_cnt;
6668 if (!nfuncs)
6669 return 0;
6670
6671 if (nfuncs != env->subprog_cnt) {
6672 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6673 return -EINVAL;
6674 }
6675
6676 urec_size = attr->func_info_rec_size;
6677 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6678 urec_size > MAX_FUNCINFO_REC_SIZE ||
6679 urec_size % sizeof(u32)) {
6680 verbose(env, "invalid func info rec size %u\n", urec_size);
6681 return -EINVAL;
6682 }
6683
c454a46b
MKL
6684 prog = env->prog;
6685 btf = prog->aux->btf;
838e9690
YS
6686
6687 urecord = u64_to_user_ptr(attr->func_info);
6688 min_size = min_t(u32, krec_size, urec_size);
6689
ba64e7d8 6690 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
6691 if (!krecord)
6692 return -ENOMEM;
8c1b6e69
AS
6693 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
6694 if (!info_aux)
6695 goto err_free;
ba64e7d8 6696
838e9690
YS
6697 for (i = 0; i < nfuncs; i++) {
6698 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6699 if (ret) {
6700 if (ret == -E2BIG) {
6701 verbose(env, "nonzero tailing record in func info");
6702 /* set the size kernel expects so loader can zero
6703 * out the rest of the record.
6704 */
6705 if (put_user(min_size, &uattr->func_info_rec_size))
6706 ret = -EFAULT;
6707 }
c454a46b 6708 goto err_free;
838e9690
YS
6709 }
6710
ba64e7d8 6711 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 6712 ret = -EFAULT;
c454a46b 6713 goto err_free;
838e9690
YS
6714 }
6715
d30d42e0 6716 /* check insn_off */
838e9690 6717 if (i == 0) {
d30d42e0 6718 if (krecord[i].insn_off) {
838e9690 6719 verbose(env,
d30d42e0
MKL
6720 "nonzero insn_off %u for the first func info record",
6721 krecord[i].insn_off);
838e9690 6722 ret = -EINVAL;
c454a46b 6723 goto err_free;
838e9690 6724 }
d30d42e0 6725 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
6726 verbose(env,
6727 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 6728 krecord[i].insn_off, prev_offset);
838e9690 6729 ret = -EINVAL;
c454a46b 6730 goto err_free;
838e9690
YS
6731 }
6732
d30d42e0 6733 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690
YS
6734 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6735 ret = -EINVAL;
c454a46b 6736 goto err_free;
838e9690
YS
6737 }
6738
6739 /* check type_id */
ba64e7d8 6740 type = btf_type_by_id(btf, krecord[i].type_id);
838e9690
YS
6741 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
6742 verbose(env, "invalid type id %d in func info",
ba64e7d8 6743 krecord[i].type_id);
838e9690 6744 ret = -EINVAL;
c454a46b 6745 goto err_free;
838e9690 6746 }
d30d42e0 6747 prev_offset = krecord[i].insn_off;
838e9690
YS
6748 urecord += urec_size;
6749 }
6750
ba64e7d8
YS
6751 prog->aux->func_info = krecord;
6752 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 6753 prog->aux->func_info_aux = info_aux;
838e9690
YS
6754 return 0;
6755
c454a46b 6756err_free:
ba64e7d8 6757 kvfree(krecord);
8c1b6e69 6758 kfree(info_aux);
838e9690
YS
6759 return ret;
6760}
6761
ba64e7d8
YS
6762static void adjust_btf_func(struct bpf_verifier_env *env)
6763{
8c1b6e69 6764 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
6765 int i;
6766
8c1b6e69 6767 if (!aux->func_info)
ba64e7d8
YS
6768 return;
6769
6770 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 6771 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
6772}
6773
c454a46b
MKL
6774#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
6775 sizeof(((struct bpf_line_info *)(0))->line_col))
6776#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
6777
6778static int check_btf_line(struct bpf_verifier_env *env,
6779 const union bpf_attr *attr,
6780 union bpf_attr __user *uattr)
6781{
6782 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6783 struct bpf_subprog_info *sub;
6784 struct bpf_line_info *linfo;
6785 struct bpf_prog *prog;
6786 const struct btf *btf;
6787 void __user *ulinfo;
6788 int err;
6789
6790 nr_linfo = attr->line_info_cnt;
6791 if (!nr_linfo)
6792 return 0;
6793
6794 rec_size = attr->line_info_rec_size;
6795 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6796 rec_size > MAX_LINEINFO_REC_SIZE ||
6797 rec_size & (sizeof(u32) - 1))
6798 return -EINVAL;
6799
6800 /* Need to zero it in case the userspace may
6801 * pass in a smaller bpf_line_info object.
6802 */
6803 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6804 GFP_KERNEL | __GFP_NOWARN);
6805 if (!linfo)
6806 return -ENOMEM;
6807
6808 prog = env->prog;
6809 btf = prog->aux->btf;
6810
6811 s = 0;
6812 sub = env->subprog_info;
6813 ulinfo = u64_to_user_ptr(attr->line_info);
6814 expected_size = sizeof(struct bpf_line_info);
6815 ncopy = min_t(u32, expected_size, rec_size);
6816 for (i = 0; i < nr_linfo; i++) {
6817 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6818 if (err) {
6819 if (err == -E2BIG) {
6820 verbose(env, "nonzero tailing record in line_info");
6821 if (put_user(expected_size,
6822 &uattr->line_info_rec_size))
6823 err = -EFAULT;
6824 }
6825 goto err_free;
6826 }
6827
6828 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6829 err = -EFAULT;
6830 goto err_free;
6831 }
6832
6833 /*
6834 * Check insn_off to ensure
6835 * 1) strictly increasing AND
6836 * 2) bounded by prog->len
6837 *
6838 * The linfo[0].insn_off == 0 check logically falls into
6839 * the later "missing bpf_line_info for func..." case
6840 * because the first linfo[0].insn_off must be the
6841 * first sub also and the first sub must have
6842 * subprog_info[0].start == 0.
6843 */
6844 if ((i && linfo[i].insn_off <= prev_offset) ||
6845 linfo[i].insn_off >= prog->len) {
6846 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
6847 i, linfo[i].insn_off, prev_offset,
6848 prog->len);
6849 err = -EINVAL;
6850 goto err_free;
6851 }
6852
fdbaa0be
MKL
6853 if (!prog->insnsi[linfo[i].insn_off].code) {
6854 verbose(env,
6855 "Invalid insn code at line_info[%u].insn_off\n",
6856 i);
6857 err = -EINVAL;
6858 goto err_free;
6859 }
6860
23127b33
MKL
6861 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
6862 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
6863 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
6864 err = -EINVAL;
6865 goto err_free;
6866 }
6867
6868 if (s != env->subprog_cnt) {
6869 if (linfo[i].insn_off == sub[s].start) {
6870 sub[s].linfo_idx = i;
6871 s++;
6872 } else if (sub[s].start < linfo[i].insn_off) {
6873 verbose(env, "missing bpf_line_info for func#%u\n", s);
6874 err = -EINVAL;
6875 goto err_free;
6876 }
6877 }
6878
6879 prev_offset = linfo[i].insn_off;
6880 ulinfo += rec_size;
6881 }
6882
6883 if (s != env->subprog_cnt) {
6884 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
6885 env->subprog_cnt - s, s);
6886 err = -EINVAL;
6887 goto err_free;
6888 }
6889
6890 prog->aux->linfo = linfo;
6891 prog->aux->nr_linfo = nr_linfo;
6892
6893 return 0;
6894
6895err_free:
6896 kvfree(linfo);
6897 return err;
6898}
6899
6900static int check_btf_info(struct bpf_verifier_env *env,
6901 const union bpf_attr *attr,
6902 union bpf_attr __user *uattr)
6903{
6904 struct btf *btf;
6905 int err;
6906
6907 if (!attr->func_info_cnt && !attr->line_info_cnt)
6908 return 0;
6909
6910 btf = btf_get_by_fd(attr->prog_btf_fd);
6911 if (IS_ERR(btf))
6912 return PTR_ERR(btf);
6913 env->prog->aux->btf = btf;
6914
6915 err = check_btf_func(env, attr, uattr);
6916 if (err)
6917 return err;
6918
6919 err = check_btf_line(env, attr, uattr);
6920 if (err)
6921 return err;
6922
6923 return 0;
ba64e7d8
YS
6924}
6925
f1174f77
EC
6926/* check %cur's range satisfies %old's */
6927static bool range_within(struct bpf_reg_state *old,
6928 struct bpf_reg_state *cur)
6929{
b03c9f9f
EC
6930 return old->umin_value <= cur->umin_value &&
6931 old->umax_value >= cur->umax_value &&
6932 old->smin_value <= cur->smin_value &&
6933 old->smax_value >= cur->smax_value;
f1174f77
EC
6934}
6935
6936/* Maximum number of register states that can exist at once */
6937#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
6938struct idpair {
6939 u32 old;
6940 u32 cur;
6941};
6942
6943/* If in the old state two registers had the same id, then they need to have
6944 * the same id in the new state as well. But that id could be different from
6945 * the old state, so we need to track the mapping from old to new ids.
6946 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
6947 * regs with old id 5 must also have new id 9 for the new state to be safe. But
6948 * regs with a different old id could still have new id 9, we don't care about
6949 * that.
6950 * So we look through our idmap to see if this old id has been seen before. If
6951 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 6952 */
f1174f77 6953static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 6954{
f1174f77 6955 unsigned int i;
969bf05e 6956
f1174f77
EC
6957 for (i = 0; i < ID_MAP_SIZE; i++) {
6958 if (!idmap[i].old) {
6959 /* Reached an empty slot; haven't seen this id before */
6960 idmap[i].old = old_id;
6961 idmap[i].cur = cur_id;
6962 return true;
6963 }
6964 if (idmap[i].old == old_id)
6965 return idmap[i].cur == cur_id;
6966 }
6967 /* We ran out of idmap slots, which should be impossible */
6968 WARN_ON_ONCE(1);
6969 return false;
6970}
6971
9242b5f5
AS
6972static void clean_func_state(struct bpf_verifier_env *env,
6973 struct bpf_func_state *st)
6974{
6975 enum bpf_reg_liveness live;
6976 int i, j;
6977
6978 for (i = 0; i < BPF_REG_FP; i++) {
6979 live = st->regs[i].live;
6980 /* liveness must not touch this register anymore */
6981 st->regs[i].live |= REG_LIVE_DONE;
6982 if (!(live & REG_LIVE_READ))
6983 /* since the register is unused, clear its state
6984 * to make further comparison simpler
6985 */
f54c7898 6986 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
6987 }
6988
6989 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
6990 live = st->stack[i].spilled_ptr.live;
6991 /* liveness must not touch this stack slot anymore */
6992 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
6993 if (!(live & REG_LIVE_READ)) {
f54c7898 6994 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
6995 for (j = 0; j < BPF_REG_SIZE; j++)
6996 st->stack[i].slot_type[j] = STACK_INVALID;
6997 }
6998 }
6999}
7000
7001static void clean_verifier_state(struct bpf_verifier_env *env,
7002 struct bpf_verifier_state *st)
7003{
7004 int i;
7005
7006 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7007 /* all regs in this state in all frames were already marked */
7008 return;
7009
7010 for (i = 0; i <= st->curframe; i++)
7011 clean_func_state(env, st->frame[i]);
7012}
7013
7014/* the parentage chains form a tree.
7015 * the verifier states are added to state lists at given insn and
7016 * pushed into state stack for future exploration.
7017 * when the verifier reaches bpf_exit insn some of the verifer states
7018 * stored in the state lists have their final liveness state already,
7019 * but a lot of states will get revised from liveness point of view when
7020 * the verifier explores other branches.
7021 * Example:
7022 * 1: r0 = 1
7023 * 2: if r1 == 100 goto pc+1
7024 * 3: r0 = 2
7025 * 4: exit
7026 * when the verifier reaches exit insn the register r0 in the state list of
7027 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7028 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7029 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7030 *
7031 * Since the verifier pushes the branch states as it sees them while exploring
7032 * the program the condition of walking the branch instruction for the second
7033 * time means that all states below this branch were already explored and
7034 * their final liveness markes are already propagated.
7035 * Hence when the verifier completes the search of state list in is_state_visited()
7036 * we can call this clean_live_states() function to mark all liveness states
7037 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7038 * will not be used.
7039 * This function also clears the registers and stack for states that !READ
7040 * to simplify state merging.
7041 *
7042 * Important note here that walking the same branch instruction in the callee
7043 * doesn't meant that the states are DONE. The verifier has to compare
7044 * the callsites
7045 */
7046static void clean_live_states(struct bpf_verifier_env *env, int insn,
7047 struct bpf_verifier_state *cur)
7048{
7049 struct bpf_verifier_state_list *sl;
7050 int i;
7051
5d839021 7052 sl = *explored_state(env, insn);
a8f500af 7053 while (sl) {
2589726d
AS
7054 if (sl->state.branches)
7055 goto next;
dc2a4ebc
AS
7056 if (sl->state.insn_idx != insn ||
7057 sl->state.curframe != cur->curframe)
9242b5f5
AS
7058 goto next;
7059 for (i = 0; i <= cur->curframe; i++)
7060 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7061 goto next;
7062 clean_verifier_state(env, &sl->state);
7063next:
7064 sl = sl->next;
7065 }
7066}
7067
f1174f77 7068/* Returns true if (rold safe implies rcur safe) */
1b688a19
EC
7069static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7070 struct idpair *idmap)
f1174f77 7071{
f4d7e40a
AS
7072 bool equal;
7073
dc503a8a
EC
7074 if (!(rold->live & REG_LIVE_READ))
7075 /* explored state didn't use this */
7076 return true;
7077
679c782d 7078 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
7079
7080 if (rold->type == PTR_TO_STACK)
7081 /* two stack pointers are equal only if they're pointing to
7082 * the same stack frame, since fp-8 in foo != fp-8 in bar
7083 */
7084 return equal && rold->frameno == rcur->frameno;
7085
7086 if (equal)
969bf05e
AS
7087 return true;
7088
f1174f77
EC
7089 if (rold->type == NOT_INIT)
7090 /* explored state can't have used this */
969bf05e 7091 return true;
f1174f77
EC
7092 if (rcur->type == NOT_INIT)
7093 return false;
7094 switch (rold->type) {
7095 case SCALAR_VALUE:
7096 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
7097 if (!rold->precise && !rcur->precise)
7098 return true;
f1174f77
EC
7099 /* new val must satisfy old val knowledge */
7100 return range_within(rold, rcur) &&
7101 tnum_in(rold->var_off, rcur->var_off);
7102 } else {
179d1c56
JH
7103 /* We're trying to use a pointer in place of a scalar.
7104 * Even if the scalar was unbounded, this could lead to
7105 * pointer leaks because scalars are allowed to leak
7106 * while pointers are not. We could make this safe in
7107 * special cases if root is calling us, but it's
7108 * probably not worth the hassle.
f1174f77 7109 */
179d1c56 7110 return false;
f1174f77
EC
7111 }
7112 case PTR_TO_MAP_VALUE:
1b688a19
EC
7113 /* If the new min/max/var_off satisfy the old ones and
7114 * everything else matches, we are OK.
d83525ca
AS
7115 * 'id' is not compared, since it's only used for maps with
7116 * bpf_spin_lock inside map element and in such cases if
7117 * the rest of the prog is valid for one map element then
7118 * it's valid for all map elements regardless of the key
7119 * used in bpf_map_lookup()
1b688a19
EC
7120 */
7121 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7122 range_within(rold, rcur) &&
7123 tnum_in(rold->var_off, rcur->var_off);
f1174f77
EC
7124 case PTR_TO_MAP_VALUE_OR_NULL:
7125 /* a PTR_TO_MAP_VALUE could be safe to use as a
7126 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7127 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7128 * checked, doing so could have affected others with the same
7129 * id, and we can't check for that because we lost the id when
7130 * we converted to a PTR_TO_MAP_VALUE.
7131 */
7132 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7133 return false;
7134 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7135 return false;
7136 /* Check our ids match any regs they're supposed to */
7137 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 7138 case PTR_TO_PACKET_META:
f1174f77 7139 case PTR_TO_PACKET:
de8f3a83 7140 if (rcur->type != rold->type)
f1174f77
EC
7141 return false;
7142 /* We must have at least as much range as the old ptr
7143 * did, so that any accesses which were safe before are
7144 * still safe. This is true even if old range < old off,
7145 * since someone could have accessed through (ptr - k), or
7146 * even done ptr -= k in a register, to get a safe access.
7147 */
7148 if (rold->range > rcur->range)
7149 return false;
7150 /* If the offsets don't match, we can't trust our alignment;
7151 * nor can we be sure that we won't fall out of range.
7152 */
7153 if (rold->off != rcur->off)
7154 return false;
7155 /* id relations must be preserved */
7156 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
7157 return false;
7158 /* new val must satisfy old val knowledge */
7159 return range_within(rold, rcur) &&
7160 tnum_in(rold->var_off, rcur->var_off);
7161 case PTR_TO_CTX:
7162 case CONST_PTR_TO_MAP:
f1174f77 7163 case PTR_TO_PACKET_END:
d58e468b 7164 case PTR_TO_FLOW_KEYS:
c64b7983
JS
7165 case PTR_TO_SOCKET:
7166 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7167 case PTR_TO_SOCK_COMMON:
7168 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7169 case PTR_TO_TCP_SOCK:
7170 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7171 case PTR_TO_XDP_SOCK:
f1174f77
EC
7172 /* Only valid matches are exact, which memcmp() above
7173 * would have accepted
7174 */
7175 default:
7176 /* Don't know what's going on, just say it's not safe */
7177 return false;
7178 }
969bf05e 7179
f1174f77
EC
7180 /* Shouldn't get here; if we do, say it's not safe */
7181 WARN_ON_ONCE(1);
969bf05e
AS
7182 return false;
7183}
7184
f4d7e40a
AS
7185static bool stacksafe(struct bpf_func_state *old,
7186 struct bpf_func_state *cur,
638f5b90
AS
7187 struct idpair *idmap)
7188{
7189 int i, spi;
7190
638f5b90
AS
7191 /* walk slots of the explored stack and ignore any additional
7192 * slots in the current stack, since explored(safe) state
7193 * didn't use them
7194 */
7195 for (i = 0; i < old->allocated_stack; i++) {
7196 spi = i / BPF_REG_SIZE;
7197
b233920c
AS
7198 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
7199 i += BPF_REG_SIZE - 1;
cc2b14d5 7200 /* explored state didn't use this */
fd05e57b 7201 continue;
b233920c 7202 }
cc2b14d5 7203
638f5b90
AS
7204 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
7205 continue;
19e2dbb7
AS
7206
7207 /* explored stack has more populated slots than current stack
7208 * and these slots were used
7209 */
7210 if (i >= cur->allocated_stack)
7211 return false;
7212
cc2b14d5
AS
7213 /* if old state was safe with misc data in the stack
7214 * it will be safe with zero-initialized stack.
7215 * The opposite is not true
7216 */
7217 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
7218 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
7219 continue;
638f5b90
AS
7220 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
7221 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
7222 /* Ex: old explored (safe) state has STACK_SPILL in
7223 * this stack slot, but current has has STACK_MISC ->
7224 * this verifier states are not equivalent,
7225 * return false to continue verification of this path
7226 */
7227 return false;
7228 if (i % BPF_REG_SIZE)
7229 continue;
7230 if (old->stack[spi].slot_type[0] != STACK_SPILL)
7231 continue;
7232 if (!regsafe(&old->stack[spi].spilled_ptr,
7233 &cur->stack[spi].spilled_ptr,
7234 idmap))
7235 /* when explored and current stack slot are both storing
7236 * spilled registers, check that stored pointers types
7237 * are the same as well.
7238 * Ex: explored safe path could have stored
7239 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
7240 * but current path has stored:
7241 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
7242 * such verifier states are not equivalent.
7243 * return false to continue verification of this path
7244 */
7245 return false;
7246 }
7247 return true;
7248}
7249
fd978bf7
JS
7250static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
7251{
7252 if (old->acquired_refs != cur->acquired_refs)
7253 return false;
7254 return !memcmp(old->refs, cur->refs,
7255 sizeof(*old->refs) * old->acquired_refs);
7256}
7257
f1bca824
AS
7258/* compare two verifier states
7259 *
7260 * all states stored in state_list are known to be valid, since
7261 * verifier reached 'bpf_exit' instruction through them
7262 *
7263 * this function is called when verifier exploring different branches of
7264 * execution popped from the state stack. If it sees an old state that has
7265 * more strict register state and more strict stack state then this execution
7266 * branch doesn't need to be explored further, since verifier already
7267 * concluded that more strict state leads to valid finish.
7268 *
7269 * Therefore two states are equivalent if register state is more conservative
7270 * and explored stack state is more conservative than the current one.
7271 * Example:
7272 * explored current
7273 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7274 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7275 *
7276 * In other words if current stack state (one being explored) has more
7277 * valid slots than old one that already passed validation, it means
7278 * the verifier can stop exploring and conclude that current state is valid too
7279 *
7280 * Similarly with registers. If explored state has register type as invalid
7281 * whereas register type in current state is meaningful, it means that
7282 * the current state will reach 'bpf_exit' instruction safely
7283 */
f4d7e40a
AS
7284static bool func_states_equal(struct bpf_func_state *old,
7285 struct bpf_func_state *cur)
f1bca824 7286{
f1174f77
EC
7287 struct idpair *idmap;
7288 bool ret = false;
f1bca824
AS
7289 int i;
7290
f1174f77
EC
7291 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7292 /* If we failed to allocate the idmap, just say it's not safe */
7293 if (!idmap)
1a0dc1ac 7294 return false;
f1174f77
EC
7295
7296 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 7297 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 7298 goto out_free;
f1bca824
AS
7299 }
7300
638f5b90
AS
7301 if (!stacksafe(old, cur, idmap))
7302 goto out_free;
fd978bf7
JS
7303
7304 if (!refsafe(old, cur))
7305 goto out_free;
f1174f77
EC
7306 ret = true;
7307out_free:
7308 kfree(idmap);
7309 return ret;
f1bca824
AS
7310}
7311
f4d7e40a
AS
7312static bool states_equal(struct bpf_verifier_env *env,
7313 struct bpf_verifier_state *old,
7314 struct bpf_verifier_state *cur)
7315{
7316 int i;
7317
7318 if (old->curframe != cur->curframe)
7319 return false;
7320
979d63d5
DB
7321 /* Verification state from speculative execution simulation
7322 * must never prune a non-speculative execution one.
7323 */
7324 if (old->speculative && !cur->speculative)
7325 return false;
7326
d83525ca
AS
7327 if (old->active_spin_lock != cur->active_spin_lock)
7328 return false;
7329
f4d7e40a
AS
7330 /* for states to be equal callsites have to be the same
7331 * and all frame states need to be equivalent
7332 */
7333 for (i = 0; i <= old->curframe; i++) {
7334 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7335 return false;
7336 if (!func_states_equal(old->frame[i], cur->frame[i]))
7337 return false;
7338 }
7339 return true;
7340}
7341
5327ed3d
JW
7342/* Return 0 if no propagation happened. Return negative error code if error
7343 * happened. Otherwise, return the propagated bit.
7344 */
55e7f3b5
JW
7345static int propagate_liveness_reg(struct bpf_verifier_env *env,
7346 struct bpf_reg_state *reg,
7347 struct bpf_reg_state *parent_reg)
7348{
5327ed3d
JW
7349 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7350 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
7351 int err;
7352
5327ed3d
JW
7353 /* When comes here, read flags of PARENT_REG or REG could be any of
7354 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7355 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
7356 */
7357 if (parent_flag == REG_LIVE_READ64 ||
7358 /* Or if there is no read flag from REG. */
7359 !flag ||
7360 /* Or if the read flag from REG is the same as PARENT_REG. */
7361 parent_flag == flag)
55e7f3b5
JW
7362 return 0;
7363
5327ed3d 7364 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
7365 if (err)
7366 return err;
7367
5327ed3d 7368 return flag;
55e7f3b5
JW
7369}
7370
8e9cd9ce 7371/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
7372 * straight-line code between a state and its parent. When we arrive at an
7373 * equivalent state (jump target or such) we didn't arrive by the straight-line
7374 * code, so read marks in the state must propagate to the parent regardless
7375 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 7376 * in mark_reg_read() is for.
8e9cd9ce 7377 */
f4d7e40a
AS
7378static int propagate_liveness(struct bpf_verifier_env *env,
7379 const struct bpf_verifier_state *vstate,
7380 struct bpf_verifier_state *vparent)
dc503a8a 7381{
3f8cafa4 7382 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 7383 struct bpf_func_state *state, *parent;
3f8cafa4 7384 int i, frame, err = 0;
dc503a8a 7385
f4d7e40a
AS
7386 if (vparent->curframe != vstate->curframe) {
7387 WARN(1, "propagate_live: parent frame %d current frame %d\n",
7388 vparent->curframe, vstate->curframe);
7389 return -EFAULT;
7390 }
dc503a8a
EC
7391 /* Propagate read liveness of registers... */
7392 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 7393 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
7394 parent = vparent->frame[frame];
7395 state = vstate->frame[frame];
7396 parent_reg = parent->regs;
7397 state_reg = state->regs;
83d16312
JK
7398 /* We don't need to worry about FP liveness, it's read-only */
7399 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
7400 err = propagate_liveness_reg(env, &state_reg[i],
7401 &parent_reg[i]);
5327ed3d 7402 if (err < 0)
3f8cafa4 7403 return err;
5327ed3d
JW
7404 if (err == REG_LIVE_READ64)
7405 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 7406 }
f4d7e40a 7407
1b04aee7 7408 /* Propagate stack slots. */
f4d7e40a
AS
7409 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7410 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
7411 parent_reg = &parent->stack[i].spilled_ptr;
7412 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
7413 err = propagate_liveness_reg(env, state_reg,
7414 parent_reg);
5327ed3d 7415 if (err < 0)
3f8cafa4 7416 return err;
dc503a8a
EC
7417 }
7418 }
5327ed3d 7419 return 0;
dc503a8a
EC
7420}
7421
a3ce685d
AS
7422/* find precise scalars in the previous equivalent state and
7423 * propagate them into the current state
7424 */
7425static int propagate_precision(struct bpf_verifier_env *env,
7426 const struct bpf_verifier_state *old)
7427{
7428 struct bpf_reg_state *state_reg;
7429 struct bpf_func_state *state;
7430 int i, err = 0;
7431
7432 state = old->frame[old->curframe];
7433 state_reg = state->regs;
7434 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7435 if (state_reg->type != SCALAR_VALUE ||
7436 !state_reg->precise)
7437 continue;
7438 if (env->log.level & BPF_LOG_LEVEL2)
7439 verbose(env, "propagating r%d\n", i);
7440 err = mark_chain_precision(env, i);
7441 if (err < 0)
7442 return err;
7443 }
7444
7445 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7446 if (state->stack[i].slot_type[0] != STACK_SPILL)
7447 continue;
7448 state_reg = &state->stack[i].spilled_ptr;
7449 if (state_reg->type != SCALAR_VALUE ||
7450 !state_reg->precise)
7451 continue;
7452 if (env->log.level & BPF_LOG_LEVEL2)
7453 verbose(env, "propagating fp%d\n",
7454 (-i - 1) * BPF_REG_SIZE);
7455 err = mark_chain_precision_stack(env, i);
7456 if (err < 0)
7457 return err;
7458 }
7459 return 0;
7460}
7461
2589726d
AS
7462static bool states_maybe_looping(struct bpf_verifier_state *old,
7463 struct bpf_verifier_state *cur)
7464{
7465 struct bpf_func_state *fold, *fcur;
7466 int i, fr = cur->curframe;
7467
7468 if (old->curframe != fr)
7469 return false;
7470
7471 fold = old->frame[fr];
7472 fcur = cur->frame[fr];
7473 for (i = 0; i < MAX_BPF_REG; i++)
7474 if (memcmp(&fold->regs[i], &fcur->regs[i],
7475 offsetof(struct bpf_reg_state, parent)))
7476 return false;
7477 return true;
7478}
7479
7480
58e2af8b 7481static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 7482{
58e2af8b 7483 struct bpf_verifier_state_list *new_sl;
9f4686c4 7484 struct bpf_verifier_state_list *sl, **pprev;
679c782d 7485 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 7486 int i, j, err, states_cnt = 0;
10d274e8 7487 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 7488
b5dc0163 7489 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 7490 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
7491 /* this 'insn_idx' instruction wasn't marked, so we will not
7492 * be doing state search here
7493 */
7494 return 0;
7495
2589726d
AS
7496 /* bpf progs typically have pruning point every 4 instructions
7497 * http://vger.kernel.org/bpfconf2019.html#session-1
7498 * Do not add new state for future pruning if the verifier hasn't seen
7499 * at least 2 jumps and at least 8 instructions.
7500 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
7501 * In tests that amounts to up to 50% reduction into total verifier
7502 * memory consumption and 20% verifier time speedup.
7503 */
7504 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7505 env->insn_processed - env->prev_insn_processed >= 8)
7506 add_new_state = true;
7507
a8f500af
AS
7508 pprev = explored_state(env, insn_idx);
7509 sl = *pprev;
7510
9242b5f5
AS
7511 clean_live_states(env, insn_idx, cur);
7512
a8f500af 7513 while (sl) {
dc2a4ebc
AS
7514 states_cnt++;
7515 if (sl->state.insn_idx != insn_idx)
7516 goto next;
2589726d
AS
7517 if (sl->state.branches) {
7518 if (states_maybe_looping(&sl->state, cur) &&
7519 states_equal(env, &sl->state, cur)) {
7520 verbose_linfo(env, insn_idx, "; ");
7521 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7522 return -EINVAL;
7523 }
7524 /* if the verifier is processing a loop, avoid adding new state
7525 * too often, since different loop iterations have distinct
7526 * states and may not help future pruning.
7527 * This threshold shouldn't be too low to make sure that
7528 * a loop with large bound will be rejected quickly.
7529 * The most abusive loop will be:
7530 * r1 += 1
7531 * if r1 < 1000000 goto pc-2
7532 * 1M insn_procssed limit / 100 == 10k peak states.
7533 * This threshold shouldn't be too high either, since states
7534 * at the end of the loop are likely to be useful in pruning.
7535 */
7536 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7537 env->insn_processed - env->prev_insn_processed < 100)
7538 add_new_state = false;
7539 goto miss;
7540 }
638f5b90 7541 if (states_equal(env, &sl->state, cur)) {
9f4686c4 7542 sl->hit_cnt++;
f1bca824 7543 /* reached equivalent register/stack state,
dc503a8a
EC
7544 * prune the search.
7545 * Registers read by the continuation are read by us.
8e9cd9ce
EC
7546 * If we have any write marks in env->cur_state, they
7547 * will prevent corresponding reads in the continuation
7548 * from reaching our parent (an explored_state). Our
7549 * own state will get the read marks recorded, but
7550 * they'll be immediately forgotten as we're pruning
7551 * this state and will pop a new one.
f1bca824 7552 */
f4d7e40a 7553 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
7554
7555 /* if previous state reached the exit with precision and
7556 * current state is equivalent to it (except precsion marks)
7557 * the precision needs to be propagated back in
7558 * the current state.
7559 */
7560 err = err ? : push_jmp_history(env, cur);
7561 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
7562 if (err)
7563 return err;
f1bca824 7564 return 1;
dc503a8a 7565 }
2589726d
AS
7566miss:
7567 /* when new state is not going to be added do not increase miss count.
7568 * Otherwise several loop iterations will remove the state
7569 * recorded earlier. The goal of these heuristics is to have
7570 * states from some iterations of the loop (some in the beginning
7571 * and some at the end) to help pruning.
7572 */
7573 if (add_new_state)
7574 sl->miss_cnt++;
9f4686c4
AS
7575 /* heuristic to determine whether this state is beneficial
7576 * to keep checking from state equivalence point of view.
7577 * Higher numbers increase max_states_per_insn and verification time,
7578 * but do not meaningfully decrease insn_processed.
7579 */
7580 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7581 /* the state is unlikely to be useful. Remove it to
7582 * speed up verification
7583 */
7584 *pprev = sl->next;
7585 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
7586 u32 br = sl->state.branches;
7587
7588 WARN_ONCE(br,
7589 "BUG live_done but branches_to_explore %d\n",
7590 br);
9f4686c4
AS
7591 free_verifier_state(&sl->state, false);
7592 kfree(sl);
7593 env->peak_states--;
7594 } else {
7595 /* cannot free this state, since parentage chain may
7596 * walk it later. Add it for free_list instead to
7597 * be freed at the end of verification
7598 */
7599 sl->next = env->free_list;
7600 env->free_list = sl;
7601 }
7602 sl = *pprev;
7603 continue;
7604 }
dc2a4ebc 7605next:
9f4686c4
AS
7606 pprev = &sl->next;
7607 sl = *pprev;
f1bca824
AS
7608 }
7609
06ee7115
AS
7610 if (env->max_states_per_insn < states_cnt)
7611 env->max_states_per_insn = states_cnt;
7612
ceefbc96 7613 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 7614 return push_jmp_history(env, cur);
ceefbc96 7615
2589726d 7616 if (!add_new_state)
b5dc0163 7617 return push_jmp_history(env, cur);
ceefbc96 7618
2589726d
AS
7619 /* There were no equivalent states, remember the current one.
7620 * Technically the current state is not proven to be safe yet,
f4d7e40a 7621 * but it will either reach outer most bpf_exit (which means it's safe)
2589726d 7622 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 7623 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
7624 * again on the way to bpf_exit.
7625 * When looping the sl->state.branches will be > 0 and this state
7626 * will not be considered for equivalence until branches == 0.
f1bca824 7627 */
638f5b90 7628 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
7629 if (!new_sl)
7630 return -ENOMEM;
06ee7115
AS
7631 env->total_states++;
7632 env->peak_states++;
2589726d
AS
7633 env->prev_jmps_processed = env->jmps_processed;
7634 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
7635
7636 /* add new state to the head of linked list */
679c782d
EC
7637 new = &new_sl->state;
7638 err = copy_verifier_state(new, cur);
1969db47 7639 if (err) {
679c782d 7640 free_verifier_state(new, false);
1969db47
AS
7641 kfree(new_sl);
7642 return err;
7643 }
dc2a4ebc 7644 new->insn_idx = insn_idx;
2589726d
AS
7645 WARN_ONCE(new->branches != 1,
7646 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 7647
2589726d 7648 cur->parent = new;
b5dc0163
AS
7649 cur->first_insn_idx = insn_idx;
7650 clear_jmp_history(cur);
5d839021
AS
7651 new_sl->next = *explored_state(env, insn_idx);
7652 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
7653 /* connect new state to parentage chain. Current frame needs all
7654 * registers connected. Only r6 - r9 of the callers are alive (pushed
7655 * to the stack implicitly by JITs) so in callers' frames connect just
7656 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7657 * the state of the call instruction (with WRITTEN set), and r0 comes
7658 * from callee with its full parentage chain, anyway.
7659 */
8e9cd9ce
EC
7660 /* clear write marks in current state: the writes we did are not writes
7661 * our child did, so they don't screen off its reads from us.
7662 * (There are no read marks in current state, because reads always mark
7663 * their parent and current state never has children yet. Only
7664 * explored_states can get read marks.)
7665 */
eea1c227
AS
7666 for (j = 0; j <= cur->curframe; j++) {
7667 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7668 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7669 for (i = 0; i < BPF_REG_FP; i++)
7670 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7671 }
f4d7e40a
AS
7672
7673 /* all stack frames are accessible from callee, clear them all */
7674 for (j = 0; j <= cur->curframe; j++) {
7675 struct bpf_func_state *frame = cur->frame[j];
679c782d 7676 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 7677
679c782d 7678 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 7679 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
7680 frame->stack[i].spilled_ptr.parent =
7681 &newframe->stack[i].spilled_ptr;
7682 }
f4d7e40a 7683 }
f1bca824
AS
7684 return 0;
7685}
7686
c64b7983
JS
7687/* Return true if it's OK to have the same insn return a different type. */
7688static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7689{
7690 switch (type) {
7691 case PTR_TO_CTX:
7692 case PTR_TO_SOCKET:
7693 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7694 case PTR_TO_SOCK_COMMON:
7695 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7696 case PTR_TO_TCP_SOCK:
7697 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7698 case PTR_TO_XDP_SOCK:
2a02759e 7699 case PTR_TO_BTF_ID:
c64b7983
JS
7700 return false;
7701 default:
7702 return true;
7703 }
7704}
7705
7706/* If an instruction was previously used with particular pointer types, then we
7707 * need to be careful to avoid cases such as the below, where it may be ok
7708 * for one branch accessing the pointer, but not ok for the other branch:
7709 *
7710 * R1 = sock_ptr
7711 * goto X;
7712 * ...
7713 * R1 = some_other_valid_ptr;
7714 * goto X;
7715 * ...
7716 * R2 = *(u32 *)(R1 + 0);
7717 */
7718static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7719{
7720 return src != prev && (!reg_type_mismatch_ok(src) ||
7721 !reg_type_mismatch_ok(prev));
7722}
7723
58e2af8b 7724static int do_check(struct bpf_verifier_env *env)
17a52670 7725{
638f5b90 7726 struct bpf_verifier_state *state;
17a52670 7727 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 7728 struct bpf_reg_state *regs;
06ee7115 7729 int insn_cnt = env->prog->len;
17a52670 7730 bool do_print_state = false;
b5dc0163 7731 int prev_insn_idx = -1;
17a52670 7732
d9762e84
MKL
7733 env->prev_linfo = NULL;
7734
638f5b90
AS
7735 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
7736 if (!state)
7737 return -ENOMEM;
f4d7e40a 7738 state->curframe = 0;
979d63d5 7739 state->speculative = false;
2589726d 7740 state->branches = 1;
f4d7e40a
AS
7741 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
7742 if (!state->frame[0]) {
7743 kfree(state);
7744 return -ENOMEM;
7745 }
7746 env->cur_state = state;
7747 init_func_state(env, state->frame[0],
7748 BPF_MAIN_FUNC /* callsite */,
7749 0 /* frameno */,
7750 0 /* subprogno, zero == main subprog */);
c08435ec 7751
8c1b6e69
AS
7752 if (btf_check_func_arg_match(env, 0))
7753 return -EINVAL;
7754
17a52670
AS
7755 for (;;) {
7756 struct bpf_insn *insn;
7757 u8 class;
7758 int err;
7759
b5dc0163 7760 env->prev_insn_idx = prev_insn_idx;
c08435ec 7761 if (env->insn_idx >= insn_cnt) {
61bd5218 7762 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 7763 env->insn_idx, insn_cnt);
17a52670
AS
7764 return -EFAULT;
7765 }
7766
c08435ec 7767 insn = &insns[env->insn_idx];
17a52670
AS
7768 class = BPF_CLASS(insn->code);
7769
06ee7115 7770 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
7771 verbose(env,
7772 "BPF program is too large. Processed %d insn\n",
06ee7115 7773 env->insn_processed);
17a52670
AS
7774 return -E2BIG;
7775 }
7776
c08435ec 7777 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
7778 if (err < 0)
7779 return err;
7780 if (err == 1) {
7781 /* found equivalent state, can prune the search */
06ee7115 7782 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 7783 if (do_print_state)
979d63d5
DB
7784 verbose(env, "\nfrom %d to %d%s: safe\n",
7785 env->prev_insn_idx, env->insn_idx,
7786 env->cur_state->speculative ?
7787 " (speculative execution)" : "");
f1bca824 7788 else
c08435ec 7789 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
7790 }
7791 goto process_bpf_exit;
7792 }
7793
c3494801
AS
7794 if (signal_pending(current))
7795 return -EAGAIN;
7796
3c2ce60b
DB
7797 if (need_resched())
7798 cond_resched();
7799
06ee7115
AS
7800 if (env->log.level & BPF_LOG_LEVEL2 ||
7801 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7802 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 7803 verbose(env, "%d:", env->insn_idx);
c5fc9692 7804 else
979d63d5
DB
7805 verbose(env, "\nfrom %d to %d%s:",
7806 env->prev_insn_idx, env->insn_idx,
7807 env->cur_state->speculative ?
7808 " (speculative execution)" : "");
f4d7e40a 7809 print_verifier_state(env, state->frame[state->curframe]);
17a52670
AS
7810 do_print_state = false;
7811 }
7812
06ee7115 7813 if (env->log.level & BPF_LOG_LEVEL) {
7105e828
DB
7814 const struct bpf_insn_cbs cbs = {
7815 .cb_print = verbose,
abe08840 7816 .private_data = env,
7105e828
DB
7817 };
7818
c08435ec
DB
7819 verbose_linfo(env, env->insn_idx, "; ");
7820 verbose(env, "%d: ", env->insn_idx);
abe08840 7821 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17a52670
AS
7822 }
7823
cae1927c 7824 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
7825 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7826 env->prev_insn_idx);
cae1927c
JK
7827 if (err)
7828 return err;
7829 }
13a27dfc 7830
638f5b90 7831 regs = cur_regs(env);
c08435ec 7832 env->insn_aux_data[env->insn_idx].seen = true;
b5dc0163 7833 prev_insn_idx = env->insn_idx;
fd978bf7 7834
17a52670 7835 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 7836 err = check_alu_op(env, insn);
17a52670
AS
7837 if (err)
7838 return err;
7839
7840 } else if (class == BPF_LDX) {
3df126f3 7841 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
7842
7843 /* check for reserved fields is already done */
7844
17a52670 7845 /* check src operand */
dc503a8a 7846 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7847 if (err)
7848 return err;
7849
dc503a8a 7850 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
7851 if (err)
7852 return err;
7853
725f9dcd
AS
7854 src_reg_type = regs[insn->src_reg].type;
7855
17a52670
AS
7856 /* check that memory (src_reg + off) is readable,
7857 * the state of dst_reg will be updated by this func
7858 */
c08435ec
DB
7859 err = check_mem_access(env, env->insn_idx, insn->src_reg,
7860 insn->off, BPF_SIZE(insn->code),
7861 BPF_READ, insn->dst_reg, false);
17a52670
AS
7862 if (err)
7863 return err;
7864
c08435ec 7865 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7866
7867 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
7868 /* saw a valid insn
7869 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 7870 * save type to validate intersecting paths
9bac3d6d 7871 */
3df126f3 7872 *prev_src_type = src_reg_type;
9bac3d6d 7873
c64b7983 7874 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
7875 /* ABuser program is trying to use the same insn
7876 * dst_reg = *(u32*) (src_reg + off)
7877 * with different pointer types:
7878 * src_reg == ctx in one branch and
7879 * src_reg == stack|map in some other branch.
7880 * Reject it.
7881 */
61bd5218 7882 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
7883 return -EINVAL;
7884 }
7885
17a52670 7886 } else if (class == BPF_STX) {
3df126f3 7887 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 7888
17a52670 7889 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 7890 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
7891 if (err)
7892 return err;
c08435ec 7893 env->insn_idx++;
17a52670
AS
7894 continue;
7895 }
7896
17a52670 7897 /* check src1 operand */
dc503a8a 7898 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7899 if (err)
7900 return err;
7901 /* check src2 operand */
dc503a8a 7902 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7903 if (err)
7904 return err;
7905
d691f9e8
AS
7906 dst_reg_type = regs[insn->dst_reg].type;
7907
17a52670 7908 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
7909 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7910 insn->off, BPF_SIZE(insn->code),
7911 BPF_WRITE, insn->src_reg, false);
17a52670
AS
7912 if (err)
7913 return err;
7914
c08435ec 7915 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7916
7917 if (*prev_dst_type == NOT_INIT) {
7918 *prev_dst_type = dst_reg_type;
c64b7983 7919 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 7920 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
7921 return -EINVAL;
7922 }
7923
17a52670
AS
7924 } else if (class == BPF_ST) {
7925 if (BPF_MODE(insn->code) != BPF_MEM ||
7926 insn->src_reg != BPF_REG_0) {
61bd5218 7927 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
7928 return -EINVAL;
7929 }
7930 /* check src operand */
dc503a8a 7931 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7932 if (err)
7933 return err;
7934
f37a8cb8 7935 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 7936 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f
DB
7937 insn->dst_reg,
7938 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
7939 return -EACCES;
7940 }
7941
17a52670 7942 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
7943 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7944 insn->off, BPF_SIZE(insn->code),
7945 BPF_WRITE, -1, false);
17a52670
AS
7946 if (err)
7947 return err;
7948
092ed096 7949 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
7950 u8 opcode = BPF_OP(insn->code);
7951
2589726d 7952 env->jmps_processed++;
17a52670
AS
7953 if (opcode == BPF_CALL) {
7954 if (BPF_SRC(insn->code) != BPF_K ||
7955 insn->off != 0 ||
f4d7e40a
AS
7956 (insn->src_reg != BPF_REG_0 &&
7957 insn->src_reg != BPF_PSEUDO_CALL) ||
092ed096
JW
7958 insn->dst_reg != BPF_REG_0 ||
7959 class == BPF_JMP32) {
61bd5218 7960 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
7961 return -EINVAL;
7962 }
7963
d83525ca
AS
7964 if (env->cur_state->active_spin_lock &&
7965 (insn->src_reg == BPF_PSEUDO_CALL ||
7966 insn->imm != BPF_FUNC_spin_unlock)) {
7967 verbose(env, "function calls are not allowed while holding a lock\n");
7968 return -EINVAL;
7969 }
f4d7e40a 7970 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 7971 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 7972 else
c08435ec 7973 err = check_helper_call(env, insn->imm, env->insn_idx);
17a52670
AS
7974 if (err)
7975 return err;
7976
7977 } else if (opcode == BPF_JA) {
7978 if (BPF_SRC(insn->code) != BPF_K ||
7979 insn->imm != 0 ||
7980 insn->src_reg != BPF_REG_0 ||
092ed096
JW
7981 insn->dst_reg != BPF_REG_0 ||
7982 class == BPF_JMP32) {
61bd5218 7983 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
7984 return -EINVAL;
7985 }
7986
c08435ec 7987 env->insn_idx += insn->off + 1;
17a52670
AS
7988 continue;
7989
7990 } else if (opcode == BPF_EXIT) {
7991 if (BPF_SRC(insn->code) != BPF_K ||
7992 insn->imm != 0 ||
7993 insn->src_reg != BPF_REG_0 ||
092ed096
JW
7994 insn->dst_reg != BPF_REG_0 ||
7995 class == BPF_JMP32) {
61bd5218 7996 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
7997 return -EINVAL;
7998 }
7999
d83525ca
AS
8000 if (env->cur_state->active_spin_lock) {
8001 verbose(env, "bpf_spin_unlock is missing\n");
8002 return -EINVAL;
8003 }
8004
f4d7e40a
AS
8005 if (state->curframe) {
8006 /* exit from nested function */
c08435ec 8007 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
8008 if (err)
8009 return err;
8010 do_print_state = true;
8011 continue;
8012 }
8013
fd978bf7
JS
8014 err = check_reference_leak(env);
8015 if (err)
8016 return err;
8017
17a52670
AS
8018 /* eBPF calling convetion is such that R0 is used
8019 * to return the value from eBPF program.
8020 * Make sure that it's readable at this time
8021 * of bpf_exit, which means that program wrote
8022 * something into it earlier
8023 */
dc503a8a 8024 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
17a52670
AS
8025 if (err)
8026 return err;
8027
1be7f75d 8028 if (is_pointer_value(env, BPF_REG_0)) {
61bd5218 8029 verbose(env, "R0 leaks addr as return value\n");
1be7f75d
AS
8030 return -EACCES;
8031 }
8032
390ee7e2
AS
8033 err = check_return_code(env);
8034 if (err)
8035 return err;
f1bca824 8036process_bpf_exit:
2589726d 8037 update_branch_counts(env, env->cur_state);
b5dc0163 8038 err = pop_stack(env, &prev_insn_idx,
c08435ec 8039 &env->insn_idx);
638f5b90
AS
8040 if (err < 0) {
8041 if (err != -ENOENT)
8042 return err;
17a52670
AS
8043 break;
8044 } else {
8045 do_print_state = true;
8046 continue;
8047 }
8048 } else {
c08435ec 8049 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
8050 if (err)
8051 return err;
8052 }
8053 } else if (class == BPF_LD) {
8054 u8 mode = BPF_MODE(insn->code);
8055
8056 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
8057 err = check_ld_abs(env, insn);
8058 if (err)
8059 return err;
8060
17a52670
AS
8061 } else if (mode == BPF_IMM) {
8062 err = check_ld_imm(env, insn);
8063 if (err)
8064 return err;
8065
c08435ec
DB
8066 env->insn_idx++;
8067 env->insn_aux_data[env->insn_idx].seen = true;
17a52670 8068 } else {
61bd5218 8069 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
8070 return -EINVAL;
8071 }
8072 } else {
61bd5218 8073 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
8074 return -EINVAL;
8075 }
8076
c08435ec 8077 env->insn_idx++;
17a52670
AS
8078 }
8079
9c8105bd 8080 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
17a52670
AS
8081 return 0;
8082}
8083
56f668df
MKL
8084static int check_map_prealloc(struct bpf_map *map)
8085{
8086 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
8087 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8088 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
8089 !(map->map_flags & BPF_F_NO_PREALLOC);
8090}
8091
d83525ca
AS
8092static bool is_tracing_prog_type(enum bpf_prog_type type)
8093{
8094 switch (type) {
8095 case BPF_PROG_TYPE_KPROBE:
8096 case BPF_PROG_TYPE_TRACEPOINT:
8097 case BPF_PROG_TYPE_PERF_EVENT:
8098 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8099 return true;
8100 default:
8101 return false;
8102 }
8103}
8104
61bd5218
JK
8105static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8106 struct bpf_map *map,
fdc15d38
AS
8107 struct bpf_prog *prog)
8108
8109{
56f668df
MKL
8110 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
8111 * preallocated hash maps, since doing memory allocation
8112 * in overflow_handler can crash depending on where nmi got
8113 * triggered.
8114 */
8115 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
8116 if (!check_map_prealloc(map)) {
61bd5218 8117 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
8118 return -EINVAL;
8119 }
8120 if (map->inner_map_meta &&
8121 !check_map_prealloc(map->inner_map_meta)) {
61bd5218 8122 verbose(env, "perf_event programs can only use preallocated inner hash map\n");
56f668df
MKL
8123 return -EINVAL;
8124 }
fdc15d38 8125 }
a3884572 8126
d83525ca
AS
8127 if ((is_tracing_prog_type(prog->type) ||
8128 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
8129 map_value_has_spin_lock(map)) {
8130 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
8131 return -EINVAL;
8132 }
8133
a3884572 8134 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 8135 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
8136 verbose(env, "offload device mismatch between prog and map\n");
8137 return -EINVAL;
8138 }
8139
fdc15d38
AS
8140 return 0;
8141}
8142
b741f163
RG
8143static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
8144{
8145 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
8146 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
8147}
8148
0246e64d
AS
8149/* look for pseudo eBPF instructions that access map FDs and
8150 * replace them with actual map pointers
8151 */
58e2af8b 8152static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
0246e64d
AS
8153{
8154 struct bpf_insn *insn = env->prog->insnsi;
8155 int insn_cnt = env->prog->len;
fdc15d38 8156 int i, j, err;
0246e64d 8157
f1f7714e 8158 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
8159 if (err)
8160 return err;
8161
0246e64d 8162 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 8163 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 8164 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 8165 verbose(env, "BPF_LDX uses reserved fields\n");
9bac3d6d
AS
8166 return -EINVAL;
8167 }
8168
d691f9e8
AS
8169 if (BPF_CLASS(insn->code) == BPF_STX &&
8170 ((BPF_MODE(insn->code) != BPF_MEM &&
8171 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 8172 verbose(env, "BPF_STX uses reserved fields\n");
d691f9e8
AS
8173 return -EINVAL;
8174 }
8175
0246e64d 8176 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 8177 struct bpf_insn_aux_data *aux;
0246e64d
AS
8178 struct bpf_map *map;
8179 struct fd f;
d8eca5bb 8180 u64 addr;
0246e64d
AS
8181
8182 if (i == insn_cnt - 1 || insn[1].code != 0 ||
8183 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
8184 insn[1].off != 0) {
61bd5218 8185 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
8186 return -EINVAL;
8187 }
8188
d8eca5bb 8189 if (insn[0].src_reg == 0)
0246e64d
AS
8190 /* valid generic load 64-bit imm */
8191 goto next_insn;
8192
d8eca5bb
DB
8193 /* In final convert_pseudo_ld_imm64() step, this is
8194 * converted into regular 64-bit imm load insn.
8195 */
8196 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8197 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8198 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8199 insn[1].imm != 0)) {
8200 verbose(env,
8201 "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
8202 return -EINVAL;
8203 }
8204
20182390 8205 f = fdget(insn[0].imm);
c2101297 8206 map = __bpf_map_get(f);
0246e64d 8207 if (IS_ERR(map)) {
61bd5218 8208 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 8209 insn[0].imm);
0246e64d
AS
8210 return PTR_ERR(map);
8211 }
8212
61bd5218 8213 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
8214 if (err) {
8215 fdput(f);
8216 return err;
8217 }
8218
d8eca5bb
DB
8219 aux = &env->insn_aux_data[i];
8220 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8221 addr = (unsigned long)map;
8222 } else {
8223 u32 off = insn[1].imm;
8224
8225 if (off >= BPF_MAX_VAR_OFF) {
8226 verbose(env, "direct value offset of %u is not allowed\n", off);
8227 fdput(f);
8228 return -EINVAL;
8229 }
8230
8231 if (!map->ops->map_direct_value_addr) {
8232 verbose(env, "no direct value access support for this map type\n");
8233 fdput(f);
8234 return -EINVAL;
8235 }
8236
8237 err = map->ops->map_direct_value_addr(map, &addr, off);
8238 if (err) {
8239 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8240 map->value_size, off);
8241 fdput(f);
8242 return err;
8243 }
8244
8245 aux->map_off = off;
8246 addr += off;
8247 }
8248
8249 insn[0].imm = (u32)addr;
8250 insn[1].imm = addr >> 32;
0246e64d
AS
8251
8252 /* check whether we recorded this map already */
d8eca5bb 8253 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 8254 if (env->used_maps[j] == map) {
d8eca5bb 8255 aux->map_index = j;
0246e64d
AS
8256 fdput(f);
8257 goto next_insn;
8258 }
d8eca5bb 8259 }
0246e64d
AS
8260
8261 if (env->used_map_cnt >= MAX_USED_MAPS) {
8262 fdput(f);
8263 return -E2BIG;
8264 }
8265
0246e64d
AS
8266 /* hold the map. If the program is rejected by verifier,
8267 * the map will be released by release_maps() or it
8268 * will be used by the valid program until it's unloaded
ab7f5bf0 8269 * and all maps are released in free_used_maps()
0246e64d 8270 */
1e0bd5a0 8271 bpf_map_inc(map);
d8eca5bb
DB
8272
8273 aux->map_index = env->used_map_cnt;
92117d84
AS
8274 env->used_maps[env->used_map_cnt++] = map;
8275
b741f163 8276 if (bpf_map_is_cgroup_storage(map) &&
e4730423 8277 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 8278 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
8279 fdput(f);
8280 return -EBUSY;
8281 }
8282
0246e64d
AS
8283 fdput(f);
8284next_insn:
8285 insn++;
8286 i++;
5e581dad
DB
8287 continue;
8288 }
8289
8290 /* Basic sanity check before we invest more work here. */
8291 if (!bpf_opcode_in_insntable(insn->code)) {
8292 verbose(env, "unknown opcode %02x\n", insn->code);
8293 return -EINVAL;
0246e64d
AS
8294 }
8295 }
8296
8297 /* now all pseudo BPF_LD_IMM64 instructions load valid
8298 * 'struct bpf_map *' into a register instead of user map_fd.
8299 * These pointers will be used later by verifier to validate map access.
8300 */
8301 return 0;
8302}
8303
8304/* drop refcnt of maps used by the rejected program */
58e2af8b 8305static void release_maps(struct bpf_verifier_env *env)
0246e64d 8306{
a2ea0746
DB
8307 __bpf_free_used_maps(env->prog->aux, env->used_maps,
8308 env->used_map_cnt);
0246e64d
AS
8309}
8310
8311/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 8312static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
8313{
8314 struct bpf_insn *insn = env->prog->insnsi;
8315 int insn_cnt = env->prog->len;
8316 int i;
8317
8318 for (i = 0; i < insn_cnt; i++, insn++)
8319 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8320 insn->src_reg = 0;
8321}
8322
8041902d
AS
8323/* single env->prog->insni[off] instruction was replaced with the range
8324 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
8325 * [0, off) and [off, end) to new locations, so the patched range stays zero
8326 */
b325fbca
JW
8327static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8328 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
8329{
8330 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
8331 struct bpf_insn *insn = new_prog->insnsi;
8332 u32 prog_len;
c131187d 8333 int i;
8041902d 8334
b325fbca
JW
8335 /* aux info at OFF always needs adjustment, no matter fast path
8336 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
8337 * original insn at old prog.
8338 */
8339 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8340
8041902d
AS
8341 if (cnt == 1)
8342 return 0;
b325fbca 8343 prog_len = new_prog->len;
fad953ce
KC
8344 new_data = vzalloc(array_size(prog_len,
8345 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
8346 if (!new_data)
8347 return -ENOMEM;
8348 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8349 memcpy(new_data + off + cnt - 1, old_data + off,
8350 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 8351 for (i = off; i < off + cnt - 1; i++) {
c131187d 8352 new_data[i].seen = true;
b325fbca
JW
8353 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8354 }
8041902d
AS
8355 env->insn_aux_data = new_data;
8356 vfree(old_data);
8357 return 0;
8358}
8359
cc8b0b92
AS
8360static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8361{
8362 int i;
8363
8364 if (len == 1)
8365 return;
4cb3d99c
JW
8366 /* NOTE: fake 'exit' subprog should be updated as well. */
8367 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 8368 if (env->subprog_info[i].start <= off)
cc8b0b92 8369 continue;
9c8105bd 8370 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
8371 }
8372}
8373
8041902d
AS
8374static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8375 const struct bpf_insn *patch, u32 len)
8376{
8377 struct bpf_prog *new_prog;
8378
8379 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
8380 if (IS_ERR(new_prog)) {
8381 if (PTR_ERR(new_prog) == -ERANGE)
8382 verbose(env,
8383 "insn %d cannot be patched due to 16-bit range\n",
8384 env->insn_aux_data[off].orig_idx);
8041902d 8385 return NULL;
4f73379e 8386 }
b325fbca 8387 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 8388 return NULL;
cc8b0b92 8389 adjust_subprog_starts(env, off, len);
8041902d
AS
8390 return new_prog;
8391}
8392
52875a04
JK
8393static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8394 u32 off, u32 cnt)
8395{
8396 int i, j;
8397
8398 /* find first prog starting at or after off (first to remove) */
8399 for (i = 0; i < env->subprog_cnt; i++)
8400 if (env->subprog_info[i].start >= off)
8401 break;
8402 /* find first prog starting at or after off + cnt (first to stay) */
8403 for (j = i; j < env->subprog_cnt; j++)
8404 if (env->subprog_info[j].start >= off + cnt)
8405 break;
8406 /* if j doesn't start exactly at off + cnt, we are just removing
8407 * the front of previous prog
8408 */
8409 if (env->subprog_info[j].start != off + cnt)
8410 j--;
8411
8412 if (j > i) {
8413 struct bpf_prog_aux *aux = env->prog->aux;
8414 int move;
8415
8416 /* move fake 'exit' subprog as well */
8417 move = env->subprog_cnt + 1 - j;
8418
8419 memmove(env->subprog_info + i,
8420 env->subprog_info + j,
8421 sizeof(*env->subprog_info) * move);
8422 env->subprog_cnt -= j - i;
8423
8424 /* remove func_info */
8425 if (aux->func_info) {
8426 move = aux->func_info_cnt - j;
8427
8428 memmove(aux->func_info + i,
8429 aux->func_info + j,
8430 sizeof(*aux->func_info) * move);
8431 aux->func_info_cnt -= j - i;
8432 /* func_info->insn_off is set after all code rewrites,
8433 * in adjust_btf_func() - no need to adjust
8434 */
8435 }
8436 } else {
8437 /* convert i from "first prog to remove" to "first to adjust" */
8438 if (env->subprog_info[i].start == off)
8439 i++;
8440 }
8441
8442 /* update fake 'exit' subprog as well */
8443 for (; i <= env->subprog_cnt; i++)
8444 env->subprog_info[i].start -= cnt;
8445
8446 return 0;
8447}
8448
8449static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8450 u32 cnt)
8451{
8452 struct bpf_prog *prog = env->prog;
8453 u32 i, l_off, l_cnt, nr_linfo;
8454 struct bpf_line_info *linfo;
8455
8456 nr_linfo = prog->aux->nr_linfo;
8457 if (!nr_linfo)
8458 return 0;
8459
8460 linfo = prog->aux->linfo;
8461
8462 /* find first line info to remove, count lines to be removed */
8463 for (i = 0; i < nr_linfo; i++)
8464 if (linfo[i].insn_off >= off)
8465 break;
8466
8467 l_off = i;
8468 l_cnt = 0;
8469 for (; i < nr_linfo; i++)
8470 if (linfo[i].insn_off < off + cnt)
8471 l_cnt++;
8472 else
8473 break;
8474
8475 /* First live insn doesn't match first live linfo, it needs to "inherit"
8476 * last removed linfo. prog is already modified, so prog->len == off
8477 * means no live instructions after (tail of the program was removed).
8478 */
8479 if (prog->len != off && l_cnt &&
8480 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8481 l_cnt--;
8482 linfo[--i].insn_off = off + cnt;
8483 }
8484
8485 /* remove the line info which refer to the removed instructions */
8486 if (l_cnt) {
8487 memmove(linfo + l_off, linfo + i,
8488 sizeof(*linfo) * (nr_linfo - i));
8489
8490 prog->aux->nr_linfo -= l_cnt;
8491 nr_linfo = prog->aux->nr_linfo;
8492 }
8493
8494 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8495 for (i = l_off; i < nr_linfo; i++)
8496 linfo[i].insn_off -= cnt;
8497
8498 /* fix up all subprogs (incl. 'exit') which start >= off */
8499 for (i = 0; i <= env->subprog_cnt; i++)
8500 if (env->subprog_info[i].linfo_idx > l_off) {
8501 /* program may have started in the removed region but
8502 * may not be fully removed
8503 */
8504 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8505 env->subprog_info[i].linfo_idx -= l_cnt;
8506 else
8507 env->subprog_info[i].linfo_idx = l_off;
8508 }
8509
8510 return 0;
8511}
8512
8513static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8514{
8515 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8516 unsigned int orig_prog_len = env->prog->len;
8517 int err;
8518
08ca90af
JK
8519 if (bpf_prog_is_dev_bound(env->prog->aux))
8520 bpf_prog_offload_remove_insns(env, off, cnt);
8521
52875a04
JK
8522 err = bpf_remove_insns(env->prog, off, cnt);
8523 if (err)
8524 return err;
8525
8526 err = adjust_subprog_starts_after_remove(env, off, cnt);
8527 if (err)
8528 return err;
8529
8530 err = bpf_adj_linfo_after_remove(env, off, cnt);
8531 if (err)
8532 return err;
8533
8534 memmove(aux_data + off, aux_data + off + cnt,
8535 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8536
8537 return 0;
8538}
8539
2a5418a1
DB
8540/* The verifier does more data flow analysis than llvm and will not
8541 * explore branches that are dead at run time. Malicious programs can
8542 * have dead code too. Therefore replace all dead at-run-time code
8543 * with 'ja -1'.
8544 *
8545 * Just nops are not optimal, e.g. if they would sit at the end of the
8546 * program and through another bug we would manage to jump there, then
8547 * we'd execute beyond program memory otherwise. Returning exception
8548 * code also wouldn't work since we can have subprogs where the dead
8549 * code could be located.
c131187d
AS
8550 */
8551static void sanitize_dead_code(struct bpf_verifier_env *env)
8552{
8553 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 8554 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
8555 struct bpf_insn *insn = env->prog->insnsi;
8556 const int insn_cnt = env->prog->len;
8557 int i;
8558
8559 for (i = 0; i < insn_cnt; i++) {
8560 if (aux_data[i].seen)
8561 continue;
2a5418a1 8562 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
8563 }
8564}
8565
e2ae4ca2
JK
8566static bool insn_is_cond_jump(u8 code)
8567{
8568 u8 op;
8569
092ed096
JW
8570 if (BPF_CLASS(code) == BPF_JMP32)
8571 return true;
8572
e2ae4ca2
JK
8573 if (BPF_CLASS(code) != BPF_JMP)
8574 return false;
8575
8576 op = BPF_OP(code);
8577 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8578}
8579
8580static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8581{
8582 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8583 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8584 struct bpf_insn *insn = env->prog->insnsi;
8585 const int insn_cnt = env->prog->len;
8586 int i;
8587
8588 for (i = 0; i < insn_cnt; i++, insn++) {
8589 if (!insn_is_cond_jump(insn->code))
8590 continue;
8591
8592 if (!aux_data[i + 1].seen)
8593 ja.off = insn->off;
8594 else if (!aux_data[i + 1 + insn->off].seen)
8595 ja.off = 0;
8596 else
8597 continue;
8598
08ca90af
JK
8599 if (bpf_prog_is_dev_bound(env->prog->aux))
8600 bpf_prog_offload_replace_insn(env, i, &ja);
8601
e2ae4ca2
JK
8602 memcpy(insn, &ja, sizeof(ja));
8603 }
8604}
8605
52875a04
JK
8606static int opt_remove_dead_code(struct bpf_verifier_env *env)
8607{
8608 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8609 int insn_cnt = env->prog->len;
8610 int i, err;
8611
8612 for (i = 0; i < insn_cnt; i++) {
8613 int j;
8614
8615 j = 0;
8616 while (i + j < insn_cnt && !aux_data[i + j].seen)
8617 j++;
8618 if (!j)
8619 continue;
8620
8621 err = verifier_remove_insns(env, i, j);
8622 if (err)
8623 return err;
8624 insn_cnt = env->prog->len;
8625 }
8626
8627 return 0;
8628}
8629
a1b14abc
JK
8630static int opt_remove_nops(struct bpf_verifier_env *env)
8631{
8632 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8633 struct bpf_insn *insn = env->prog->insnsi;
8634 int insn_cnt = env->prog->len;
8635 int i, err;
8636
8637 for (i = 0; i < insn_cnt; i++) {
8638 if (memcmp(&insn[i], &ja, sizeof(ja)))
8639 continue;
8640
8641 err = verifier_remove_insns(env, i, 1);
8642 if (err)
8643 return err;
8644 insn_cnt--;
8645 i--;
8646 }
8647
8648 return 0;
8649}
8650
d6c2308c
JW
8651static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8652 const union bpf_attr *attr)
a4b1d3c1 8653{
d6c2308c 8654 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 8655 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 8656 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 8657 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 8658 struct bpf_prog *new_prog;
d6c2308c 8659 bool rnd_hi32;
a4b1d3c1 8660
d6c2308c 8661 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 8662 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
8663 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8664 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8665 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
8666 for (i = 0; i < len; i++) {
8667 int adj_idx = i + delta;
8668 struct bpf_insn insn;
8669
d6c2308c
JW
8670 insn = insns[adj_idx];
8671 if (!aux[adj_idx].zext_dst) {
8672 u8 code, class;
8673 u32 imm_rnd;
8674
8675 if (!rnd_hi32)
8676 continue;
8677
8678 code = insn.code;
8679 class = BPF_CLASS(code);
8680 if (insn_no_def(&insn))
8681 continue;
8682
8683 /* NOTE: arg "reg" (the fourth one) is only used for
8684 * BPF_STX which has been ruled out in above
8685 * check, it is safe to pass NULL here.
8686 */
8687 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8688 if (class == BPF_LD &&
8689 BPF_MODE(code) == BPF_IMM)
8690 i++;
8691 continue;
8692 }
8693
8694 /* ctx load could be transformed into wider load. */
8695 if (class == BPF_LDX &&
8696 aux[adj_idx].ptr_type == PTR_TO_CTX)
8697 continue;
8698
8699 imm_rnd = get_random_int();
8700 rnd_hi32_patch[0] = insn;
8701 rnd_hi32_patch[1].imm = imm_rnd;
8702 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8703 patch = rnd_hi32_patch;
8704 patch_len = 4;
8705 goto apply_patch_buffer;
8706 }
8707
8708 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
8709 continue;
8710
a4b1d3c1
JW
8711 zext_patch[0] = insn;
8712 zext_patch[1].dst_reg = insn.dst_reg;
8713 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
8714 patch = zext_patch;
8715 patch_len = 2;
8716apply_patch_buffer:
8717 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
8718 if (!new_prog)
8719 return -ENOMEM;
8720 env->prog = new_prog;
8721 insns = new_prog->insnsi;
8722 aux = env->insn_aux_data;
d6c2308c 8723 delta += patch_len - 1;
a4b1d3c1
JW
8724 }
8725
8726 return 0;
8727}
8728
c64b7983
JS
8729/* convert load instructions that access fields of a context type into a
8730 * sequence of instructions that access fields of the underlying structure:
8731 * struct __sk_buff -> struct sk_buff
8732 * struct bpf_sock_ops -> struct sock
9bac3d6d 8733 */
58e2af8b 8734static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 8735{
00176a34 8736 const struct bpf_verifier_ops *ops = env->ops;
f96da094 8737 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 8738 const int insn_cnt = env->prog->len;
36bbef52 8739 struct bpf_insn insn_buf[16], *insn;
46f53a65 8740 u32 target_size, size_default, off;
9bac3d6d 8741 struct bpf_prog *new_prog;
d691f9e8 8742 enum bpf_access_type type;
f96da094 8743 bool is_narrower_load;
9bac3d6d 8744
b09928b9
DB
8745 if (ops->gen_prologue || env->seen_direct_write) {
8746 if (!ops->gen_prologue) {
8747 verbose(env, "bpf verifier is misconfigured\n");
8748 return -EINVAL;
8749 }
36bbef52
DB
8750 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8751 env->prog);
8752 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 8753 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
8754 return -EINVAL;
8755 } else if (cnt) {
8041902d 8756 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
8757 if (!new_prog)
8758 return -ENOMEM;
8041902d 8759
36bbef52 8760 env->prog = new_prog;
3df126f3 8761 delta += cnt - 1;
36bbef52
DB
8762 }
8763 }
8764
c64b7983 8765 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
8766 return 0;
8767
3df126f3 8768 insn = env->prog->insnsi + delta;
36bbef52 8769
9bac3d6d 8770 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
8771 bpf_convert_ctx_access_t convert_ctx_access;
8772
62c7989b
DB
8773 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8774 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8775 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 8776 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 8777 type = BPF_READ;
62c7989b
DB
8778 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8779 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8780 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 8781 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
8782 type = BPF_WRITE;
8783 else
9bac3d6d
AS
8784 continue;
8785
af86ca4e
AS
8786 if (type == BPF_WRITE &&
8787 env->insn_aux_data[i + delta].sanitize_stack_off) {
8788 struct bpf_insn patch[] = {
8789 /* Sanitize suspicious stack slot with zero.
8790 * There are no memory dependencies for this store,
8791 * since it's only using frame pointer and immediate
8792 * constant of zero
8793 */
8794 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8795 env->insn_aux_data[i + delta].sanitize_stack_off,
8796 0),
8797 /* the original STX instruction will immediately
8798 * overwrite the same stack slot with appropriate value
8799 */
8800 *insn,
8801 };
8802
8803 cnt = ARRAY_SIZE(patch);
8804 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8805 if (!new_prog)
8806 return -ENOMEM;
8807
8808 delta += cnt - 1;
8809 env->prog = new_prog;
8810 insn = new_prog->insnsi + i + delta;
8811 continue;
8812 }
8813
c64b7983
JS
8814 switch (env->insn_aux_data[i + delta].ptr_type) {
8815 case PTR_TO_CTX:
8816 if (!ops->convert_ctx_access)
8817 continue;
8818 convert_ctx_access = ops->convert_ctx_access;
8819 break;
8820 case PTR_TO_SOCKET:
46f8bc92 8821 case PTR_TO_SOCK_COMMON:
c64b7983
JS
8822 convert_ctx_access = bpf_sock_convert_ctx_access;
8823 break;
655a51e5
MKL
8824 case PTR_TO_TCP_SOCK:
8825 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8826 break;
fada7fdc
JL
8827 case PTR_TO_XDP_SOCK:
8828 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8829 break;
2a02759e
AS
8830 case PTR_TO_BTF_ID:
8831 if (type == BPF_WRITE) {
8832 verbose(env, "Writes through BTF pointers are not allowed\n");
8833 return -EINVAL;
8834 }
8835 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
3dec541b 8836 env->prog->aux->num_exentries++;
2a02759e 8837 continue;
c64b7983 8838 default:
9bac3d6d 8839 continue;
c64b7983 8840 }
9bac3d6d 8841
31fd8581 8842 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 8843 size = BPF_LDST_BYTES(insn);
31fd8581
YS
8844
8845 /* If the read access is a narrower load of the field,
8846 * convert to a 4/8-byte load, to minimum program type specific
8847 * convert_ctx_access changes. If conversion is successful,
8848 * we will apply proper mask to the result.
8849 */
f96da094 8850 is_narrower_load = size < ctx_field_size;
46f53a65
AI
8851 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
8852 off = insn->off;
31fd8581 8853 if (is_narrower_load) {
f96da094
DB
8854 u8 size_code;
8855
8856 if (type == BPF_WRITE) {
61bd5218 8857 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
8858 return -EINVAL;
8859 }
31fd8581 8860
f96da094 8861 size_code = BPF_H;
31fd8581
YS
8862 if (ctx_field_size == 4)
8863 size_code = BPF_W;
8864 else if (ctx_field_size == 8)
8865 size_code = BPF_DW;
f96da094 8866
bc23105c 8867 insn->off = off & ~(size_default - 1);
31fd8581
YS
8868 insn->code = BPF_LDX | BPF_MEM | size_code;
8869 }
f96da094
DB
8870
8871 target_size = 0;
c64b7983
JS
8872 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
8873 &target_size);
f96da094
DB
8874 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
8875 (ctx_field_size && !target_size)) {
61bd5218 8876 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
8877 return -EINVAL;
8878 }
f96da094
DB
8879
8880 if (is_narrower_load && size < target_size) {
d895a0f1
IL
8881 u8 shift = bpf_ctx_narrow_access_offset(
8882 off, size, size_default) * 8;
46f53a65
AI
8883 if (ctx_field_size <= 4) {
8884 if (shift)
8885 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
8886 insn->dst_reg,
8887 shift);
31fd8581 8888 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 8889 (1 << size * 8) - 1);
46f53a65
AI
8890 } else {
8891 if (shift)
8892 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
8893 insn->dst_reg,
8894 shift);
31fd8581 8895 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 8896 (1ULL << size * 8) - 1);
46f53a65 8897 }
31fd8581 8898 }
9bac3d6d 8899
8041902d 8900 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
8901 if (!new_prog)
8902 return -ENOMEM;
8903
3df126f3 8904 delta += cnt - 1;
9bac3d6d
AS
8905
8906 /* keep walking new program and skip insns we just inserted */
8907 env->prog = new_prog;
3df126f3 8908 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
8909 }
8910
8911 return 0;
8912}
8913
1c2a088a
AS
8914static int jit_subprogs(struct bpf_verifier_env *env)
8915{
8916 struct bpf_prog *prog = env->prog, **func, *tmp;
8917 int i, j, subprog_start, subprog_end = 0, len, subprog;
7105e828 8918 struct bpf_insn *insn;
1c2a088a 8919 void *old_bpf_func;
c454a46b 8920 int err;
1c2a088a 8921
f910cefa 8922 if (env->subprog_cnt <= 1)
1c2a088a
AS
8923 return 0;
8924
7105e828 8925 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
8926 if (insn->code != (BPF_JMP | BPF_CALL) ||
8927 insn->src_reg != BPF_PSEUDO_CALL)
8928 continue;
c7a89784
DB
8929 /* Upon error here we cannot fall back to interpreter but
8930 * need a hard reject of the program. Thus -EFAULT is
8931 * propagated in any case.
8932 */
1c2a088a
AS
8933 subprog = find_subprog(env, i + insn->imm + 1);
8934 if (subprog < 0) {
8935 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
8936 i + insn->imm + 1);
8937 return -EFAULT;
8938 }
8939 /* temporarily remember subprog id inside insn instead of
8940 * aux_data, since next loop will split up all insns into funcs
8941 */
f910cefa 8942 insn->off = subprog;
1c2a088a
AS
8943 /* remember original imm in case JIT fails and fallback
8944 * to interpreter will be needed
8945 */
8946 env->insn_aux_data[i].call_imm = insn->imm;
8947 /* point imm to __bpf_call_base+1 from JITs point of view */
8948 insn->imm = 1;
8949 }
8950
c454a46b
MKL
8951 err = bpf_prog_alloc_jited_linfo(prog);
8952 if (err)
8953 goto out_undo_insn;
8954
8955 err = -ENOMEM;
6396bb22 8956 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 8957 if (!func)
c7a89784 8958 goto out_undo_insn;
1c2a088a 8959
f910cefa 8960 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 8961 subprog_start = subprog_end;
4cb3d99c 8962 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
8963
8964 len = subprog_end - subprog_start;
492ecee8
AS
8965 /* BPF_PROG_RUN doesn't call subprogs directly,
8966 * hence main prog stats include the runtime of subprogs.
8967 * subprogs don't have IDs and not reachable via prog_get_next_id
8968 * func[i]->aux->stats will never be accessed and stays NULL
8969 */
8970 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
8971 if (!func[i])
8972 goto out_free;
8973 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
8974 len * sizeof(struct bpf_insn));
4f74d809 8975 func[i]->type = prog->type;
1c2a088a 8976 func[i]->len = len;
4f74d809
DB
8977 if (bpf_prog_calc_tag(func[i]))
8978 goto out_free;
1c2a088a 8979 func[i]->is_func = 1;
ba64e7d8
YS
8980 func[i]->aux->func_idx = i;
8981 /* the btf and func_info will be freed only at prog->aux */
8982 func[i]->aux->btf = prog->aux->btf;
8983 func[i]->aux->func_info = prog->aux->func_info;
8984
1c2a088a
AS
8985 /* Use bpf_prog_F_tag to indicate functions in stack traces.
8986 * Long term would need debug info to populate names
8987 */
8988 func[i]->aux->name[0] = 'F';
9c8105bd 8989 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 8990 func[i]->jit_requested = 1;
c454a46b
MKL
8991 func[i]->aux->linfo = prog->aux->linfo;
8992 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
8993 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
8994 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
1c2a088a
AS
8995 func[i] = bpf_int_jit_compile(func[i]);
8996 if (!func[i]->jited) {
8997 err = -ENOTSUPP;
8998 goto out_free;
8999 }
9000 cond_resched();
9001 }
9002 /* at this point all bpf functions were successfully JITed
9003 * now populate all bpf_calls with correct addresses and
9004 * run last pass of JIT
9005 */
f910cefa 9006 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9007 insn = func[i]->insnsi;
9008 for (j = 0; j < func[i]->len; j++, insn++) {
9009 if (insn->code != (BPF_JMP | BPF_CALL) ||
9010 insn->src_reg != BPF_PSEUDO_CALL)
9011 continue;
9012 subprog = insn->off;
0d306c31
PB
9013 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9014 __bpf_call_base;
1c2a088a 9015 }
2162fed4
SD
9016
9017 /* we use the aux data to keep a list of the start addresses
9018 * of the JITed images for each function in the program
9019 *
9020 * for some architectures, such as powerpc64, the imm field
9021 * might not be large enough to hold the offset of the start
9022 * address of the callee's JITed image from __bpf_call_base
9023 *
9024 * in such cases, we can lookup the start address of a callee
9025 * by using its subprog id, available from the off field of
9026 * the call instruction, as an index for this list
9027 */
9028 func[i]->aux->func = func;
9029 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 9030 }
f910cefa 9031 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9032 old_bpf_func = func[i]->bpf_func;
9033 tmp = bpf_int_jit_compile(func[i]);
9034 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9035 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 9036 err = -ENOTSUPP;
1c2a088a
AS
9037 goto out_free;
9038 }
9039 cond_resched();
9040 }
9041
9042 /* finally lock prog and jit images for all functions and
9043 * populate kallsysm
9044 */
f910cefa 9045 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9046 bpf_prog_lock_ro(func[i]);
9047 bpf_prog_kallsyms_add(func[i]);
9048 }
7105e828
DB
9049
9050 /* Last step: make now unused interpreter insns from main
9051 * prog consistent for later dump requests, so they can
9052 * later look the same as if they were interpreted only.
9053 */
9054 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
9055 if (insn->code != (BPF_JMP | BPF_CALL) ||
9056 insn->src_reg != BPF_PSEUDO_CALL)
9057 continue;
9058 insn->off = env->insn_aux_data[i].call_imm;
9059 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 9060 insn->imm = subprog;
7105e828
DB
9061 }
9062
1c2a088a
AS
9063 prog->jited = 1;
9064 prog->bpf_func = func[0]->bpf_func;
9065 prog->aux->func = func;
f910cefa 9066 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 9067 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
9068 return 0;
9069out_free:
f910cefa 9070 for (i = 0; i < env->subprog_cnt; i++)
1c2a088a
AS
9071 if (func[i])
9072 bpf_jit_free(func[i]);
9073 kfree(func);
c7a89784 9074out_undo_insn:
1c2a088a
AS
9075 /* cleanup main prog to be interpreted */
9076 prog->jit_requested = 0;
9077 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9078 if (insn->code != (BPF_JMP | BPF_CALL) ||
9079 insn->src_reg != BPF_PSEUDO_CALL)
9080 continue;
9081 insn->off = 0;
9082 insn->imm = env->insn_aux_data[i].call_imm;
9083 }
c454a46b 9084 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
9085 return err;
9086}
9087
1ea47e01
AS
9088static int fixup_call_args(struct bpf_verifier_env *env)
9089{
19d28fbd 9090#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9091 struct bpf_prog *prog = env->prog;
9092 struct bpf_insn *insn = prog->insnsi;
9093 int i, depth;
19d28fbd 9094#endif
e4052d06 9095 int err = 0;
1ea47e01 9096
e4052d06
QM
9097 if (env->prog->jit_requested &&
9098 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
9099 err = jit_subprogs(env);
9100 if (err == 0)
1c2a088a 9101 return 0;
c7a89784
DB
9102 if (err == -EFAULT)
9103 return err;
19d28fbd
DM
9104 }
9105#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9106 for (i = 0; i < prog->len; i++, insn++) {
9107 if (insn->code != (BPF_JMP | BPF_CALL) ||
9108 insn->src_reg != BPF_PSEUDO_CALL)
9109 continue;
9110 depth = get_callee_stack_depth(env, insn, i);
9111 if (depth < 0)
9112 return depth;
9113 bpf_patch_call_args(insn, depth);
9114 }
19d28fbd
DM
9115 err = 0;
9116#endif
9117 return err;
1ea47e01
AS
9118}
9119
79741b3b 9120/* fixup insn->imm field of bpf_call instructions
81ed18ab 9121 * and inline eligible helpers as explicit sequence of BPF instructions
e245c5c6
AS
9122 *
9123 * this function is called after eBPF program passed verification
9124 */
79741b3b 9125static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 9126{
79741b3b 9127 struct bpf_prog *prog = env->prog;
d2e4c1e6 9128 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 9129 struct bpf_insn *insn = prog->insnsi;
e245c5c6 9130 const struct bpf_func_proto *fn;
79741b3b 9131 const int insn_cnt = prog->len;
09772d92 9132 const struct bpf_map_ops *ops;
c93552c4 9133 struct bpf_insn_aux_data *aux;
81ed18ab
AS
9134 struct bpf_insn insn_buf[16];
9135 struct bpf_prog *new_prog;
9136 struct bpf_map *map_ptr;
d2e4c1e6 9137 int i, ret, cnt, delta = 0;
e245c5c6 9138
79741b3b 9139 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
9140 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9141 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9142 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 9143 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
9144 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9145 struct bpf_insn mask_and_div[] = {
9146 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9147 /* Rx div 0 -> 0 */
9148 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9149 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9150 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9151 *insn,
9152 };
9153 struct bpf_insn mask_and_mod[] = {
9154 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9155 /* Rx mod 0 -> Rx */
9156 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9157 *insn,
9158 };
9159 struct bpf_insn *patchlet;
9160
9161 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9162 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9163 patchlet = mask_and_div + (is64 ? 1 : 0);
9164 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9165 } else {
9166 patchlet = mask_and_mod + (is64 ? 1 : 0);
9167 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9168 }
9169
9170 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
9171 if (!new_prog)
9172 return -ENOMEM;
9173
9174 delta += cnt - 1;
9175 env->prog = prog = new_prog;
9176 insn = new_prog->insnsi + i + delta;
9177 continue;
9178 }
9179
e0cea7ce
DB
9180 if (BPF_CLASS(insn->code) == BPF_LD &&
9181 (BPF_MODE(insn->code) == BPF_ABS ||
9182 BPF_MODE(insn->code) == BPF_IND)) {
9183 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9184 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9185 verbose(env, "bpf verifier is misconfigured\n");
9186 return -EINVAL;
9187 }
9188
9189 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9190 if (!new_prog)
9191 return -ENOMEM;
9192
9193 delta += cnt - 1;
9194 env->prog = prog = new_prog;
9195 insn = new_prog->insnsi + i + delta;
9196 continue;
9197 }
9198
979d63d5
DB
9199 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9200 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9201 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9202 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9203 struct bpf_insn insn_buf[16];
9204 struct bpf_insn *patch = &insn_buf[0];
9205 bool issrc, isneg;
9206 u32 off_reg;
9207
9208 aux = &env->insn_aux_data[i + delta];
3612af78
DB
9209 if (!aux->alu_state ||
9210 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
9211 continue;
9212
9213 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9214 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9215 BPF_ALU_SANITIZE_SRC;
9216
9217 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9218 if (isneg)
9219 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9220 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9221 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9222 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9223 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9224 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9225 if (issrc) {
9226 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9227 off_reg);
9228 insn->src_reg = BPF_REG_AX;
9229 } else {
9230 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9231 BPF_REG_AX);
9232 }
9233 if (isneg)
9234 insn->code = insn->code == code_add ?
9235 code_sub : code_add;
9236 *patch++ = *insn;
9237 if (issrc && isneg)
9238 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9239 cnt = patch - insn_buf;
9240
9241 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9242 if (!new_prog)
9243 return -ENOMEM;
9244
9245 delta += cnt - 1;
9246 env->prog = prog = new_prog;
9247 insn = new_prog->insnsi + i + delta;
9248 continue;
9249 }
9250
79741b3b
AS
9251 if (insn->code != (BPF_JMP | BPF_CALL))
9252 continue;
cc8b0b92
AS
9253 if (insn->src_reg == BPF_PSEUDO_CALL)
9254 continue;
e245c5c6 9255
79741b3b
AS
9256 if (insn->imm == BPF_FUNC_get_route_realm)
9257 prog->dst_needed = 1;
9258 if (insn->imm == BPF_FUNC_get_prandom_u32)
9259 bpf_user_rnd_init_once();
9802d865
JB
9260 if (insn->imm == BPF_FUNC_override_return)
9261 prog->kprobe_override = 1;
79741b3b 9262 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
9263 /* If we tail call into other programs, we
9264 * cannot make any assumptions since they can
9265 * be replaced dynamically during runtime in
9266 * the program array.
9267 */
9268 prog->cb_access = 1;
80a58d02 9269 env->prog->aux->stack_depth = MAX_BPF_STACK;
e647815a 9270 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 9271
79741b3b
AS
9272 /* mark bpf_tail_call as different opcode to avoid
9273 * conditional branch in the interpeter for every normal
9274 * call and to prevent accidental JITing by JIT compiler
9275 * that doesn't support bpf_tail_call yet
e245c5c6 9276 */
79741b3b 9277 insn->imm = 0;
71189fa9 9278 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 9279
c93552c4 9280 aux = &env->insn_aux_data[i + delta];
cc52d914
DB
9281 if (env->allow_ptr_leaks && !expect_blinding &&
9282 prog->jit_requested &&
d2e4c1e6
DB
9283 !bpf_map_key_poisoned(aux) &&
9284 !bpf_map_ptr_poisoned(aux) &&
9285 !bpf_map_ptr_unpriv(aux)) {
9286 struct bpf_jit_poke_descriptor desc = {
9287 .reason = BPF_POKE_REASON_TAIL_CALL,
9288 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9289 .tail_call.key = bpf_map_key_immediate(aux),
9290 };
9291
9292 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9293 if (ret < 0) {
9294 verbose(env, "adding tail call poke descriptor failed\n");
9295 return ret;
9296 }
9297
9298 insn->imm = ret + 1;
9299 continue;
9300 }
9301
c93552c4
DB
9302 if (!bpf_map_ptr_unpriv(aux))
9303 continue;
9304
b2157399
AS
9305 /* instead of changing every JIT dealing with tail_call
9306 * emit two extra insns:
9307 * if (index >= max_entries) goto out;
9308 * index &= array->index_mask;
9309 * to avoid out-of-bounds cpu speculation
9310 */
c93552c4 9311 if (bpf_map_ptr_poisoned(aux)) {
40950343 9312 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
9313 return -EINVAL;
9314 }
c93552c4 9315
d2e4c1e6 9316 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
9317 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9318 map_ptr->max_entries, 2);
9319 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9320 container_of(map_ptr,
9321 struct bpf_array,
9322 map)->index_mask);
9323 insn_buf[2] = *insn;
9324 cnt = 3;
9325 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9326 if (!new_prog)
9327 return -ENOMEM;
9328
9329 delta += cnt - 1;
9330 env->prog = prog = new_prog;
9331 insn = new_prog->insnsi + i + delta;
79741b3b
AS
9332 continue;
9333 }
e245c5c6 9334
89c63074 9335 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
9336 * and other inlining handlers are currently limited to 64 bit
9337 * only.
89c63074 9338 */
60b58afc 9339 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
9340 (insn->imm == BPF_FUNC_map_lookup_elem ||
9341 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
9342 insn->imm == BPF_FUNC_map_delete_elem ||
9343 insn->imm == BPF_FUNC_map_push_elem ||
9344 insn->imm == BPF_FUNC_map_pop_elem ||
9345 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
9346 aux = &env->insn_aux_data[i + delta];
9347 if (bpf_map_ptr_poisoned(aux))
9348 goto patch_call_imm;
9349
d2e4c1e6 9350 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
9351 ops = map_ptr->ops;
9352 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9353 ops->map_gen_lookup) {
9354 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9355 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9356 verbose(env, "bpf verifier is misconfigured\n");
9357 return -EINVAL;
9358 }
81ed18ab 9359
09772d92
DB
9360 new_prog = bpf_patch_insn_data(env, i + delta,
9361 insn_buf, cnt);
9362 if (!new_prog)
9363 return -ENOMEM;
81ed18ab 9364
09772d92
DB
9365 delta += cnt - 1;
9366 env->prog = prog = new_prog;
9367 insn = new_prog->insnsi + i + delta;
9368 continue;
9369 }
81ed18ab 9370
09772d92
DB
9371 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9372 (void *(*)(struct bpf_map *map, void *key))NULL));
9373 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9374 (int (*)(struct bpf_map *map, void *key))NULL));
9375 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9376 (int (*)(struct bpf_map *map, void *key, void *value,
9377 u64 flags))NULL));
84430d42
DB
9378 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9379 (int (*)(struct bpf_map *map, void *value,
9380 u64 flags))NULL));
9381 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9382 (int (*)(struct bpf_map *map, void *value))NULL));
9383 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9384 (int (*)(struct bpf_map *map, void *value))NULL));
9385
09772d92
DB
9386 switch (insn->imm) {
9387 case BPF_FUNC_map_lookup_elem:
9388 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9389 __bpf_call_base;
9390 continue;
9391 case BPF_FUNC_map_update_elem:
9392 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9393 __bpf_call_base;
9394 continue;
9395 case BPF_FUNC_map_delete_elem:
9396 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9397 __bpf_call_base;
9398 continue;
84430d42
DB
9399 case BPF_FUNC_map_push_elem:
9400 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9401 __bpf_call_base;
9402 continue;
9403 case BPF_FUNC_map_pop_elem:
9404 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9405 __bpf_call_base;
9406 continue;
9407 case BPF_FUNC_map_peek_elem:
9408 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9409 __bpf_call_base;
9410 continue;
09772d92 9411 }
81ed18ab 9412
09772d92 9413 goto patch_call_imm;
81ed18ab
AS
9414 }
9415
9416patch_call_imm:
5e43f899 9417 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
9418 /* all functions that have prototype and verifier allowed
9419 * programs to call them, must be real in-kernel functions
9420 */
9421 if (!fn->func) {
61bd5218
JK
9422 verbose(env,
9423 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
9424 func_id_name(insn->imm), insn->imm);
9425 return -EFAULT;
e245c5c6 9426 }
79741b3b 9427 insn->imm = fn->func - __bpf_call_base;
e245c5c6 9428 }
e245c5c6 9429
d2e4c1e6
DB
9430 /* Since poke tab is now finalized, publish aux to tracker. */
9431 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9432 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9433 if (!map_ptr->ops->map_poke_track ||
9434 !map_ptr->ops->map_poke_untrack ||
9435 !map_ptr->ops->map_poke_run) {
9436 verbose(env, "bpf verifier is misconfigured\n");
9437 return -EINVAL;
9438 }
9439
9440 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9441 if (ret < 0) {
9442 verbose(env, "tracking tail call prog failed\n");
9443 return ret;
9444 }
9445 }
9446
79741b3b
AS
9447 return 0;
9448}
e245c5c6 9449
58e2af8b 9450static void free_states(struct bpf_verifier_env *env)
f1bca824 9451{
58e2af8b 9452 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
9453 int i;
9454
9f4686c4
AS
9455 sl = env->free_list;
9456 while (sl) {
9457 sln = sl->next;
9458 free_verifier_state(&sl->state, false);
9459 kfree(sl);
9460 sl = sln;
9461 }
9462
f1bca824
AS
9463 if (!env->explored_states)
9464 return;
9465
dc2a4ebc 9466 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
9467 sl = env->explored_states[i];
9468
a8f500af
AS
9469 while (sl) {
9470 sln = sl->next;
9471 free_verifier_state(&sl->state, false);
9472 kfree(sl);
9473 sl = sln;
9474 }
f1bca824
AS
9475 }
9476
71dde681 9477 kvfree(env->explored_states);
f1bca824
AS
9478}
9479
06ee7115
AS
9480static void print_verification_stats(struct bpf_verifier_env *env)
9481{
9482 int i;
9483
9484 if (env->log.level & BPF_LOG_STATS) {
9485 verbose(env, "verification time %lld usec\n",
9486 div_u64(env->verification_time, 1000));
9487 verbose(env, "stack depth ");
9488 for (i = 0; i < env->subprog_cnt; i++) {
9489 u32 depth = env->subprog_info[i].stack_depth;
9490
9491 verbose(env, "%d", depth);
9492 if (i + 1 < env->subprog_cnt)
9493 verbose(env, "+");
9494 }
9495 verbose(env, "\n");
9496 }
9497 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9498 "total_states %d peak_states %d mark_read %d\n",
9499 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9500 env->max_states_per_insn, env->total_states,
9501 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
9502}
9503
38207291
MKL
9504static int check_attach_btf_id(struct bpf_verifier_env *env)
9505{
9506 struct bpf_prog *prog = env->prog;
5b92a28a 9507 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
38207291 9508 u32 btf_id = prog->aux->attach_btf_id;
f1b9509c 9509 const char prefix[] = "btf_trace_";
5b92a28a 9510 int ret = 0, subprog = -1, i;
fec56f58 9511 struct bpf_trampoline *tr;
38207291 9512 const struct btf_type *t;
5b92a28a 9513 bool conservative = true;
38207291 9514 const char *tname;
5b92a28a 9515 struct btf *btf;
fec56f58 9516 long addr;
5b92a28a 9517 u64 key;
38207291 9518
f1b9509c
AS
9519 if (prog->type != BPF_PROG_TYPE_TRACING)
9520 return 0;
38207291 9521
f1b9509c
AS
9522 if (!btf_id) {
9523 verbose(env, "Tracing programs must provide btf_id\n");
9524 return -EINVAL;
9525 }
5b92a28a
AS
9526 btf = bpf_prog_get_target_btf(prog);
9527 if (!btf) {
9528 verbose(env,
9529 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
9530 return -EINVAL;
9531 }
9532 t = btf_type_by_id(btf, btf_id);
f1b9509c
AS
9533 if (!t) {
9534 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
9535 return -EINVAL;
9536 }
5b92a28a 9537 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c
AS
9538 if (!tname) {
9539 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
9540 return -EINVAL;
9541 }
5b92a28a
AS
9542 if (tgt_prog) {
9543 struct bpf_prog_aux *aux = tgt_prog->aux;
9544
9545 for (i = 0; i < aux->func_info_cnt; i++)
9546 if (aux->func_info[i].type_id == btf_id) {
9547 subprog = i;
9548 break;
9549 }
9550 if (subprog == -1) {
9551 verbose(env, "Subprog %s doesn't exist\n", tname);
9552 return -EINVAL;
9553 }
9554 conservative = aux->func_info_aux[subprog].unreliable;
9555 key = ((u64)aux->id) << 32 | btf_id;
9556 } else {
9557 key = btf_id;
9558 }
f1b9509c
AS
9559
9560 switch (prog->expected_attach_type) {
9561 case BPF_TRACE_RAW_TP:
5b92a28a
AS
9562 if (tgt_prog) {
9563 verbose(env,
9564 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
9565 return -EINVAL;
9566 }
38207291
MKL
9567 if (!btf_type_is_typedef(t)) {
9568 verbose(env, "attach_btf_id %u is not a typedef\n",
9569 btf_id);
9570 return -EINVAL;
9571 }
f1b9509c 9572 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
38207291
MKL
9573 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
9574 btf_id, tname);
9575 return -EINVAL;
9576 }
9577 tname += sizeof(prefix) - 1;
5b92a28a 9578 t = btf_type_by_id(btf, t->type);
38207291
MKL
9579 if (!btf_type_is_ptr(t))
9580 /* should never happen in valid vmlinux build */
9581 return -EINVAL;
5b92a28a 9582 t = btf_type_by_id(btf, t->type);
38207291
MKL
9583 if (!btf_type_is_func_proto(t))
9584 /* should never happen in valid vmlinux build */
9585 return -EINVAL;
9586
9587 /* remember two read only pointers that are valid for
9588 * the life time of the kernel
9589 */
9590 prog->aux->attach_func_name = tname;
9591 prog->aux->attach_func_proto = t;
9592 prog->aux->attach_btf_trace = true;
f1b9509c 9593 return 0;
fec56f58
AS
9594 case BPF_TRACE_FENTRY:
9595 case BPF_TRACE_FEXIT:
9596 if (!btf_type_is_func(t)) {
9597 verbose(env, "attach_btf_id %u is not a function\n",
9598 btf_id);
9599 return -EINVAL;
9600 }
5b92a28a 9601 t = btf_type_by_id(btf, t->type);
fec56f58
AS
9602 if (!btf_type_is_func_proto(t))
9603 return -EINVAL;
5b92a28a 9604 tr = bpf_trampoline_lookup(key);
fec56f58
AS
9605 if (!tr)
9606 return -ENOMEM;
9607 prog->aux->attach_func_name = tname;
5b92a28a 9608 /* t is either vmlinux type or another program's type */
fec56f58
AS
9609 prog->aux->attach_func_proto = t;
9610 mutex_lock(&tr->mutex);
9611 if (tr->func.addr) {
9612 prog->aux->trampoline = tr;
9613 goto out;
9614 }
5b92a28a
AS
9615 if (tgt_prog && conservative) {
9616 prog->aux->attach_func_proto = NULL;
9617 t = NULL;
9618 }
9619 ret = btf_distill_func_proto(&env->log, btf, t,
fec56f58
AS
9620 tname, &tr->func.model);
9621 if (ret < 0)
9622 goto out;
5b92a28a
AS
9623 if (tgt_prog) {
9624 if (!tgt_prog->jited) {
9625 /* for now */
9626 verbose(env, "Can trace only JITed BPF progs\n");
9627 ret = -EINVAL;
9628 goto out;
9629 }
9630 if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
9631 /* prevent cycles */
9632 verbose(env, "Cannot recursively attach\n");
9633 ret = -EINVAL;
9634 goto out;
9635 }
e9eeec58
YS
9636 if (subprog == 0)
9637 addr = (long) tgt_prog->bpf_func;
9638 else
9639 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
9640 } else {
9641 addr = kallsyms_lookup_name(tname);
9642 if (!addr) {
9643 verbose(env,
9644 "The address of function %s cannot be found\n",
9645 tname);
9646 ret = -ENOENT;
9647 goto out;
9648 }
fec56f58
AS
9649 }
9650 tr->func.addr = (void *)addr;
9651 prog->aux->trampoline = tr;
9652out:
9653 mutex_unlock(&tr->mutex);
9654 if (ret)
9655 bpf_trampoline_put(tr);
9656 return ret;
f1b9509c
AS
9657 default:
9658 return -EINVAL;
38207291 9659 }
38207291
MKL
9660}
9661
838e9690
YS
9662int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9663 union bpf_attr __user *uattr)
51580e79 9664{
06ee7115 9665 u64 start_time = ktime_get_ns();
58e2af8b 9666 struct bpf_verifier_env *env;
b9193c1b 9667 struct bpf_verifier_log *log;
9e4c24e7 9668 int i, len, ret = -EINVAL;
e2ae4ca2 9669 bool is_priv;
51580e79 9670
eba0c929
AB
9671 /* no program is valid */
9672 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
9673 return -EINVAL;
9674
58e2af8b 9675 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
9676 * allocate/free it every time bpf_check() is called
9677 */
58e2af8b 9678 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
9679 if (!env)
9680 return -ENOMEM;
61bd5218 9681 log = &env->log;
cbd35700 9682
9e4c24e7 9683 len = (*prog)->len;
fad953ce 9684 env->insn_aux_data =
9e4c24e7 9685 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
9686 ret = -ENOMEM;
9687 if (!env->insn_aux_data)
9688 goto err_free_env;
9e4c24e7
JK
9689 for (i = 0; i < len; i++)
9690 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 9691 env->prog = *prog;
00176a34 9692 env->ops = bpf_verifier_ops[env->prog->type];
45a73c17 9693 is_priv = capable(CAP_SYS_ADMIN);
0246e64d 9694
8580ac94
AS
9695 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
9696 mutex_lock(&bpf_verifier_lock);
9697 if (!btf_vmlinux)
9698 btf_vmlinux = btf_parse_vmlinux();
9699 mutex_unlock(&bpf_verifier_lock);
9700 }
9701
cbd35700 9702 /* grab the mutex to protect few globals used by verifier */
45a73c17
AS
9703 if (!is_priv)
9704 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
9705
9706 if (attr->log_level || attr->log_buf || attr->log_size) {
9707 /* user requested verbose verifier output
9708 * and supplied buffer to store the verification trace
9709 */
e7bf8249
JK
9710 log->level = attr->log_level;
9711 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9712 log->len_total = attr->log_size;
cbd35700
AS
9713
9714 ret = -EINVAL;
e7bf8249 9715 /* log attributes have to be sane */
7a9f5c65 9716 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 9717 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 9718 goto err_unlock;
cbd35700 9719 }
1ad2f583 9720
8580ac94
AS
9721 if (IS_ERR(btf_vmlinux)) {
9722 /* Either gcc or pahole or kernel are broken. */
9723 verbose(env, "in-kernel BTF is malformed\n");
9724 ret = PTR_ERR(btf_vmlinux);
38207291 9725 goto skip_full_check;
8580ac94
AS
9726 }
9727
38207291
MKL
9728 ret = check_attach_btf_id(env);
9729 if (ret)
9730 goto skip_full_check;
9731
1ad2f583
DB
9732 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9733 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 9734 env->strict_alignment = true;
e9ee9efc
DM
9735 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9736 env->strict_alignment = false;
cbd35700 9737
e2ae4ca2
JK
9738 env->allow_ptr_leaks = is_priv;
9739
10d274e8
AS
9740 if (is_priv)
9741 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
9742
f4e3ec0d
JK
9743 ret = replace_map_fd_with_map_ptr(env);
9744 if (ret < 0)
9745 goto skip_full_check;
9746
cae1927c 9747 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 9748 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 9749 if (ret)
f4e3ec0d 9750 goto skip_full_check;
ab3f0063
JK
9751 }
9752
dc2a4ebc 9753 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 9754 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
9755 GFP_USER);
9756 ret = -ENOMEM;
9757 if (!env->explored_states)
9758 goto skip_full_check;
9759
d9762e84 9760 ret = check_subprogs(env);
475fb78f
AS
9761 if (ret < 0)
9762 goto skip_full_check;
9763
c454a46b 9764 ret = check_btf_info(env, attr, uattr);
838e9690
YS
9765 if (ret < 0)
9766 goto skip_full_check;
9767
d9762e84
MKL
9768 ret = check_cfg(env);
9769 if (ret < 0)
9770 goto skip_full_check;
9771
17a52670 9772 ret = do_check(env);
8c01c4f8
CG
9773 if (env->cur_state) {
9774 free_verifier_state(env->cur_state, true);
9775 env->cur_state = NULL;
9776 }
cbd35700 9777
c941ce9c
QM
9778 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
9779 ret = bpf_prog_offload_finalize(env);
9780
0246e64d 9781skip_full_check:
638f5b90 9782 while (!pop_stack(env, NULL, NULL));
f1bca824 9783 free_states(env);
0246e64d 9784
c131187d 9785 if (ret == 0)
9b38c405 9786 ret = check_max_stack_depth(env);
c131187d 9787
9b38c405 9788 /* instruction rewrites happen after this point */
e2ae4ca2
JK
9789 if (is_priv) {
9790 if (ret == 0)
9791 opt_hard_wire_dead_code_branches(env);
52875a04
JK
9792 if (ret == 0)
9793 ret = opt_remove_dead_code(env);
a1b14abc
JK
9794 if (ret == 0)
9795 ret = opt_remove_nops(env);
52875a04
JK
9796 } else {
9797 if (ret == 0)
9798 sanitize_dead_code(env);
e2ae4ca2
JK
9799 }
9800
9bac3d6d
AS
9801 if (ret == 0)
9802 /* program is valid, convert *(u32*)(ctx + off) accesses */
9803 ret = convert_ctx_accesses(env);
9804
e245c5c6 9805 if (ret == 0)
79741b3b 9806 ret = fixup_bpf_calls(env);
e245c5c6 9807
a4b1d3c1
JW
9808 /* do 32-bit optimization after insn patching has done so those patched
9809 * insns could be handled correctly.
9810 */
d6c2308c
JW
9811 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
9812 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
9813 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
9814 : false;
a4b1d3c1
JW
9815 }
9816
1ea47e01
AS
9817 if (ret == 0)
9818 ret = fixup_call_args(env);
9819
06ee7115
AS
9820 env->verification_time = ktime_get_ns() - start_time;
9821 print_verification_stats(env);
9822
a2a7d570 9823 if (log->level && bpf_verifier_log_full(log))
cbd35700 9824 ret = -ENOSPC;
a2a7d570 9825 if (log->level && !log->ubuf) {
cbd35700 9826 ret = -EFAULT;
a2a7d570 9827 goto err_release_maps;
cbd35700
AS
9828 }
9829
0246e64d
AS
9830 if (ret == 0 && env->used_map_cnt) {
9831 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
9832 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
9833 sizeof(env->used_maps[0]),
9834 GFP_KERNEL);
0246e64d 9835
9bac3d6d 9836 if (!env->prog->aux->used_maps) {
0246e64d 9837 ret = -ENOMEM;
a2a7d570 9838 goto err_release_maps;
0246e64d
AS
9839 }
9840
9bac3d6d 9841 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 9842 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 9843 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
9844
9845 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
9846 * bpf_ld_imm64 instructions
9847 */
9848 convert_pseudo_ld_imm64(env);
9849 }
cbd35700 9850
ba64e7d8
YS
9851 if (ret == 0)
9852 adjust_btf_func(env);
9853
a2a7d570 9854err_release_maps:
9bac3d6d 9855 if (!env->prog->aux->used_maps)
0246e64d 9856 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 9857 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
9858 */
9859 release_maps(env);
9bac3d6d 9860 *prog = env->prog;
3df126f3 9861err_unlock:
45a73c17
AS
9862 if (!is_priv)
9863 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
9864 vfree(env->insn_aux_data);
9865err_free_env:
9866 kfree(env);
51580e79
AS
9867 return ret;
9868}