// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

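/* For example, the BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter, ...)
 * entry in bpf_types.h expands here to (roughly):
 *
 *	[BPF_PROG_TYPE_SOCKET_FILTER] = &sk_filter_verifier_ops,
 *
 * so the table maps each program type to its verifier callbacks.
 */
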
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
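
/* For example (an illustrative sketch, not a program quoted from the tree),
 * a C-level BPF program that satisfies these reference rules looks like:
 *
 *	struct bpf_sock_tuple tuple = {};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 *
 * The lookup returns PTR_TO_SOCKET_OR_NULL with a fresh reference id, the
 * NULL check converts it to PTR_TO_SOCKET in the taken branch, and
 * bpf_sk_release() releases the id. Dropping the bpf_sk_release() call would
 * leave the reference unreleased and the program would be rejected.
 */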

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) + \
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

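/* map_ptr_state packs a struct bpf_map pointer and the "seen by unprivileged
 * program" flag into one unsigned long: the pointer is at least word aligned,
 * so bit 0 is free to hold BPF_MAP_PTR_UNPRIV, and BPF_MAP_PTR() masks it
 * back out. Illustrative values only: a map at 0xffff888012345670 recorded
 * with unpriv == true is stored as 0xffff888012345671.
 */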
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}

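/* map_key_state uses the same trick for the (constant) key of a map lookup:
 * bits 62 and 63 carry the SEEN and POISON flags and the remaining bits the
 * key value itself, which bpf_map_key_immediate() recovers by masking both
 * flags out. A key whose value cannot be tracked as a single constant is
 * expected to be poisoned by the caller of bpf_map_key_store().
 */
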
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int func_id;
	u32 btf_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MAP_VALUE ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_BTF_ID;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_BTF_ID_OR_NULL ||
	       type == PTR_TO_MEM_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
	       map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_MEM ||
	       type == PTR_TO_MEM_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release ||
	       func_id == BPF_FUNC_ringbuf_submit ||
	       func_id == BPF_FUNC_ringbuf_discard;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
	       func_id == BPF_FUNC_sk_lookup_udp ||
	       func_id == BPF_FUNC_skc_lookup_tcp ||
	       func_id == BPF_FUNC_map_lookup_elem ||
	       func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL]	= "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
	[PTR_TO_MEM]		= "mem",
	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose(env, ",s32_min_value=%d",
						(int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose(env, ",s32_max_value=%d",
						(int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose(env, ",u32_min_value=%d",
						(int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose(env, ",u32_max_value=%d",
						(int)(reg->u32_max_value));
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}

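/* A state line produced by the function above looks like (illustrative):
 *
 *   R1=ctx(id=0,off=0,imm=0) R6_w=inv(id=0) R10=fp0 fp-8=mmmm????
 *
 * i.e. R1 is a ctx pointer, R6 is an unknown scalar that was just written
 * ('w' liveness mark), R10 is the frame pointer, and the stack slot at fp-8
 * has four STACK_MISC and four STACK_INVALID bytes.
 */
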
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

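/* For reference, COPY_STATE_FN(reference, acquired_refs, refs, 1) above
 * expands to (approximately):
 *
 *	static int copy_reference_state(struct bpf_func_state *dst,
 *					const struct bpf_func_state *src)
 *	{
 *		if (!src->refs)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->refs, src->refs,
 *		       sizeof(*src->refs) * src->acquired_refs);
 *		return 0;
 *	}
 */
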
#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src has, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
			var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
			var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s32)reg->u32_max_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value;
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
	} else if ((s32)reg->u32_min_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

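/* Worked example for __reg64_deduce_bounds() (illustrative values): with
 * smin_value == -3 and smax_value == -1 the signed range lies entirely
 * below zero, so it cannot cross the sign boundary; reinterpreted as u64
 * the same range is 0xfffffffffffffffd u<= x u<= 0xffffffffffffffff, and
 * both pairs of bounds are tightened to the intersection of the signed
 * and unsigned ranges.
 */
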
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

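/* Example (illustrative): a register known to have its low two bits clear
 * has var_off (0; 0xfffffffffffffffc). If umin_value/umax_value say it is
 * also within [0, 15], tnum_range() yields (0; 0xf) and the intersection
 * is (0; 0xc): every bit except bits 2 and 3 is now known to be zero.
 */
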
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;
	/* Attempt to pull 32-bit signed bounds into 64-bit bounds
	 * but must be positive otherwise set to worse case bounds
	 * and refine later from tnum.
	 */
	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
		reg->smax_value = reg->s32_max_value;
	else
		reg->smax_value = U32_MAX;
	if (reg->s32_min_value >= 0)
		reg->smin_value = reg->s32_min_value;
	else
		reg->smin_value = 0;
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly,
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
		__update_reg_bounds(reg);
	}

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

static bool __reg64_bound_s32(s64 a)
{
	if (a > S32_MIN && a < S32_MAX)
		return true;
	return false;
}

static bool __reg64_bound_u32(u64 a)
{
	if (a > U32_MIN && a < U32_MAX)
		return true;
	return false;
}

static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
	__mark_reg32_unbounded(reg);

	if (__reg64_bound_s32(reg->smin_value))
		reg->s32_min_value = (s32)reg->smin_value;
	if (__reg64_bound_s32(reg->smax_value))
		reg->s32_max_value = (s32)reg->smax_value;
	if (__reg64_bound_u32(reg->umin_value))
		reg->u32_min_value = (u32)reg->umin_value;
	if (__reg64_bound_u32(reg->umax_value))
		reg->u32_max_value = (u32)reg->umax_value;

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->bpf_capable) {
			verbose(env,
				"function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}

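/* For example (illustrative layout): a 12-insn program whose only
 * bpf-to-bpf call sits at insn 3 with imm == 6 gets subprogram starts
 * {0, 10, 12} - the entry function, the callee starting at insn
 * 3 + 6 + 1 == 10, and the fake 'exit' subprog at insn_cnt - so insns
 * 0-9 and 10-11 form the two subprograms that all jumps must stay within.
 */
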
/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

5327ed3d
JW
1554/* This function is supposed to be used by the following 32-bit optimization
1555 * code only. It returns TRUE if the source or destination register operates
1556 * on 64-bit, otherwise return FALSE.
1557 */
1558static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1559 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1560{
1561 u8 code, class, op;
1562
1563 code = insn->code;
1564 class = BPF_CLASS(code);
1565 op = BPF_OP(code);
1566 if (class == BPF_JMP) {
1567 /* BPF_EXIT for "main" will reach here. Return TRUE
1568 * conservatively.
1569 */
1570 if (op == BPF_EXIT)
1571 return true;
1572 if (op == BPF_CALL) {
1573 /* BPF to BPF call will reach here because of marking
1574 * caller saved clobber with DST_OP_NO_MARK for which we
1575 * don't care the register def because they are anyway
1576 * marked as NOT_INIT already.
1577 */
1578 if (insn->src_reg == BPF_PSEUDO_CALL)
1579 return false;
1580 /* Helper call will reach here because of arg type
1581 * check, conservatively return TRUE.
1582 */
1583 if (t == SRC_OP)
1584 return true;
1585
1586 return false;
1587 }
1588 }
1589
1590 if (class == BPF_ALU64 || class == BPF_JMP ||
1591 /* BPF_END always uses the BPF_ALU class. */
1592 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1593 return true;
1594
1595 if (class == BPF_ALU || class == BPF_JMP32)
1596 return false;
1597
1598 if (class == BPF_LDX) {
1599 if (t != SRC_OP)
1600 return BPF_SIZE(code) == BPF_DW;
1601 /* LDX source must be ptr. */
1602 return true;
1603 }
1604
1605 if (class == BPF_STX) {
1606 if (reg->type != SCALAR_VALUE)
1607 return true;
1608 return BPF_SIZE(code) == BPF_DW;
1609 }
1610
1611 if (class == BPF_LD) {
1612 u8 mode = BPF_MODE(code);
1613
1614 /* LD_IMM64 */
1615 if (mode == BPF_IMM)
1616 return true;
1617
1618 /* Both LD_IND and LD_ABS return 32-bit data. */
1619 if (t != SRC_OP)
1620 return false;
1621
1622 /* Implicit ctx ptr. */
1623 if (regno == BPF_REG_6)
1624 return true;
1625
1626 /* Explicit source could be any width. */
1627 return true;
1628 }
1629
1630 if (class == BPF_ST)
1631 /* The only source register for BPF_ST is a ptr. */
1632 return true;
1633
1634 /* Conservatively return true by default. */
1635 return true;
1636}
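/* A few illustrative classifications of the rules above (a sketch,
 * using BPF asm mnemonics):
 *   r1 = r2                   BPF_ALU64|BPF_MOV      -> true
 *   w1 = w2                   BPF_ALU|BPF_MOV        -> false
 *   r1 = *(u64 *)(r10 - 8)    BPF_LDX|BPF_DW, as dst -> true
 *   r1 = *(u32 *)(r10 - 8)    BPF_LDX|BPF_W,  as dst -> false
 * The source operand of any LDX is an address and hence always
 * treated as 64-bit.
 */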
1637
b325fbca
JW
1638/* Return TRUE if INSN doesn't have an explicit value definition. */
1639static bool insn_no_def(struct bpf_insn *insn)
1640{
1641 u8 class = BPF_CLASS(insn->code);
1642
1643 return (class == BPF_JMP || class == BPF_JMP32 ||
1644 class == BPF_STX || class == BPF_ST);
1645}
1646
1647/* Return TRUE if INSN has defined any 32-bit value explicitly. */
1648static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1649{
1650 if (insn_no_def(insn))
1651 return false;
1652
1653 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1654}
1655
5327ed3d
JW
1656static void mark_insn_zext(struct bpf_verifier_env *env,
1657 struct bpf_reg_state *reg)
1658{
1659 s32 def_idx = reg->subreg_def;
1660
1661 if (def_idx == DEF_NOT_SUBREG)
1662 return;
1663
1664 env->insn_aux_data[def_idx - 1].zext_dst = true;
1665 /* The dst will be zero extended, so it won't be a sub-register anymore. */
1666 reg->subreg_def = DEF_NOT_SUBREG;
1667}
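/* Illustrative use (a sketch): if insn 5 is "w1 = w2", check_reg_arg()
 * records r1->subreg_def = 6 (insn_idx + 1). Should a later insn read
 * r1 as a full 64-bit register, mark_insn_zext() sets
 * insn_aux_data[5].zext_dst so the 32-bit optimization pass knows the
 * destination of insn 5 must stay zero extended.
 */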
1668
dc503a8a 1669static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
1670 enum reg_arg_type t)
1671{
f4d7e40a
AS
1672 struct bpf_verifier_state *vstate = env->cur_state;
1673 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 1674 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 1675 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 1676 bool rw64;
dc503a8a 1677
17a52670 1678 if (regno >= MAX_BPF_REG) {
61bd5218 1679 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
1680 return -EINVAL;
1681 }
1682
c342dc10 1683 reg = &regs[regno];
5327ed3d 1684 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
1685 if (t == SRC_OP) {
1686 /* check whether register used as source operand can be read */
c342dc10 1687 if (reg->type == NOT_INIT) {
61bd5218 1688 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
1689 return -EACCES;
1690 }
679c782d 1691 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
1692 if (regno == BPF_REG_FP)
1693 return 0;
1694
5327ed3d
JW
1695 if (rw64)
1696 mark_insn_zext(env, reg);
1697
1698 return mark_reg_read(env, reg, reg->parent,
1699 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
1700 } else {
1701 /* check whether register used as dest operand can be written to */
1702 if (regno == BPF_REG_FP) {
61bd5218 1703 verbose(env, "frame pointer is read only\n");
17a52670
AS
1704 return -EACCES;
1705 }
c342dc10 1706 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 1707 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 1708 if (t == DST_OP)
61bd5218 1709 mark_reg_unknown(env, regs, regno);
17a52670
AS
1710 }
1711 return 0;
1712}
1713
b5dc0163
AS
1714/* for any branch, call, exit record the history of jmps in the given state */
1715static int push_jmp_history(struct bpf_verifier_env *env,
1716 struct bpf_verifier_state *cur)
1717{
1718 u32 cnt = cur->jmp_history_cnt;
1719 struct bpf_idx_pair *p;
1720
1721 cnt++;
1722 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1723 if (!p)
1724 return -ENOMEM;
1725 p[cnt - 1].idx = env->insn_idx;
1726 p[cnt - 1].prev_idx = env->prev_insn_idx;
1727 cur->jmp_history = p;
1728 cur->jmp_history_cnt = cnt;
1729 return 0;
1730}
1731
1732/* Backtrack one insn at a time. If idx is not at the top of the recorded
1733 * history then the previous instruction came from straight-line execution.
1734 */
1735static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1736 u32 *history)
1737{
1738 u32 cnt = *history;
1739
1740 if (cnt && st->jmp_history[cnt - 1].idx == i) {
1741 i = st->jmp_history[cnt - 1].prev_idx;
1742 (*history)--;
1743 } else {
1744 i--;
1745 }
1746 return i;
1747}
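/* Worked example (a sketch): with jmp_history = [{idx = 7, prev_idx = 3}]
 * and backtracking currently at i == 7, the previous insn is 3 (the
 * recorded jump) and the history count drops to 0; at any other i the
 * previous insn is simply i - 1, i.e. straight-line execution.
 */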
1748
1749/* For a given verifier state backtrack_insn() is called from the last insn to
1750 * the first insn. Its purpose is to compute a bitmask of registers and
1751 * stack slots that need precision in the parent verifier state.
1752 */
1753static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1754 u32 *reg_mask, u64 *stack_mask)
1755{
1756 const struct bpf_insn_cbs cbs = {
1757 .cb_print = verbose,
1758 .private_data = env,
1759 };
1760 struct bpf_insn *insn = env->prog->insnsi + idx;
1761 u8 class = BPF_CLASS(insn->code);
1762 u8 opcode = BPF_OP(insn->code);
1763 u8 mode = BPF_MODE(insn->code);
1764 u32 dreg = 1u << insn->dst_reg;
1765 u32 sreg = 1u << insn->src_reg;
1766 u32 spi;
1767
1768 if (insn->code == 0)
1769 return 0;
1770 if (env->log.level & BPF_LOG_LEVEL) {
1771 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1772 verbose(env, "%d: ", idx);
1773 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1774 }
1775
1776 if (class == BPF_ALU || class == BPF_ALU64) {
1777 if (!(*reg_mask & dreg))
1778 return 0;
1779 if (opcode == BPF_MOV) {
1780 if (BPF_SRC(insn->code) == BPF_X) {
1781 /* dreg = sreg
1782 * dreg needs precision after this insn
1783 * sreg needs precision before this insn
1784 */
1785 *reg_mask &= ~dreg;
1786 *reg_mask |= sreg;
1787 } else {
1788 /* dreg = K
1789 * dreg needs precision after this insn.
1790 * Corresponding register is already marked
1791 * as precise=true in this verifier state.
1792 * No further markings in parent are necessary
1793 */
1794 *reg_mask &= ~dreg;
1795 }
1796 } else {
1797 if (BPF_SRC(insn->code) == BPF_X) {
1798 /* dreg += sreg
1799 * both dreg and sreg need precision
1800 * before this insn
1801 */
1802 *reg_mask |= sreg;
1803 } /* else dreg += K
1804 * dreg still needs precision before this insn
1805 */
1806 }
1807 } else if (class == BPF_LDX) {
1808 if (!(*reg_mask & dreg))
1809 return 0;
1810 *reg_mask &= ~dreg;
1811
1812 /* scalars can only be spilled into the stack w/o losing precision.
1813 * A load from any other memory can be zero extended.
1814 * The desire to keep that precision is already indicated
1815 * by the 'precise' mark in the corresponding register of this state.
1816 * No further tracking necessary.
1817 */
1818 if (insn->src_reg != BPF_REG_FP)
1819 return 0;
1820 if (BPF_SIZE(insn->code) != BPF_DW)
1821 return 0;
1822
1823 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1824 * that [fp - off] slot contains scalar that needs to be
1825 * tracked with precision
1826 */
1827 spi = (-insn->off - 1) / BPF_REG_SIZE;
1828 if (spi >= 64) {
1829 verbose(env, "BUG spi %d\n", spi);
1830 WARN_ONCE(1, "verifier backtracking bug");
1831 return -EFAULT;
1832 }
1833 *stack_mask |= 1ull << spi;
b3b50f05 1834 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1835 if (*reg_mask & dreg)
b3b50f05 1836 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1837 * to access memory. It means backtracking
1838 * encountered a case of pointer subtraction.
1839 */
1840 return -ENOTSUPP;
1841 /* scalars can only be spilled into stack */
1842 if (insn->dst_reg != BPF_REG_FP)
1843 return 0;
1844 if (BPF_SIZE(insn->code) != BPF_DW)
1845 return 0;
1846 spi = (-insn->off - 1) / BPF_REG_SIZE;
1847 if (spi >= 64) {
1848 verbose(env, "BUG spi %d\n", spi);
1849 WARN_ONCE(1, "verifier backtracking bug");
1850 return -EFAULT;
1851 }
1852 if (!(*stack_mask & (1ull << spi)))
1853 return 0;
1854 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1855 if (class == BPF_STX)
1856 *reg_mask |= sreg;
b5dc0163
AS
1857 } else if (class == BPF_JMP || class == BPF_JMP32) {
1858 if (opcode == BPF_CALL) {
1859 if (insn->src_reg == BPF_PSEUDO_CALL)
1860 return -ENOTSUPP;
1861 /* regular helper call sets R0 */
1862 *reg_mask &= ~1;
1863 if (*reg_mask & 0x3f) {
1864 /* if backtracing was looking for registers R1-R5
1865 * they should have been found already.
1866 */
1867 verbose(env, "BUG regs %x\n", *reg_mask);
1868 WARN_ONCE(1, "verifier backtracking bug");
1869 return -EFAULT;
1870 }
1871 } else if (opcode == BPF_EXIT) {
1872 return -ENOTSUPP;
1873 }
1874 } else if (class == BPF_LD) {
1875 if (!(*reg_mask & dreg))
1876 return 0;
1877 *reg_mask &= ~dreg;
1878 /* It's ld_imm64 or ld_abs or ld_ind.
1879 * For ld_imm64 no further tracking of precision
1880 * into parent is necessary
1881 */
1882 if (mode == BPF_IND || mode == BPF_ABS)
1883 /* to be analyzed */
1884 return -ENOTSUPP;
b5dc0163
AS
1885 }
1886 return 0;
1887}
1888
1889/* the scalar precision tracking algorithm:
1890 * . at the start all registers have precise=false.
1891 * . scalar ranges are tracked as normal through alu and jmp insns.
1892 * . once precise value of the scalar register is used in:
1893 * . ptr + scalar alu
1894 * . if (scalar cond K|scalar)
1895 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1896 * backtrack through the verifier states and mark all registers and
1897 * stack slots with spilled constants that these scalar registers
1898 * were derived from as precise.
1899 * . during state pruning two registers (or spilled stack slots)
1900 * are equivalent if both are not precise.
1901 *
1902 * Note the verifier cannot simply walk register parentage chain,
1903 * since many different registers and stack slots could have been
1904 * used to compute single precise scalar.
1905 *
1906 * The approach of starting with precise=true for all registers and then
1907 * backtracking to mark a register as not precise when the verifier detects
1908 * that the program doesn't care about the specific value (e.g., when a helper
1909 * takes the register as an ARG_ANYTHING parameter) is not safe.
1910 *
1911 * It's ok to walk a single parentage chain of the verifier states.
1912 * It's possible that this backtracking will go all the way to the 1st insn.
1913 * All other branches will be explored for needing precision later.
1914 *
1915 * The backtracking needs to deal with cases like:
1916 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1917 * r9 -= r8
1918 * r5 = r9
1919 * if r5 > 0x79f goto pc+7
1920 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1921 * r5 += 1
1922 * ...
1923 * call bpf_perf_event_output#25
1924 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1925 *
1926 * and this case:
1927 * r6 = 1
1928 * call foo // uses callee's r6 inside to compute r0
1929 * r0 += r6
1930 * if r0 == 0 goto
1931 *
1932 * to track above reg_mask/stack_mask needs to be independent for each frame.
1933 *
1934 * Also if parent's curframe > frame where backtracking started,
1935 * the verifier needs to mark registers in both frames, otherwise callees
1936 * may incorrectly prune callers. This is similar to
1937 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1938 *
1939 * For now backtracking falls back to conservative marking.
1940 */
1941static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1942 struct bpf_verifier_state *st)
1943{
1944 struct bpf_func_state *func;
1945 struct bpf_reg_state *reg;
1946 int i, j;
1947
1948 /* big hammer: mark all scalars precise in this path.
1949 * pop_stack may still get !precise scalars.
1950 */
1951 for (; st; st = st->parent)
1952 for (i = 0; i <= st->curframe; i++) {
1953 func = st->frame[i];
1954 for (j = 0; j < BPF_REG_FP; j++) {
1955 reg = &func->regs[j];
1956 if (reg->type != SCALAR_VALUE)
1957 continue;
1958 reg->precise = true;
1959 }
1960 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1961 if (func->stack[j].slot_type[0] != STACK_SPILL)
1962 continue;
1963 reg = &func->stack[j].spilled_ptr;
1964 if (reg->type != SCALAR_VALUE)
1965 continue;
1966 reg->precise = true;
1967 }
1968 }
1969}
1970
a3ce685d
AS
1971static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1972 int spi)
b5dc0163
AS
1973{
1974 struct bpf_verifier_state *st = env->cur_state;
1975 int first_idx = st->first_insn_idx;
1976 int last_idx = env->insn_idx;
1977 struct bpf_func_state *func;
1978 struct bpf_reg_state *reg;
a3ce685d
AS
1979 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1980 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 1981 bool skip_first = true;
a3ce685d 1982 bool new_marks = false;
b5dc0163
AS
1983 int i, err;
1984
2c78ee89 1985 if (!env->bpf_capable)
b5dc0163
AS
1986 return 0;
1987
1988 func = st->frame[st->curframe];
a3ce685d
AS
1989 if (regno >= 0) {
1990 reg = &func->regs[regno];
1991 if (reg->type != SCALAR_VALUE) {
1992 WARN_ONCE(1, "backtracing misuse");
1993 return -EFAULT;
1994 }
1995 if (!reg->precise)
1996 new_marks = true;
1997 else
1998 reg_mask = 0;
1999 reg->precise = true;
b5dc0163 2000 }
b5dc0163 2001
a3ce685d
AS
2002 while (spi >= 0) {
2003 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
2004 stack_mask = 0;
2005 break;
2006 }
2007 reg = &func->stack[spi].spilled_ptr;
2008 if (reg->type != SCALAR_VALUE) {
2009 stack_mask = 0;
2010 break;
2011 }
2012 if (!reg->precise)
2013 new_marks = true;
2014 else
2015 stack_mask = 0;
2016 reg->precise = true;
2017 break;
2018 }
2019
2020 if (!new_marks)
2021 return 0;
2022 if (!reg_mask && !stack_mask)
2023 return 0;
b5dc0163
AS
2024 for (;;) {
2025 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
2026 u32 history = st->jmp_history_cnt;
2027
2028 if (env->log.level & BPF_LOG_LEVEL)
2029 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2030 for (i = last_idx;;) {
2031 if (skip_first) {
2032 err = 0;
2033 skip_first = false;
2034 } else {
2035 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2036 }
2037 if (err == -ENOTSUPP) {
2038 mark_all_scalars_precise(env, st);
2039 return 0;
2040 } else if (err) {
2041 return err;
2042 }
2043 if (!reg_mask && !stack_mask)
2044 /* Found assignment(s) into tracked register in this state.
2045 * Since this state is already marked, just return.
2046 * Nothing to be tracked further in the parent state.
2047 */
2048 return 0;
2049 if (i == first_idx)
2050 break;
2051 i = get_prev_insn_idx(st, i, &history);
2052 if (i >= env->prog->len) {
2053 /* This can happen if backtracking reached insn 0
2054 * and there are still reg_mask or stack_mask bits
2055 * left to backtrack.
2056 * It means the backtracking missed the spot where
2057 * a particular register was initialized with a constant.
2058 */
2059 verbose(env, "BUG backtracking idx %d\n", i);
2060 WARN_ONCE(1, "verifier backtracking bug");
2061 return -EFAULT;
2062 }
2063 }
2064 st = st->parent;
2065 if (!st)
2066 break;
2067
a3ce685d 2068 new_marks = false;
b5dc0163
AS
2069 func = st->frame[st->curframe];
2070 bitmap_from_u64(mask, reg_mask);
2071 for_each_set_bit(i, mask, 32) {
2072 reg = &func->regs[i];
a3ce685d
AS
2073 if (reg->type != SCALAR_VALUE) {
2074 reg_mask &= ~(1u << i);
b5dc0163 2075 continue;
a3ce685d 2076 }
b5dc0163
AS
2077 if (!reg->precise)
2078 new_marks = true;
2079 reg->precise = true;
2080 }
2081
2082 bitmap_from_u64(mask, stack_mask);
2083 for_each_set_bit(i, mask, 64) {
2084 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
2085 /* the sequence of instructions:
2086 * 2: (bf) r3 = r10
2087 * 3: (7b) *(u64 *)(r3 -8) = r0
2088 * 4: (79) r4 = *(u64 *)(r10 -8)
2089 * doesn't contain jmps. It's backtracked
2090 * as a single block.
2091 * During backtracking insn 3 is not recognized as
2092 * stack access, so at the end of backtracking
2093 * stack slot fp-8 is still marked in stack_mask.
2094 * However the parent state may not have accessed
2095 * fp-8 and it's "unallocated" stack space.
2096 * In such a case fall back to conservative marking.
b5dc0163 2097 */
2339cd6c
AS
2098 mark_all_scalars_precise(env, st);
2099 return 0;
b5dc0163
AS
2100 }
2101
a3ce685d
AS
2102 if (func->stack[i].slot_type[0] != STACK_SPILL) {
2103 stack_mask &= ~(1ull << i);
b5dc0163 2104 continue;
a3ce685d 2105 }
b5dc0163 2106 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
2107 if (reg->type != SCALAR_VALUE) {
2108 stack_mask &= ~(1ull << i);
b5dc0163 2109 continue;
a3ce685d 2110 }
b5dc0163
AS
2111 if (!reg->precise)
2112 new_marks = true;
2113 reg->precise = true;
2114 }
2115 if (env->log.level & BPF_LOG_LEVEL) {
2116 print_verifier_state(env, func);
2117 verbose(env, "parent %s regs=%x stack=%llx marks\n",
2118 new_marks ? "didn't have" : "already had",
2119 reg_mask, stack_mask);
2120 }
2121
a3ce685d
AS
2122 if (!reg_mask && !stack_mask)
2123 break;
b5dc0163
AS
2124 if (!new_marks)
2125 break;
2126
2127 last_idx = st->last_insn_idx;
2128 first_idx = st->first_insn_idx;
2129 }
2130 return 0;
2131}
2132
a3ce685d
AS
2133static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2134{
2135 return __mark_chain_precision(env, regno, -1);
2136}
2137
2138static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2139{
2140 return __mark_chain_precision(env, -1, spi);
2141}
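/* Typical trigger (a sketch, following the algorithm described above):
 * for a sequence like
 *   r1 = 16
 *   *(u64 *)(r10 - 8) = r1
 *   ...
 *   r2 = *(u64 *)(r10 - 8)
 *   r3 += r2      // r3 is a map value pointer
 * the ptr + scalar alu makes the exact value of r2 matter, so
 * mark_chain_precision(env, 2) backtracks through the fill, the spill
 * at fp-8 and finally "r1 = 16", marking each contributor as precise.
 */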
b5dc0163 2142
1be7f75d
AS
2143static bool is_spillable_regtype(enum bpf_reg_type type)
2144{
2145 switch (type) {
2146 case PTR_TO_MAP_VALUE:
2147 case PTR_TO_MAP_VALUE_OR_NULL:
2148 case PTR_TO_STACK:
2149 case PTR_TO_CTX:
969bf05e 2150 case PTR_TO_PACKET:
de8f3a83 2151 case PTR_TO_PACKET_META:
969bf05e 2152 case PTR_TO_PACKET_END:
d58e468b 2153 case PTR_TO_FLOW_KEYS:
1be7f75d 2154 case CONST_PTR_TO_MAP:
c64b7983
JS
2155 case PTR_TO_SOCKET:
2156 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
2157 case PTR_TO_SOCK_COMMON:
2158 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
2159 case PTR_TO_TCP_SOCK:
2160 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 2161 case PTR_TO_XDP_SOCK:
65726b5b 2162 case PTR_TO_BTF_ID:
b121b341 2163 case PTR_TO_BTF_ID_OR_NULL:
1be7f75d
AS
2164 return true;
2165 default:
2166 return false;
2167 }
2168}
2169
cc2b14d5
AS
2170/* Does this register contain a constant zero? */
2171static bool register_is_null(struct bpf_reg_state *reg)
2172{
2173 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2174}
2175
f7cf25b2
AS
2176static bool register_is_const(struct bpf_reg_state *reg)
2177{
2178 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2179}
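/* Both helpers above rely on the tnum representation: var_off is a
 * (value, mask) pair where set mask bits are unknown. tnum_is_const()
 * holds iff mask == 0, and tnum_equals_const(var_off, 0) additionally
 * requires value == 0, i.e. the register is known to be exactly zero.
 */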
2180
6e7e63cb
JH
2181static bool __is_pointer_value(bool allow_ptr_leaks,
2182 const struct bpf_reg_state *reg)
2183{
2184 if (allow_ptr_leaks)
2185 return false;
2186
2187 return reg->type != SCALAR_VALUE;
2188}
2189
f7cf25b2
AS
2190static void save_register_state(struct bpf_func_state *state,
2191 int spi, struct bpf_reg_state *reg)
2192{
2193 int i;
2194
2195 state->stack[spi].spilled_ptr = *reg;
2196 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2197
2198 for (i = 0; i < BPF_REG_SIZE; i++)
2199 state->stack[spi].slot_type[i] = STACK_SPILL;
2200}
2201
17a52670
AS
2202/* check_stack_read/write functions track spill/fill of registers,
2203 * stack boundary and alignment are checked in check_mem_access()
2204 */
61bd5218 2205static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 2206 struct bpf_func_state *state, /* func where register points to */
af86ca4e 2207 int off, int size, int value_regno, int insn_idx)
17a52670 2208{
f4d7e40a 2209 struct bpf_func_state *cur; /* state of the current function */
638f5b90 2210 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 2211 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 2212 struct bpf_reg_state *reg = NULL;
638f5b90 2213
f4d7e40a 2214 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 2215 state->acquired_refs, true);
638f5b90
AS
2216 if (err)
2217 return err;
9c399760
AS
2218 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2219 * so it's an aligned access and [off, off + size) is within stack limits
2220 */
638f5b90
AS
2221 if (!env->allow_ptr_leaks &&
2222 state->stack[spi].slot_type[0] == STACK_SPILL &&
2223 size != BPF_REG_SIZE) {
2224 verbose(env, "attempt to corrupt spilled pointer on stack\n");
2225 return -EACCES;
2226 }
17a52670 2227
f4d7e40a 2228 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
2229 if (value_regno >= 0)
2230 reg = &cur->regs[value_regno];
17a52670 2231
f7cf25b2 2232 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
2c78ee89 2233 !register_is_null(reg) && env->bpf_capable) {
b5dc0163
AS
2234 if (dst_reg != BPF_REG_FP) {
2235 /* The backtracking logic can only recognize an explicit
2236 * stack slot address like [fp - 8]. Spilling a scalar
2237 * via a different register has to be conservative.
2238 * Backtrack from here and mark as precise all registers
2239 * that contributed to 'reg' being a constant.
2240 */
2241 err = mark_chain_precision(env, value_regno);
2242 if (err)
2243 return err;
2244 }
f7cf25b2
AS
2245 save_register_state(state, spi, reg);
2246 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 2247 /* register containing pointer is being spilled into stack */
9c399760 2248 if (size != BPF_REG_SIZE) {
f7cf25b2 2249 verbose_linfo(env, insn_idx, "; ");
61bd5218 2250 verbose(env, "invalid size of register spill\n");
17a52670
AS
2251 return -EACCES;
2252 }
2253
f7cf25b2 2254 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
2255 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2256 return -EINVAL;
2257 }
2258
2c78ee89 2259 if (!env->bypass_spec_v4) {
f7cf25b2 2260 bool sanitize = false;
17a52670 2261
f7cf25b2
AS
2262 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2263 register_is_const(&state->stack[spi].spilled_ptr))
2264 sanitize = true;
2265 for (i = 0; i < BPF_REG_SIZE; i++)
2266 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2267 sanitize = true;
2268 break;
2269 }
2270 if (sanitize) {
af86ca4e
AS
2271 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2272 int soff = (-spi - 1) * BPF_REG_SIZE;
2273
2274 /* detected reuse of an integer stack slot with a pointer,
2275 * which means either llvm is reusing the stack slot or
2276 * an attacker is trying to exploit CVE-2018-3639
2277 * (speculative store bypass).
2278 * Have to sanitize that slot with a preemptive
2279 * store of zero.
2280 */
2281 if (*poff && *poff != soff) {
2282 /* disallow programs where single insn stores
2283 * into two different stack slots, since verifier
2284 * cannot sanitize them
2285 */
2286 verbose(env,
2287 "insn %d cannot access two stack slots fp%d and fp%d",
2288 insn_idx, *poff, soff);
2289 return -EINVAL;
2290 }
2291 *poff = soff;
2292 }
af86ca4e 2293 }
f7cf25b2 2294 save_register_state(state, spi, reg);
9c399760 2295 } else {
cc2b14d5
AS
2296 u8 type = STACK_MISC;
2297
679c782d
EC
2298 /* regular write of data into stack destroys any spilled ptr */
2299 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2300 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2301 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2302 for (i = 0; i < BPF_REG_SIZE; i++)
2303 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2304
cc2b14d5
AS
2305 /* only mark the slot as written if all 8 bytes were written;
2306 * otherwise read propagation may incorrectly stop too soon
2307 * when stack slots are partially written.
2308 * This heuristic means that read propagation will be
2309 * conservative, since it will add reg_live_read marks
2310 * to stack slots all the way to the first state when a program
2311 * writes+reads less than 8 bytes.
2312 */
2313 if (size == BPF_REG_SIZE)
2314 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2315
2316 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2317 if (reg && register_is_null(reg)) {
2318 /* backtracking doesn't work for STACK_ZERO yet. */
2319 err = mark_chain_precision(env, value_regno);
2320 if (err)
2321 return err;
cc2b14d5 2322 type = STACK_ZERO;
b5dc0163 2323 }
cc2b14d5 2324
0bae2d4d 2325 /* Mark slots affected by this stack write. */
9c399760 2326 for (i = 0; i < size; i++)
638f5b90 2327 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2328 type;
17a52670
AS
2329 }
2330 return 0;
2331}
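/* Worked example (a sketch): an 8-byte spill at off = -8 yields
 * slot = 7 and spi = 0; spilling a PTR_TO_MAP_VALUE there marks all
 * eight slot_type bytes STACK_SPILL and copies the register into
 * stack[0].spilled_ptr. A narrower store, e.g. a 4-byte write of a
 * scalar to the same area, instead marks the written bytes STACK_MISC
 * and destroys any previously spilled pointer there.
 */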
2332
61bd5218 2333static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2334 struct bpf_func_state *reg_state /* func where register points to */,
2335 int off, int size, int value_regno)
17a52670 2336{
f4d7e40a
AS
2337 struct bpf_verifier_state *vstate = env->cur_state;
2338 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2339 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2340 struct bpf_reg_state *reg;
638f5b90 2341 u8 *stype;
17a52670 2342
f4d7e40a 2343 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2344 verbose(env, "invalid read from stack off %d+0 size %d\n",
2345 off, size);
2346 return -EACCES;
2347 }
f4d7e40a 2348 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2349 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2350
638f5b90 2351 if (stype[0] == STACK_SPILL) {
9c399760 2352 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2353 if (reg->type != SCALAR_VALUE) {
2354 verbose_linfo(env, env->insn_idx, "; ");
2355 verbose(env, "invalid size of register fill\n");
2356 return -EACCES;
2357 }
2358 if (value_regno >= 0) {
2359 mark_reg_unknown(env, state->regs, value_regno);
2360 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2361 }
2362 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2363 return 0;
17a52670 2364 }
9c399760 2365 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2366 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2367 verbose(env, "corrupted spill memory\n");
17a52670
AS
2368 return -EACCES;
2369 }
2370 }
2371
dc503a8a 2372 if (value_regno >= 0) {
17a52670 2373 /* restore register state from stack */
f7cf25b2 2374 state->regs[value_regno] = *reg;
2f18f62e
AS
2375 /* mark reg as written since spilled pointer state likely
2376 * has its liveness marks cleared by is_state_visited()
2377 * which resets stack/reg liveness for state transitions
2378 */
2379 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
6e7e63cb
JH
2380 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
2381 /* If value_regno==-1, the caller is asking us whether
2382 * it is acceptable to use this value as a SCALAR_VALUE
2383 * (e.g. for XADD).
2384 * We must not allow unprivileged callers to do that
2385 * with spilled pointers.
2386 */
2387 verbose(env, "leaking pointer from stack off %d\n",
2388 off);
2389 return -EACCES;
dc503a8a 2390 }
f7cf25b2 2391 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2392 } else {
cc2b14d5
AS
2393 int zeros = 0;
2394
17a52670 2395 for (i = 0; i < size; i++) {
cc2b14d5
AS
2396 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2397 continue;
2398 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2399 zeros++;
2400 continue;
17a52670 2401 }
cc2b14d5
AS
2402 verbose(env, "invalid read from stack off %d+%d size %d\n",
2403 off, i, size);
2404 return -EACCES;
2405 }
f7cf25b2 2406 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2407 if (value_regno >= 0) {
2408 if (zeros == size) {
2409 /* any size read into register is zero extended,
2410 * so the whole register == const_zero
2411 */
2412 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2413 /* backtracking doesn't support STACK_ZERO yet,
2414 * so mark the register as precise here, so that
2415 * later backtracking can stop at this point.
2416 * Backtracking may not need this if this register
2417 * doesn't participate in pointer adjustment.
2418 * Forward propagation of precise flag is not
2419 * necessary either. This mark is only to stop
2420 * backtracking. Any register that contributed
2421 * to const 0 was marked precise before spill.
2422 */
2423 state->regs[value_regno].precise = true;
cc2b14d5
AS
2424 } else {
2425 /* have read misc data from the stack */
2426 mark_reg_unknown(env, state->regs, value_regno);
2427 }
2428 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2429 }
17a52670 2430 }
f7cf25b2 2431 return 0;
17a52670
AS
2432}
2433
e4298d25
DB
2434static int check_stack_access(struct bpf_verifier_env *env,
2435 const struct bpf_reg_state *reg,
2436 int off, int size)
2437{
2438 /* Stack accesses must be at a fixed offset, so that we
2439 * can determine what type of data were returned. See
2440 * check_stack_read().
2441 */
2442 if (!tnum_is_const(reg->var_off)) {
2443 char tn_buf[48];
2444
2445 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2446 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2447 tn_buf, off, size);
2448 return -EACCES;
2449 }
2450
2451 if (off >= 0 || off < -MAX_BPF_STACK) {
2452 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2453 return -EACCES;
2454 }
2455
2456 return 0;
2457}
2458
591fe988
DB
2459static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2460 int off, int size, enum bpf_access_type type)
2461{
2462 struct bpf_reg_state *regs = cur_regs(env);
2463 struct bpf_map *map = regs[regno].map_ptr;
2464 u32 cap = bpf_map_flags_to_cap(map);
2465
2466 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2467 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2468 map->value_size, off, size);
2469 return -EACCES;
2470 }
2471
2472 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2473 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2474 map->value_size, off, size);
2475 return -EACCES;
2476 }
2477
2478 return 0;
2479}
2480
457f4436
AN
2481/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
2482static int __check_mem_access(struct bpf_verifier_env *env, int regno,
2483 int off, int size, u32 mem_size,
2484 bool zero_size_allowed)
17a52670 2485{
457f4436
AN
2486 bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
2487 struct bpf_reg_state *reg;
2488
2489 if (off >= 0 && size_ok && (u64)off + size <= mem_size)
2490 return 0;
17a52670 2491
457f4436
AN
2492 reg = &cur_regs(env)[regno];
2493 switch (reg->type) {
2494 case PTR_TO_MAP_VALUE:
61bd5218 2495 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
457f4436
AN
2496 mem_size, off, size);
2497 break;
2498 case PTR_TO_PACKET:
2499 case PTR_TO_PACKET_META:
2500 case PTR_TO_PACKET_END:
2501 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
2502 off, size, regno, reg->id, off, mem_size);
2503 break;
2504 case PTR_TO_MEM:
2505 default:
2506 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
2507 mem_size, off, size);
17a52670 2508 }
457f4436
AN
2509
2510 return -EACCES;
17a52670
AS
2511}
2512
457f4436
AN
2513/* check read/write into a memory region with possible variable offset */
2514static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
2515 int off, int size, u32 mem_size,
2516 bool zero_size_allowed)
dbcfe5f7 2517{
f4d7e40a
AS
2518 struct bpf_verifier_state *vstate = env->cur_state;
2519 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2520 struct bpf_reg_state *reg = &state->regs[regno];
2521 int err;
2522
457f4436 2523 /* We may have adjusted the register pointing to memory region, so we
f1174f77
EC
2524 * need to try adding each of min_value and max_value to off
2525 * to make sure our theoretical access will be safe.
dbcfe5f7 2526 */
06ee7115 2527 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2528 print_verifier_state(env, state);
b7137c4e 2529
dbcfe5f7
GB
2530 /* The minimum value is only important with signed
2531 * comparisons where we can't assume the floor of a
2532 * value is 0. If we are using signed variables for our
2533 * index'es we need to make sure that whatever we use
2534 * will have a set floor within our range.
2535 */
b7137c4e
DB
2536 if (reg->smin_value < 0 &&
2537 (reg->smin_value == S64_MIN ||
2538 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2539 reg->smin_value + off < 0)) {
61bd5218 2540 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2541 regno);
2542 return -EACCES;
2543 }
457f4436
AN
2544 err = __check_mem_access(env, regno, reg->smin_value + off, size,
2545 mem_size, zero_size_allowed);
dbcfe5f7 2546 if (err) {
457f4436 2547 verbose(env, "R%d min value is outside of the allowed memory range\n",
61bd5218 2548 regno);
dbcfe5f7
GB
2549 return err;
2550 }
2551
b03c9f9f
EC
2552 /* If we haven't set a max value then we need to bail since we can't be
2553 * sure we won't do bad things.
2554 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2555 */
b03c9f9f 2556 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
457f4436 2557 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
dbcfe5f7
GB
2558 regno);
2559 return -EACCES;
2560 }
457f4436
AN
2561 err = __check_mem_access(env, regno, reg->umax_value + off, size,
2562 mem_size, zero_size_allowed);
2563 if (err) {
2564 verbose(env, "R%d max value is outside of the allowed memory range\n",
61bd5218 2565 regno);
457f4436
AN
2566 return err;
2567 }
2568
2569 return 0;
2570}
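/* Worked example (a sketch): for a region with mem_size = 32 and a
 * register with smin_value = 0 and umax_value = 16, an access at
 * off = 4 with size = 8 passes both probes: 0 + 4 + 8 <= 32 for the
 * minimum and 16 + 4 + 8 <= 32 for the maximum. With umax_value = 24
 * the second probe would fail and the access is rejected.
 */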
d83525ca 2571
457f4436
AN
2572/* check read/write into a map element with possible variable offset */
2573static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2574 int off, int size, bool zero_size_allowed)
2575{
2576 struct bpf_verifier_state *vstate = env->cur_state;
2577 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2578 struct bpf_reg_state *reg = &state->regs[regno];
2579 struct bpf_map *map = reg->map_ptr;
2580 int err;
2581
2582 err = check_mem_region_access(env, regno, off, size, map->value_size,
2583 zero_size_allowed);
2584 if (err)
2585 return err;
2586
2587 if (map_value_has_spin_lock(map)) {
2588 u32 lock = map->spin_lock_off;
d83525ca
AS
2589
2590 /* if any part of struct bpf_spin_lock can be touched by
2591 * load/store reject this program.
2592 * To check that [x1, x2) overlaps with [y1, y2)
2593 * it is sufficient to check x1 < y2 && y1 < x2.
2594 */
2595 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2596 lock < reg->umax_value + off + size) {
2597 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2598 return -EACCES;
2599 }
2600 }
f1174f77 2601 return err;
dbcfe5f7
GB
2602}
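/* Worked overlap check (a sketch, for a register with a constant
 * offset): with spin_lock_off = 16 the lock occupies [16, 20). An
 * 8-byte access at value offset 12 covers [12, 20); 12 < 20 and
 * 16 < 20 both hold, so it is rejected. The same access at offset 4
 * covers [4, 12) and passes, since 16 < 12 is false.
 */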
2603
969bf05e
AS
2604#define MAX_PACKET_OFF 0xffff
2605
58e2af8b 2606static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2607 const struct bpf_call_arg_meta *meta,
2608 enum bpf_access_type t)
4acf6c0b 2609{
36bbef52 2610 switch (env->prog->type) {
5d66fa7d 2611 /* Program types only with direct read access go here! */
3a0af8fd
TG
2612 case BPF_PROG_TYPE_LWT_IN:
2613 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2614 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2615 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2616 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2617 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2618 if (t == BPF_WRITE)
2619 return false;
7e57fbb2 2620 /* fallthrough */
5d66fa7d
DB
2621
2622 /* Program types with direct read + write access go here! */
36bbef52
DB
2623 case BPF_PROG_TYPE_SCHED_CLS:
2624 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2625 case BPF_PROG_TYPE_XDP:
3a0af8fd 2626 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2627 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2628 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2629 if (meta)
2630 return meta->pkt_access;
2631
2632 env->seen_direct_write = true;
4acf6c0b 2633 return true;
0d01da6a
SF
2634
2635 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2636 if (t == BPF_WRITE)
2637 env->seen_direct_write = true;
2638
2639 return true;
2640
4acf6c0b
BB
2641 default:
2642 return false;
2643 }
2644}
2645
f1174f77 2646static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2647 int size, bool zero_size_allowed)
f1174f77 2648{
638f5b90 2649 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2650 struct bpf_reg_state *reg = &regs[regno];
2651 int err;
2652
2653 /* We may have added a variable offset to the packet pointer; but any
2654 * reg->range we have comes after that. We are only checking the fixed
2655 * offset.
2656 */
2657
2658 /* We don't allow negative numbers, because we aren't tracking enough
2659 * detail to prove they're safe.
2660 */
b03c9f9f 2661 if (reg->smin_value < 0) {
61bd5218 2662 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2663 regno);
2664 return -EACCES;
2665 }
457f4436
AN
2666 err = __check_mem_access(env, regno, off, size, reg->range,
2667 zero_size_allowed);
f1174f77 2668 if (err) {
61bd5218 2669 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2670 return err;
2671 }
e647815a 2672
457f4436 2673 /* __check_mem_access has made sure "off + size - 1" is within u16.
e647815a
JW
2674 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2675 * otherwise find_good_pkt_pointers would have refused to set range info
457f4436 2676 * that __check_mem_access would have rejected this pkt access.
e647815a
JW
2677 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2678 */
2679 env->prog->aux->max_pkt_offset =
2680 max_t(u32, env->prog->aux->max_pkt_offset,
2681 off + reg->umax_value + size - 1);
2682
f1174f77
EC
2683 return err;
2684}
2685
2686/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2687static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2688 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2689 u32 *btf_id)
17a52670 2690{
f96da094
DB
2691 struct bpf_insn_access_aux info = {
2692 .reg_type = *reg_type,
9e15db66 2693 .log = &env->log,
f96da094 2694 };
31fd8581 2695
4f9218aa 2696 if (env->ops->is_valid_access &&
5e43f899 2697 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2698 /* A non zero info.ctx_field_size indicates that this field is a
2699 * candidate for later verifier transformation to load the whole
2700 * field and then apply a mask when accessed with a narrower
2701 * access than actual ctx access size. A zero info.ctx_field_size
2702 * will only allow for whole field access and rejects any other
2703 * type of narrower access.
31fd8581 2704 */
23994631 2705 *reg_type = info.reg_type;
31fd8581 2706
b121b341 2707 if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
9e15db66
AS
2708 *btf_id = info.btf_id;
2709 else
2710 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2711 /* remember the offset of last byte accessed in ctx */
2712 if (env->prog->aux->max_ctx_offset < off + size)
2713 env->prog->aux->max_ctx_offset = off + size;
17a52670 2714 return 0;
32bbe007 2715 }
17a52670 2716
61bd5218 2717 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2718 return -EACCES;
2719}
2720
d58e468b
PP
2721static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2722 int size)
2723{
2724 if (size < 0 || off < 0 ||
2725 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2726 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2727 off, size);
2728 return -EACCES;
2729 }
2730 return 0;
2731}
2732
5f456649
MKL
2733static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2734 u32 regno, int off, int size,
2735 enum bpf_access_type t)
c64b7983
JS
2736{
2737 struct bpf_reg_state *regs = cur_regs(env);
2738 struct bpf_reg_state *reg = &regs[regno];
5f456649 2739 struct bpf_insn_access_aux info = {};
46f8bc92 2740 bool valid;
c64b7983
JS
2741
2742 if (reg->smin_value < 0) {
2743 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2744 regno);
2745 return -EACCES;
2746 }
2747
46f8bc92
MKL
2748 switch (reg->type) {
2749 case PTR_TO_SOCK_COMMON:
2750 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2751 break;
2752 case PTR_TO_SOCKET:
2753 valid = bpf_sock_is_valid_access(off, size, t, &info);
2754 break;
655a51e5
MKL
2755 case PTR_TO_TCP_SOCK:
2756 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2757 break;
fada7fdc
JL
2758 case PTR_TO_XDP_SOCK:
2759 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2760 break;
46f8bc92
MKL
2761 default:
2762 valid = false;
c64b7983
JS
2763 }
2764
5f456649 2765
46f8bc92
MKL
2766 if (valid) {
2767 env->insn_aux_data[insn_idx].ctx_field_size =
2768 info.ctx_field_size;
2769 return 0;
2770 }
2771
2772 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2773 regno, reg_type_str[reg->type], off, size);
2774
2775 return -EACCES;
c64b7983
JS
2776}
2777
2a159c6f
DB
2778static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2779{
2780 return cur_regs(env) + regno;
2781}
2782
4cabc5b1
DB
2783static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2784{
2a159c6f 2785 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2786}
2787
f37a8cb8
DB
2788static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2789{
2a159c6f 2790 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2791
46f8bc92
MKL
2792 return reg->type == PTR_TO_CTX;
2793}
2794
2795static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2796{
2797 const struct bpf_reg_state *reg = reg_state(env, regno);
2798
2799 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2800}
2801
ca369602
DB
2802static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2803{
2a159c6f 2804 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2805
2806 return type_is_pkt_pointer(reg->type);
2807}
2808
4b5defde
DB
2809static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2810{
2811 const struct bpf_reg_state *reg = reg_state(env, regno);
2812
2813 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2814 return reg->type == PTR_TO_FLOW_KEYS;
2815}
2816
61bd5218
JK
2817static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2818 const struct bpf_reg_state *reg,
d1174416 2819 int off, int size, bool strict)
969bf05e 2820{
f1174f77 2821 struct tnum reg_off;
e07b98d9 2822 int ip_align;
d1174416
DM
2823
2824 /* Byte size accesses are always allowed. */
2825 if (!strict || size == 1)
2826 return 0;
2827
e4eda884
DM
2828 /* For platforms that do not have a Kconfig enabling
2829 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2830 * NET_IP_ALIGN is universally set to '2'. And on platforms
2831 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2832 * to this code only in strict mode where we want to emulate
2833 * the NET_IP_ALIGN==2 checking. Therefore use an
2834 * unconditional IP align value of '2'.
e07b98d9 2835 */
e4eda884 2836 ip_align = 2;
f1174f77
EC
2837
2838 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2839 if (!tnum_is_aligned(reg_off, size)) {
2840 char tn_buf[48];
2841
2842 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2843 verbose(env,
2844 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2845 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2846 return -EACCES;
2847 }
79adffcd 2848
969bf05e
AS
2849 return 0;
2850}
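/* Worked example (a sketch): a 4-byte load at off = 12 from a packet
 * pointer with reg->off = 2 and a constant var_off of 0 checks the
 * alignment of ip_align + 2 + 12 = 16, which is 4-byte aligned and
 * passes; the same load at off = 11 would check 15 and fail in
 * strict mode.
 */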
2851
61bd5218
JK
2852static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2853 const struct bpf_reg_state *reg,
f1174f77
EC
2854 const char *pointer_desc,
2855 int off, int size, bool strict)
79adffcd 2856{
f1174f77
EC
2857 struct tnum reg_off;
2858
2859 /* Byte size accesses are always allowed. */
2860 if (!strict || size == 1)
2861 return 0;
2862
2863 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2864 if (!tnum_is_aligned(reg_off, size)) {
2865 char tn_buf[48];
2866
2867 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2868 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2869 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2870 return -EACCES;
2871 }
2872
969bf05e
AS
2873 return 0;
2874}
2875
e07b98d9 2876static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2877 const struct bpf_reg_state *reg, int off,
2878 int size, bool strict_alignment_once)
79adffcd 2879{
ca369602 2880 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2881 const char *pointer_desc = "";
d1174416 2882
79adffcd
DB
2883 switch (reg->type) {
2884 case PTR_TO_PACKET:
de8f3a83
DB
2885 case PTR_TO_PACKET_META:
2886 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2887 * right in front, treat it the very same way.
2888 */
61bd5218 2889 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2890 case PTR_TO_FLOW_KEYS:
2891 pointer_desc = "flow keys ";
2892 break;
f1174f77
EC
2893 case PTR_TO_MAP_VALUE:
2894 pointer_desc = "value ";
2895 break;
2896 case PTR_TO_CTX:
2897 pointer_desc = "context ";
2898 break;
2899 case PTR_TO_STACK:
2900 pointer_desc = "stack ";
a5ec6ae1
JH
2901 /* The stack spill tracking logic in check_stack_write()
2902 * and check_stack_read() relies on stack accesses being
2903 * aligned.
2904 */
2905 strict = true;
f1174f77 2906 break;
c64b7983
JS
2907 case PTR_TO_SOCKET:
2908 pointer_desc = "sock ";
2909 break;
46f8bc92
MKL
2910 case PTR_TO_SOCK_COMMON:
2911 pointer_desc = "sock_common ";
2912 break;
655a51e5
MKL
2913 case PTR_TO_TCP_SOCK:
2914 pointer_desc = "tcp_sock ";
2915 break;
fada7fdc
JL
2916 case PTR_TO_XDP_SOCK:
2917 pointer_desc = "xdp_sock ";
2918 break;
79adffcd 2919 default:
f1174f77 2920 break;
79adffcd 2921 }
61bd5218
JK
2922 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2923 strict);
79adffcd
DB
2924}
2925
f4d7e40a
AS
2926static int update_stack_depth(struct bpf_verifier_env *env,
2927 const struct bpf_func_state *func,
2928 int off)
2929{
9c8105bd 2930 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2931
2932 if (stack >= -off)
2933 return 0;
2934
2935 /* update known max for given subprogram */
9c8105bd 2936 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2937 return 0;
2938}
f4d7e40a 2939
70a87ffe
AS
2940/* starting from the main bpf function walk all instructions of the function
2941 * and recursively walk all callees that the given function can call.
2942 * Ignore jump and exit insns.
2943 * Since recursion is prevented by check_cfg() this algorithm
2944 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2945 */
2946static int check_max_stack_depth(struct bpf_verifier_env *env)
2947{
9c8105bd
JW
2948 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2949 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 2950 struct bpf_insn *insn = env->prog->insnsi;
70a87ffe
AS
2951 int ret_insn[MAX_CALL_FRAMES];
2952 int ret_prog[MAX_CALL_FRAMES];
f4d7e40a 2953
70a87ffe
AS
2954process_func:
2955 /* round up to 32 bytes, since this is the granularity
2956 * of the interpreter stack size
2957 */
9c8105bd 2958 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 2959 if (depth > MAX_BPF_STACK) {
f4d7e40a 2960 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 2961 frame + 1, depth);
f4d7e40a
AS
2962 return -EACCES;
2963 }
70a87ffe 2964continue_func:
4cb3d99c 2965 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
2966 for (; i < subprog_end; i++) {
2967 if (insn[i].code != (BPF_JMP | BPF_CALL))
2968 continue;
2969 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2970 continue;
2971 /* remember insn and function to return to */
2972 ret_insn[frame] = i + 1;
9c8105bd 2973 ret_prog[frame] = idx;
70a87ffe
AS
2974
2975 /* find the callee */
2976 i = i + insn[i].imm + 1;
9c8105bd
JW
2977 idx = find_subprog(env, i);
2978 if (idx < 0) {
70a87ffe
AS
2979 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2980 i);
2981 return -EFAULT;
2982 }
70a87ffe
AS
2983 frame++;
2984 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
2985 verbose(env, "the call stack of %d frames is too deep !\n",
2986 frame);
2987 return -E2BIG;
70a87ffe
AS
2988 }
2989 goto process_func;
2990 }
2991 /* end of for() loop means the last insn of the 'subprog'
2992 * was reached. Doesn't matter whether it was JA or EXIT
2993 */
2994 if (frame == 0)
2995 return 0;
9c8105bd 2996 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
2997 frame--;
2998 i = ret_insn[frame];
9c8105bd 2999 idx = ret_prog[frame];
70a87ffe 3000 goto continue_func;
f4d7e40a
AS
3001}
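/* Worked example (a sketch): if main uses 40 bytes of stack and calls
 * a subprog using 100 bytes, the accounted depth is
 * round_up(40, 32) + round_up(100, 32) = 64 + 128 = 192, well within
 * MAX_BPF_STACK (512). Deeper call chains accumulate the same way and
 * are rejected once the running total exceeds 512 bytes or the frame
 * count reaches MAX_CALL_FRAMES.
 */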
3002
19d28fbd 3003#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
3004static int get_callee_stack_depth(struct bpf_verifier_env *env,
3005 const struct bpf_insn *insn, int idx)
3006{
3007 int start = idx + insn->imm + 1, subprog;
3008
3009 subprog = find_subprog(env, start);
3010 if (subprog < 0) {
3011 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3012 start);
3013 return -EFAULT;
3014 }
9c8105bd 3015 return env->subprog_info[subprog].stack_depth;
1ea47e01 3016}
19d28fbd 3017#endif
1ea47e01 3018
51c39bb1
AS
3019int check_ctx_reg(struct bpf_verifier_env *env,
3020 const struct bpf_reg_state *reg, int regno)
58990d1f
DB
3021{
3022 /* Access to ctx or passing it to a helper is only allowed in
3023 * its original, unmodified form.
3024 */
3025
3026 if (reg->off) {
3027 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
3028 regno, reg->off);
3029 return -EACCES;
3030 }
3031
3032 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3033 char tn_buf[48];
3034
3035 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3036 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
3037 return -EACCES;
3038 }
3039
3040 return 0;
3041}
3042
9df1c28b
MM
3043static int check_tp_buffer_access(struct bpf_verifier_env *env,
3044 const struct bpf_reg_state *reg,
3045 int regno, int off, int size)
3046{
3047 if (off < 0) {
3048 verbose(env,
3049 "R%d invalid tracepoint buffer access: off=%d, size=%d",
3050 regno, off, size);
3051 return -EACCES;
3052 }
3053 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3054 char tn_buf[48];
3055
3056 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3057 verbose(env,
3058 "R%d invalid variable buffer offset: off=%d, var_off=%s",
3059 regno, off, tn_buf);
3060 return -EACCES;
3061 }
3062 if (off + size > env->prog->aux->max_tp_access)
3063 env->prog->aux->max_tp_access = off + size;
3064
3065 return 0;
3066}
3067
3f50f132
JF
3068/* The BPF architecture zero extends alu32 ops into 64-bit registers */
3069static void zext_32_to_64(struct bpf_reg_state *reg)
3070{
3071 reg->var_off = tnum_subreg(reg->var_off);
3072 __reg_assign_32_into_64(reg);
3073}
9df1c28b 3074
0c17d1d2
JH
3075/* truncate register to smaller size (in bytes)
3076 * must be called with size < BPF_REG_SIZE
3077 */
3078static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
3079{
3080 u64 mask;
3081
3082 /* clear high bits in bit representation */
3083 reg->var_off = tnum_cast(reg->var_off, size);
3084
3085 /* fix arithmetic bounds */
3086 mask = ((u64)1 << (size * 8)) - 1;
3087 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
3088 reg->umin_value &= mask;
3089 reg->umax_value &= mask;
3090 } else {
3091 reg->umin_value = 0;
3092 reg->umax_value = mask;
3093 }
3094 reg->smin_value = reg->umin_value;
3095 reg->smax_value = reg->umax_value;
3f50f132
JF
3096
3097 /* If the size is smaller than a 32-bit register, the 32-bit
3098 * values are also truncated, so we push the 64-bit bounds
3099 * into the 32-bit bounds. Above they were already truncated
3100 * to fewer than 32 bits. */
3101 if (size >= 4)
3102 return;
3103 __reg_combine_64_into_32(reg);
0c17d1d2
JH
3104}
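/* Worked example (a sketch): truncating to size = 1 uses mask = 0xff.
 * For umin_value = 0x120 and umax_value = 0x135 the bits above the
 * mask agree (both 0x100), so the bounds collapse to [0x20, 0x35];
 * had they disagreed, the range would widen to the conservative
 * [0, 0xff].
 */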
3105
a23740ec
AN
3106static bool bpf_map_is_rdonly(const struct bpf_map *map)
3107{
3108 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
3109}
3110
3111static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
3112{
3113 void *ptr;
3114 u64 addr;
3115 int err;
3116
3117 err = map->ops->map_direct_value_addr(map, &addr, off);
3118 if (err)
3119 return err;
2dedd7d2 3120 ptr = (void *)(long)addr + off;
a23740ec
AN
3121
3122 switch (size) {
3123 case sizeof(u8):
3124 *val = (u64)*(u8 *)ptr;
3125 break;
3126 case sizeof(u16):
3127 *val = (u64)*(u16 *)ptr;
3128 break;
3129 case sizeof(u32):
3130 *val = (u64)*(u32 *)ptr;
3131 break;
3132 case sizeof(u64):
3133 *val = *(u64 *)ptr;
3134 break;
3135 default:
3136 return -EINVAL;
3137 }
3138 return 0;
3139}
3140
9e15db66
AS
3141static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
3142 struct bpf_reg_state *regs,
3143 int regno, int off, int size,
3144 enum bpf_access_type atype,
3145 int value_regno)
3146{
3147 struct bpf_reg_state *reg = regs + regno;
3148 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
3149 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
3150 u32 btf_id;
3151 int ret;
3152
9e15db66
AS
3153 if (off < 0) {
3154 verbose(env,
3155 "R%d is ptr_%s invalid negative access: off=%d\n",
3156 regno, tname, off);
3157 return -EACCES;
3158 }
3159 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3160 char tn_buf[48];
3161
3162 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3163 verbose(env,
3164 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
3165 regno, tname, off, tn_buf);
3166 return -EACCES;
3167 }
3168
27ae7997
MKL
3169 if (env->ops->btf_struct_access) {
3170 ret = env->ops->btf_struct_access(&env->log, t, off, size,
3171 atype, &btf_id);
3172 } else {
3173 if (atype != BPF_READ) {
3174 verbose(env, "only read is supported\n");
3175 return -EACCES;
3176 }
3177
3178 ret = btf_struct_access(&env->log, t, off, size, atype,
3179 &btf_id);
3180 }
3181
9e15db66
AS
3182 if (ret < 0)
3183 return ret;
3184
8ff3571f 3185 if (atype == BPF_READ && value_regno >= 0) {
27ae7997
MKL
3186 if (ret == SCALAR_VALUE) {
3187 mark_reg_unknown(env, regs, value_regno);
3188 return 0;
3189 }
3190 mark_reg_known_zero(env, regs, value_regno);
3191 regs[value_regno].type = PTR_TO_BTF_ID;
3192 regs[value_regno].btf_id = btf_id;
9e15db66 3193 }
27ae7997 3194
9e15db66
AS
3195 return 0;
3196}
3197
17a52670
AS
3198/* check whether memory at (regno + off) is accessible for t = (read | write)
3199 * if t==write, value_regno is a register whose value is stored into memory
3200 * if t==read, value_regno is a register which will receive the value from memory
3201 * if t==write && value_regno==-1, some unknown value is stored into memory
3202 * if t==read && value_regno==-1, don't care what we read from memory
3203 */
ca369602
DB
3204static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
3205 int off, int bpf_size, enum bpf_access_type t,
3206 int value_regno, bool strict_alignment_once)
17a52670 3207{
638f5b90
AS
3208 struct bpf_reg_state *regs = cur_regs(env);
3209 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 3210 struct bpf_func_state *state;
17a52670
AS
3211 int size, err = 0;
3212
3213 size = bpf_size_to_bytes(bpf_size);
3214 if (size < 0)
3215 return size;
3216
f1174f77 3217 /* alignment checks will add in reg->off themselves */
ca369602 3218 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
3219 if (err)
3220 return err;
17a52670 3221
f1174f77
EC
3222 /* for access checks, reg->off is just part of off */
3223 off += reg->off;
3224
3225 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
3226 if (t == BPF_WRITE && value_regno >= 0 &&
3227 is_pointer_value(env, value_regno)) {
61bd5218 3228 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
3229 return -EACCES;
3230 }
591fe988
DB
3231 err = check_map_access_type(env, regno, off, size, t);
3232 if (err)
3233 return err;
9fd29c08 3234 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
3235 if (!err && t == BPF_READ && value_regno >= 0) {
3236 struct bpf_map *map = reg->map_ptr;
3237
3238 /* if map is read-only, track its contents as scalars */
3239 if (tnum_is_const(reg->var_off) &&
3240 bpf_map_is_rdonly(map) &&
3241 map->ops->map_direct_value_addr) {
3242 int map_off = off + reg->var_off.value;
3243 u64 val = 0;
3244
3245 err = bpf_map_direct_read(map, map_off, size,
3246 &val);
3247 if (err)
3248 return err;
3249
3250 regs[value_regno].type = SCALAR_VALUE;
3251 __mark_reg_known(&regs[value_regno], val);
3252 } else {
3253 mark_reg_unknown(env, regs, value_regno);
3254 }
3255 }
457f4436
AN
3256 } else if (reg->type == PTR_TO_MEM) {
3257 if (t == BPF_WRITE && value_regno >= 0 &&
3258 is_pointer_value(env, value_regno)) {
3259 verbose(env, "R%d leaks addr into mem\n", value_regno);
3260 return -EACCES;
3261 }
3262 err = check_mem_region_access(env, regno, off, size,
3263 reg->mem_size, false);
3264 if (!err && t == BPF_READ && value_regno >= 0)
3265 mark_reg_unknown(env, regs, value_regno);
1a0dc1ac 3266 } else if (reg->type == PTR_TO_CTX) {
f1174f77 3267 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 3268 u32 btf_id = 0;
19de99f7 3269
1be7f75d
AS
3270 if (t == BPF_WRITE && value_regno >= 0 &&
3271 is_pointer_value(env, value_regno)) {
61bd5218 3272 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
3273 return -EACCES;
3274 }
f1174f77 3275
58990d1f
DB
3276 err = check_ctx_reg(env, reg, regno);
3277 if (err < 0)
3278 return err;
3279
9e15db66
AS
3280 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
3281 if (err)
3282 verbose_linfo(env, insn_idx, "; ");
969bf05e 3283 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 3284 /* ctx access returns either a scalar, or a
de8f3a83
DB
3285 * PTR_TO_PACKET[_META,_END]. In the latter
3286 * case, we know the offset is zero.
f1174f77 3287 */
46f8bc92 3288 if (reg_type == SCALAR_VALUE) {
638f5b90 3289 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3290 } else {
638f5b90 3291 mark_reg_known_zero(env, regs,
61bd5218 3292 value_regno);
46f8bc92
MKL
3293 if (reg_type_may_be_null(reg_type))
3294 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
3295 /* A load of ctx field could have different
3296 * actual load size with the one encoded in the
3297 * insn. When the dst is PTR, it is for sure not
3298 * a sub-register.
3299 */
3300 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
b121b341
YS
3301 if (reg_type == PTR_TO_BTF_ID ||
3302 reg_type == PTR_TO_BTF_ID_OR_NULL)
9e15db66 3303 regs[value_regno].btf_id = btf_id;
46f8bc92 3304 }
638f5b90 3305 regs[value_regno].type = reg_type;
969bf05e 3306 }
17a52670 3307
f1174f77 3308 } else if (reg->type == PTR_TO_STACK) {
f1174f77 3309 off += reg->var_off.value;
e4298d25
DB
3310 err = check_stack_access(env, reg, off, size);
3311 if (err)
3312 return err;
8726679a 3313
f4d7e40a
AS
3314 state = func(env, reg);
3315 err = update_stack_depth(env, state, off);
3316 if (err)
3317 return err;
8726679a 3318
638f5b90 3319 if (t == BPF_WRITE)
61bd5218 3320 err = check_stack_write(env, state, off, size,
af86ca4e 3321 value_regno, insn_idx);
638f5b90 3322 else
61bd5218
JK
3323 err = check_stack_read(env, state, off, size,
3324 value_regno);
de8f3a83 3325 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3326 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3327 verbose(env, "cannot write into packet\n");
969bf05e
AS
3328 return -EACCES;
3329 }
4acf6c0b
BB
3330 if (t == BPF_WRITE && value_regno >= 0 &&
3331 is_pointer_value(env, value_regno)) {
61bd5218
JK
3332 verbose(env, "R%d leaks addr into packet\n",
3333 value_regno);
4acf6c0b
BB
3334 return -EACCES;
3335 }
9fd29c08 3336 err = check_packet_access(env, regno, off, size, false);
969bf05e 3337 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3338 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3339 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3340 if (t == BPF_WRITE && value_regno >= 0 &&
3341 is_pointer_value(env, value_regno)) {
3342 verbose(env, "R%d leaks addr into flow keys\n",
3343 value_regno);
3344 return -EACCES;
3345 }
3346
3347 err = check_flow_keys_access(env, off, size);
3348 if (!err && t == BPF_READ && value_regno >= 0)
3349 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3350 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3351 if (t == BPF_WRITE) {
46f8bc92
MKL
3352 verbose(env, "R%d cannot write into %s\n",
3353 regno, reg_type_str[reg->type]);
c64b7983
JS
3354 return -EACCES;
3355 }
5f456649 3356 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3357 if (!err && value_regno >= 0)
3358 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3359 } else if (reg->type == PTR_TO_TP_BUFFER) {
3360 err = check_tp_buffer_access(env, reg, regno, off, size);
3361 if (!err && t == BPF_READ && value_regno >= 0)
3362 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3363 } else if (reg->type == PTR_TO_BTF_ID) {
3364 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3365 value_regno);
17a52670 3366 } else {
61bd5218
JK
3367 verbose(env, "R%d invalid mem access '%s'\n", regno,
3368 reg_type_str[reg->type]);
17a52670
AS
3369 return -EACCES;
3370 }
969bf05e 3371
f1174f77 3372 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3373 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3374 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3375 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3376 }
17a52670
AS
3377 return err;
3378}
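/* Editorial example (not part of verifier.c): a minimal userspace sketch of
 * the zero-extension rule applied just above. Loads of 1/2/4 bytes into a
 * 64-bit register leave the upper bits as known zeros, which is what lets
 * coerce_reg_to_size() truncate the tracked bounds to 'size' bytes. The
 * function name below is illustrative only.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t load_zext(const void *src, int size)
{
	uint64_t v = 0;

	/* b/h/w loads copy only 'size' bytes; the upper bits stay zero */
	switch (size) {
	case 1: v = *(const uint8_t *)src; break;
	case 2: v = *(const uint16_t *)src; break;
	case 4: v = *(const uint32_t *)src; break;
	case 8: v = *(const uint64_t *)src; break;
	}
	return v;
}

int main(void)
{
	uint64_t x = 0xffffffffffffffffULL;

	assert(load_zext(&x, 4) == 0xffffffffULL);	/* upper 32 bits known 0 */
	assert(load_zext(&x, 1) == 0xffULL);
	return 0;
}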
3379
31fd8581 3380static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3381{
17a52670
AS
3382 int err;
3383
3384 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3385 insn->imm != 0) {
61bd5218 3386 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3387 return -EINVAL;
3388 }
3389
3390 /* check src1 operand */
dc503a8a 3391 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3392 if (err)
3393 return err;
3394
3395 /* check src2 operand */
dc503a8a 3396 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3397 if (err)
3398 return err;
3399
6bdf6abc 3400 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3401 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3402 return -EACCES;
3403 }
3404
ca369602 3405 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3406 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3407 is_flow_key_reg(env, insn->dst_reg) ||
3408 is_sk_reg(env, insn->dst_reg)) {
ca369602 3409 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3410 insn->dst_reg,
3411 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3412 return -EACCES;
3413 }
3414
17a52670 3415 /* check whether atomic_add can read the memory */
31fd8581 3416 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3417 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3418 if (err)
3419 return err;
3420
3421 /* check whether atomic_add can write into the same memory */
31fd8581 3422 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3423 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3424}
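/* Editorial example (not part of verifier.c): the instruction shape that
 * check_xadd() accepts -- BPF_STX | BPF_XADD with BPF_W or BPF_DW and
 * imm == 0. A sketch assuming the uapi <linux/bpf.h> definitions; in a real
 * program the target stack slot must have been written before the xadd,
 * since the read half of the access is checked first.
 */
#include <linux/bpf.h>

static const struct bpf_insn xadd_example = {
	.code    = BPF_STX | BPF_XADD | BPF_DW,	/* *(u64 *)(dst + off) += src */
	.dst_reg = BPF_REG_10,			/* PTR_TO_STACK: allowed dst */
	.src_reg = BPF_REG_0,
	.off     = -8,
	.imm     = 0,				/* any other value: -EINVAL */
};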
3425
2011fccf
AI
3426static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3427 int off, int access_size,
3428 bool zero_size_allowed)
3429{
3430 struct bpf_reg_state *reg = reg_state(env, regno);
3431
3432 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3433 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3434 if (tnum_is_const(reg->var_off)) {
3435 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3436 regno, off, access_size);
3437 } else {
3438 char tn_buf[48];
3439
3440 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3441 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3442 regno, tn_buf, access_size);
3443 }
3444 return -EACCES;
3445 }
3446 return 0;
3447}
3448
17a52670
AS
3449/* when register 'regno' is passed into function that will read 'access_size'
3450 * bytes from that pointer, make sure that it's within stack boundary
f1174f77
EC
3451 * and all elements of stack are initialized.
3452 * Unlike most pointer bounds-checking functions, this one doesn't take an
3453 * 'off' argument, so it has to add in reg->off itself.
17a52670 3454 */
58e2af8b 3455static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3456 int access_size, bool zero_size_allowed,
3457 struct bpf_call_arg_meta *meta)
17a52670 3458{
2a159c6f 3459 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3460 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3461 int err, min_off, max_off, i, j, slot, spi;
17a52670 3462
914cb781 3463 if (reg->type != PTR_TO_STACK) {
f1174f77 3464 /* Allow zero-byte read from NULL, regardless of pointer type */
8e2fe1d9 3465 if (zero_size_allowed && access_size == 0 &&
914cb781 3466 register_is_null(reg))
8e2fe1d9
DB
3467 return 0;
3468
61bd5218 3469 verbose(env, "R%d type=%s expected=%s\n", regno,
914cb781 3470 reg_type_str[reg->type],
8e2fe1d9 3471 reg_type_str[PTR_TO_STACK]);
17a52670 3472 return -EACCES;
8e2fe1d9 3473 }
17a52670 3474
2011fccf
AI
3475 if (tnum_is_const(reg->var_off)) {
3476 min_off = max_off = reg->var_off.value + reg->off;
3477 err = __check_stack_boundary(env, regno, min_off, access_size,
3478 zero_size_allowed);
3479 if (err)
3480 return err;
3481 } else {
088ec26d
AI
3482 /* Variable offset is prohibited in unprivileged mode for
3483 * simplicity since it requires corresponding support in
3484 * Spectre masking for stack ALU.
3485 * See also retrieve_ptr_limit().
3486 */
2c78ee89 3487 if (!env->bypass_spec_v1) {
088ec26d 3488 char tn_buf[48];
f1174f77 3489
088ec26d
AI
3490 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3491 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3492 regno, tn_buf);
3493 return -EACCES;
3494 }
f2bcd05e
AI
3495 /* Only an initialized buffer on the stack may be accessed
3496 * with a variable offset. With an uninitialized buffer it's
3497 * hard to guarantee that the whole memory is marked as
3498 * initialized on helper return, since the specific bounds are
3499 * unknown, which may cause uninitialized stack to leak.
3500 */
3501 if (meta && meta->raw_mode)
3502 meta = NULL;
3503
107c26a7
AI
3504 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3505 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3506 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3507 regno);
3508 return -EACCES;
3509 }
2011fccf 3510 min_off = reg->smin_value + reg->off;
107c26a7 3511 max_off = reg->smax_value + reg->off;
2011fccf
AI
3512 err = __check_stack_boundary(env, regno, min_off, access_size,
3513 zero_size_allowed);
107c26a7
AI
3514 if (err) {
3515 verbose(env, "R%d min value is outside of stack bound\n",
3516 regno);
2011fccf 3517 return err;
107c26a7 3518 }
2011fccf
AI
3519 err = __check_stack_boundary(env, regno, max_off, access_size,
3520 zero_size_allowed);
107c26a7
AI
3521 if (err) {
3522 verbose(env, "R%d max value is outside of stack bound\n",
3523 regno);
2011fccf 3524 return err;
107c26a7 3525 }
17a52670
AS
3526 }
3527
435faee1
DB
3528 if (meta && meta->raw_mode) {
3529 meta->access_size = access_size;
3530 meta->regno = regno;
3531 return 0;
3532 }
3533
2011fccf 3534 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3535 u8 *stype;
3536
2011fccf 3537 slot = -i - 1;
638f5b90 3538 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3539 if (state->allocated_stack <= slot)
3540 goto err;
3541 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3542 if (*stype == STACK_MISC)
3543 goto mark;
3544 if (*stype == STACK_ZERO) {
3545 /* helper can write anything into the stack */
3546 *stype = STACK_MISC;
3547 goto mark;
17a52670 3548 }
1d68f22b
YS
3549
3550 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3551 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
3552 goto mark;
3553
f7cf25b2
AS
3554 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3555 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
f54c7898 3556 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
f7cf25b2
AS
3557 for (j = 0; j < BPF_REG_SIZE; j++)
3558 state->stack[spi].slot_type[j] = STACK_MISC;
3559 goto mark;
3560 }
3561
cc2b14d5 3562err:
2011fccf
AI
3563 if (tnum_is_const(reg->var_off)) {
3564 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3565 min_off, i - min_off, access_size);
3566 } else {
3567 char tn_buf[48];
3568
3569 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3570 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3571 tn_buf, i - min_off, access_size);
3572 }
cc2b14d5
AS
3573 return -EACCES;
3574mark:
3575 /* reading any byte out of 8-byte 'spill_slot' will cause
3576 * the whole slot to be marked as 'read'
3577 */
679c782d 3578 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3579 state->stack[spi].spilled_ptr.parent,
3580 REG_LIVE_READ64);
17a52670 3581 }
2011fccf 3582 return update_stack_depth(env, state, min_off);
17a52670
AS
3583}
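/* Editorial example (not part of verifier.c): the helper-argument pattern
 * the two functions above validate. bpf_probe_read_kernel() declares its
 * buffer as ARG_PTR_TO_UNINIT_MEM (raw_mode), so the stack slots only need
 * to be in bounds -- the helper initializes them. A BPF C sketch assuming
 * clang -target bpf and libbpf's bpf_helpers.h; the attach point is
 * illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int stack_buf_demo(void *ctx)
{
	char buf[16];	/* may stay uninitialized: raw_mode argument */

	/* verifier: fp-16..fp-1 is within bounds, helper fills the bytes */
	bpf_probe_read_kernel(buf, sizeof(buf), ctx);
	return 0;
}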
3584
06c1c049
GB
3585static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3586 int access_size, bool zero_size_allowed,
3587 struct bpf_call_arg_meta *meta)
3588{
638f5b90 3589 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3590
f1174f77 3591 switch (reg->type) {
06c1c049 3592 case PTR_TO_PACKET:
de8f3a83 3593 case PTR_TO_PACKET_META:
9fd29c08
YS
3594 return check_packet_access(env, regno, reg->off, access_size,
3595 zero_size_allowed);
06c1c049 3596 case PTR_TO_MAP_VALUE:
591fe988
DB
3597 if (check_map_access_type(env, regno, reg->off, access_size,
3598 meta && meta->raw_mode ? BPF_WRITE :
3599 BPF_READ))
3600 return -EACCES;
9fd29c08
YS
3601 return check_map_access(env, regno, reg->off, access_size,
3602 zero_size_allowed);
457f4436
AN
3603 case PTR_TO_MEM:
3604 return check_mem_region_access(env, regno, reg->off,
3605 access_size, reg->mem_size,
3606 zero_size_allowed);
f1174f77 3607 default: /* scalar_value|ptr_to_stack or invalid ptr */
06c1c049
GB
3608 return check_stack_boundary(env, regno, access_size,
3609 zero_size_allowed, meta);
3610 }
3611}
3612
d83525ca
AS
3613/* Implementation details:
3614 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3615 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3616 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3617 * value_or_null->value transition, since the verifier only cares about
3618 * the range of access to valid map value pointer and doesn't care about actual
3619 * address of the map element.
3620 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3621 * reg->id > 0 after value_or_null->value transition. By doing so
3622 * two bpf_map_lookups will be considered two different pointers that
3623 * point to different bpf_spin_locks.
3624 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3625 * deadlocks.
3626 * Since only one bpf_spin_lock is allowed the checks are simpler than
3627 * reg_is_refcounted() logic. The verifier needs to remember only
3628 * one spin_lock instead of array of acquired_refs.
3629 * cur_state->active_spin_lock remembers which map value element got locked
3630 * and clears it after bpf_spin_unlock.
3631 */
3632static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3633 bool is_lock)
3634{
3635 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3636 struct bpf_verifier_state *cur = env->cur_state;
3637 bool is_const = tnum_is_const(reg->var_off);
3638 struct bpf_map *map = reg->map_ptr;
3639 u64 val = reg->var_off.value;
3640
3641 if (reg->type != PTR_TO_MAP_VALUE) {
3642 verbose(env, "R%d is not a pointer to map_value\n", regno);
3643 return -EINVAL;
3644 }
3645 if (!is_const) {
3646 verbose(env,
3647 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3648 regno);
3649 return -EINVAL;
3650 }
3651 if (!map->btf) {
3652 verbose(env,
3653 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3654 map->name);
3655 return -EINVAL;
3656 }
3657 if (!map_value_has_spin_lock(map)) {
3658 if (map->spin_lock_off == -E2BIG)
3659 verbose(env,
3660 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3661 map->name);
3662 else if (map->spin_lock_off == -ENOENT)
3663 verbose(env,
3664 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3665 map->name);
3666 else
3667 verbose(env,
3668 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3669 map->name);
3670 return -EINVAL;
3671 }
3672 if (map->spin_lock_off != val + reg->off) {
3673 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3674 val + reg->off);
3675 return -EINVAL;
3676 }
3677 if (is_lock) {
3678 if (cur->active_spin_lock) {
3679 verbose(env,
3680 "Locking two bpf_spin_locks are not allowed\n");
3681 return -EINVAL;
3682 }
3683 cur->active_spin_lock = reg->id;
3684 } else {
3685 if (!cur->active_spin_lock) {
3686 verbose(env, "bpf_spin_unlock without taking a lock\n");
3687 return -EINVAL;
3688 }
3689 if (cur->active_spin_lock != reg->id) {
3690 verbose(env, "bpf_spin_unlock of different lock\n");
3691 return -EINVAL;
3692 }
3693 cur->active_spin_lock = 0;
3694 }
3695 return 0;
3696}
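/* Editorial example (not part of verifier.c): the map-value layout that
 * process_spin_lock() expects -- exactly one 'struct bpf_spin_lock',
 * located via the map's BTF at a constant offset. BPF C sketch; map and
 * field names are illustrative.
 */
struct val_with_lock {
	struct bpf_spin_lock lock;	/* map->spin_lock_off points here */
	long counter;
};

/* In program context:
 *	struct val_with_lock *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);	// sets cur->active_spin_lock
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);	// clears it again
 *	}
 * Taking a second lock before the unlock trips the "Locking two
 * bpf_spin_locks" check above.
 */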
3697
90133415
DB
3698static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3699{
3700 return type == ARG_PTR_TO_MEM ||
3701 type == ARG_PTR_TO_MEM_OR_NULL ||
3702 type == ARG_PTR_TO_UNINIT_MEM;
3703}
3704
3705static bool arg_type_is_mem_size(enum bpf_arg_type type)
3706{
3707 return type == ARG_CONST_SIZE ||
3708 type == ARG_CONST_SIZE_OR_ZERO;
3709}
3710
457f4436
AN
3711static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
3712{
3713 return type == ARG_PTR_TO_ALLOC_MEM ||
3714 type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
3715}
3716
3717static bool arg_type_is_alloc_size(enum bpf_arg_type type)
3718{
3719 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
3720}
3721
57c3bb72
AI
3722static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3723{
3724 return type == ARG_PTR_TO_INT ||
3725 type == ARG_PTR_TO_LONG;
3726}
3727
3728static int int_ptr_type_to_size(enum bpf_arg_type type)
3729{
3730 if (type == ARG_PTR_TO_INT)
3731 return sizeof(u32);
3732 else if (type == ARG_PTR_TO_LONG)
3733 return sizeof(u64);
3734
3735 return -EINVAL;
3736}
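/* Editorial example (not part of verifier.c): a helper that takes
 * ARG_PTR_TO_LONG, e.g. bpf_strtol(). The int_ptr path handled below makes
 * the verifier check sizeof(u64) stack bytes plus strict alignment for the
 * result pointer. In BPF C (sketch):
 *
 *	long res;
 *	bpf_strtol(str, str_len, 0, &res);	// &res: 8 aligned stack bytes
 */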
3737
58e2af8b 3738static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
33ff9823
DB
3739 enum bpf_arg_type arg_type,
3740 struct bpf_call_arg_meta *meta)
17a52670 3741{
638f5b90 3742 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6841de8b 3743 enum bpf_reg_type expected_type, type = reg->type;
17a52670
AS
3744 int err = 0;
3745
80f1d68c 3746 if (arg_type == ARG_DONTCARE)
17a52670
AS
3747 return 0;
3748
dc503a8a
EC
3749 err = check_reg_arg(env, regno, SRC_OP);
3750 if (err)
3751 return err;
17a52670 3752
1be7f75d
AS
3753 if (arg_type == ARG_ANYTHING) {
3754 if (is_pointer_value(env, regno)) {
61bd5218
JK
3755 verbose(env, "R%d leaks addr into helper function\n",
3756 regno);
1be7f75d
AS
3757 return -EACCES;
3758 }
80f1d68c 3759 return 0;
1be7f75d 3760 }
80f1d68c 3761
de8f3a83 3762 if (type_is_pkt_pointer(type) &&
3a0af8fd 3763 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 3764 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
3765 return -EACCES;
3766 }
3767
8e2fe1d9 3768 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2ea864c5 3769 arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3770 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3771 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
17a52670 3772 expected_type = PTR_TO_STACK;
6ac99e8f
MKL
3773 if (register_is_null(reg) &&
3774 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3775 /* final test in check_stack_boundary() */;
3776 else if (!type_is_pkt_pointer(type) &&
3777 type != PTR_TO_MAP_VALUE &&
3778 type != expected_type)
6841de8b 3779 goto err_type;
39f19ebb 3780 } else if (arg_type == ARG_CONST_SIZE ||
457f4436
AN
3781 arg_type == ARG_CONST_SIZE_OR_ZERO ||
3782 arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) {
f1174f77
EC
3783 expected_type = SCALAR_VALUE;
3784 if (type != expected_type)
6841de8b 3785 goto err_type;
17a52670
AS
3786 } else if (arg_type == ARG_CONST_MAP_PTR) {
3787 expected_type = CONST_PTR_TO_MAP;
6841de8b
AS
3788 if (type != expected_type)
3789 goto err_type;
f318903c
DB
3790 } else if (arg_type == ARG_PTR_TO_CTX ||
3791 arg_type == ARG_PTR_TO_CTX_OR_NULL) {
608cd71a 3792 expected_type = PTR_TO_CTX;
f318903c
DB
3793 if (!(register_is_null(reg) &&
3794 arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
3795 if (type != expected_type)
3796 goto err_type;
3797 err = check_ctx_reg(env, reg, regno);
3798 if (err < 0)
3799 return err;
3800 }
46f8bc92
MKL
3801 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3802 expected_type = PTR_TO_SOCK_COMMON;
3803 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3804 if (!type_is_sk_pointer(type))
3805 goto err_type;
1b986589
MKL
3806 if (reg->ref_obj_id) {
3807 if (meta->ref_obj_id) {
3808 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3809 regno, reg->ref_obj_id,
3810 meta->ref_obj_id);
3811 return -EFAULT;
3812 }
3813 meta->ref_obj_id = reg->ref_obj_id;
fd978bf7 3814 }
6ac99e8f
MKL
3815 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3816 expected_type = PTR_TO_SOCKET;
3817 if (type != expected_type)
3818 goto err_type;
a7658e1a
AS
3819 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3820 expected_type = PTR_TO_BTF_ID;
3821 if (type != expected_type)
3822 goto err_type;
3823 if (reg->btf_id != meta->btf_id) {
3824 verbose(env, "Helper has type %s got %s in R%d\n",
3825 kernel_type_name(meta->btf_id),
3826 kernel_type_name(reg->btf_id), regno);
3827
3828 return -EACCES;
3829 }
3830 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3831 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3832 regno);
3833 return -EACCES;
3834 }
d83525ca
AS
3835 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3836 if (meta->func_id == BPF_FUNC_spin_lock) {
3837 if (process_spin_lock(env, regno, true))
3838 return -EACCES;
3839 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3840 if (process_spin_lock(env, regno, false))
3841 return -EACCES;
3842 } else {
3843 verbose(env, "verifier internal error\n");
3844 return -EFAULT;
3845 }
90133415 3846 } else if (arg_type_is_mem_ptr(arg_type)) {
8e2fe1d9
DB
3847 expected_type = PTR_TO_STACK;
3848 /* One exception here. If the function allows NULL to be
f1174f77 3849 * passed in as an argument, it's a SCALAR_VALUE type. Final test
8e2fe1d9
DB
3850 * happens during stack boundary checking.
3851 */
914cb781 3852 if (register_is_null(reg) &&
457f4436
AN
3853 (arg_type == ARG_PTR_TO_MEM_OR_NULL ||
3854 arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
6841de8b 3855 /* final test in check_stack_boundary() */;
de8f3a83
DB
3856 else if (!type_is_pkt_pointer(type) &&
3857 type != PTR_TO_MAP_VALUE &&
457f4436 3858 type != PTR_TO_MEM &&
f1174f77 3859 type != expected_type)
6841de8b 3860 goto err_type;
39f19ebb 3861 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
457f4436
AN
3862 } else if (arg_type_is_alloc_mem_ptr(arg_type)) {
3863 expected_type = PTR_TO_MEM;
3864 if (register_is_null(reg) &&
3865 arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
3866 /* final test in check_stack_boundary() */;
3867 else if (type != expected_type)
3868 goto err_type;
3869 if (meta->ref_obj_id) {
3870 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3871 regno, reg->ref_obj_id,
3872 meta->ref_obj_id);
3873 return -EFAULT;
3874 }
3875 meta->ref_obj_id = reg->ref_obj_id;
57c3bb72
AI
3876 } else if (arg_type_is_int_ptr(arg_type)) {
3877 expected_type = PTR_TO_STACK;
3878 if (!type_is_pkt_pointer(type) &&
3879 type != PTR_TO_MAP_VALUE &&
3880 type != expected_type)
3881 goto err_type;
17a52670 3882 } else {
61bd5218 3883 verbose(env, "unsupported arg_type %d\n", arg_type);
17a52670
AS
3884 return -EFAULT;
3885 }
3886
17a52670
AS
3887 if (arg_type == ARG_CONST_MAP_PTR) {
3888 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 3889 meta->map_ptr = reg->map_ptr;
17a52670
AS
3890 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3891 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3892 * check that [key, key + map->key_size) are within
3893 * stack limits and initialized
3894 */
33ff9823 3895 if (!meta->map_ptr) {
17a52670
AS
3896 /* in function declaration map_ptr must come before
3897 * map_key, so that it's verified and known before
3898 * we have to check map_key here. Otherwise it means
3899 * that the kernel subsystem misconfigured the verifier
3900 */
61bd5218 3901 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
3902 return -EACCES;
3903 }
d71962f3
PC
3904 err = check_helper_mem_access(env, regno,
3905 meta->map_ptr->key_size, false,
3906 NULL);
2ea864c5 3907 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3908 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3909 !register_is_null(reg)) ||
2ea864c5 3910 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
3911 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3912 * check [value, value + map->value_size) validity
3913 */
33ff9823 3914 if (!meta->map_ptr) {
17a52670 3915 /* kernel subsystem misconfigured verifier */
61bd5218 3916 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
3917 return -EACCES;
3918 }
2ea864c5 3919 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
3920 err = check_helper_mem_access(env, regno,
3921 meta->map_ptr->value_size, false,
2ea864c5 3922 meta);
90133415 3923 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 3924 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 3925
10060503
JF
3926 /* This is used to refine r0 return value bounds for helpers
3927 * that enforce this value as an upper bound on return values.
3928 * See do_refine_retval_range() for helpers that can refine
3929 * the return value. The C type of the size argument is u32, so
3930 * we pull the register bound from umax_value; if it is negative,
3931 * the verifier errors out. Only upper bounds can be learned
3932 * because the retval is an int type and negative retvals are allowed.
849fa506 3933 */
10060503 3934 meta->msize_max_value = reg->umax_value;
849fa506 3935
f1174f77
EC
3936 /* The register is SCALAR_VALUE; the access check
3937 * happens using its boundaries.
06c1c049 3938 */
f1174f77 3939 if (!tnum_is_const(reg->var_off))
06c1c049
GB
3940 /* For unprivileged variable accesses, disable raw
3941 * mode so that the program is required to
3942 * initialize all the memory that the helper could
3943 * just partially fill up.
3944 */
3945 meta = NULL;
3946
b03c9f9f 3947 if (reg->smin_value < 0) {
61bd5218 3948 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
3949 regno);
3950 return -EACCES;
3951 }
06c1c049 3952
b03c9f9f 3953 if (reg->umin_value == 0) {
f1174f77
EC
3954 err = check_helper_mem_access(env, regno - 1, 0,
3955 zero_size_allowed,
3956 meta);
06c1c049
GB
3957 if (err)
3958 return err;
06c1c049 3959 }
f1174f77 3960
b03c9f9f 3961 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 3962 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
3963 regno);
3964 return -EACCES;
3965 }
3966 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 3967 reg->umax_value,
f1174f77 3968 zero_size_allowed, meta);
b5dc0163
AS
3969 if (!err)
3970 err = mark_chain_precision(env, regno);
457f4436
AN
3971 } else if (arg_type_is_alloc_size(arg_type)) {
3972 if (!tnum_is_const(reg->var_off)) {
3973 verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n",
3974 regno);
3975 return -EACCES;
3976 }
3977 meta->mem_size = reg->var_off.value;
57c3bb72
AI
3978 } else if (arg_type_is_int_ptr(arg_type)) {
3979 int size = int_ptr_type_to_size(arg_type);
3980
3981 err = check_helper_mem_access(env, regno, size, false, meta);
3982 if (err)
3983 return err;
3984 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
3985 }
3986
3987 return err;
6841de8b 3988err_type:
61bd5218 3989 verbose(env, "R%d type=%s expected=%s\n", regno,
6841de8b
AS
3990 reg_type_str[type], reg_type_str[expected_type]);
3991 return -EACCES;
17a52670
AS
3992}
3993
61bd5218
JK
3994static int check_map_func_compatibility(struct bpf_verifier_env *env,
3995 struct bpf_map *map, int func_id)
35578d79 3996{
35578d79
KX
3997 if (!map)
3998 return 0;
3999
6aff67c8
AS
4000 /* We need a two-way check: first from the map's perspective ... */
4001 switch (map->map_type) {
4002 case BPF_MAP_TYPE_PROG_ARRAY:
4003 if (func_id != BPF_FUNC_tail_call)
4004 goto error;
4005 break;
4006 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
4007 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 4008 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 4009 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
4010 func_id != BPF_FUNC_perf_event_read_value &&
4011 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
4012 goto error;
4013 break;
457f4436
AN
4014 case BPF_MAP_TYPE_RINGBUF:
4015 if (func_id != BPF_FUNC_ringbuf_output &&
4016 func_id != BPF_FUNC_ringbuf_reserve &&
4017 func_id != BPF_FUNC_ringbuf_submit &&
4018 func_id != BPF_FUNC_ringbuf_discard &&
4019 func_id != BPF_FUNC_ringbuf_query)
4020 goto error;
4021 break;
6aff67c8
AS
4022 case BPF_MAP_TYPE_STACK_TRACE:
4023 if (func_id != BPF_FUNC_get_stackid)
4024 goto error;
4025 break;
4ed8ec52 4026 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 4027 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 4028 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
4029 goto error;
4030 break;
cd339431 4031 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 4032 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
4033 if (func_id != BPF_FUNC_get_local_storage)
4034 goto error;
4035 break;
546ac1ff 4036 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 4037 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
4038 if (func_id != BPF_FUNC_redirect_map &&
4039 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
4040 goto error;
4041 break;
fbfc504a
BT
4042 /* Restrict bpf side of cpumap and xskmap, open when use-cases
4043 * appear.
4044 */
6710e112
JDB
4045 case BPF_MAP_TYPE_CPUMAP:
4046 if (func_id != BPF_FUNC_redirect_map)
4047 goto error;
4048 break;
fada7fdc
JL
4049 case BPF_MAP_TYPE_XSKMAP:
4050 if (func_id != BPF_FUNC_redirect_map &&
4051 func_id != BPF_FUNC_map_lookup_elem)
4052 goto error;
4053 break;
56f668df 4054 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 4055 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
4056 if (func_id != BPF_FUNC_map_lookup_elem)
4057 goto error;
16a43625 4058 break;
174a79ff
JF
4059 case BPF_MAP_TYPE_SOCKMAP:
4060 if (func_id != BPF_FUNC_sk_redirect_map &&
4061 func_id != BPF_FUNC_sock_map_update &&
4f738adb 4062 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 4063 func_id != BPF_FUNC_msg_redirect_map &&
64d85290
JS
4064 func_id != BPF_FUNC_sk_select_reuseport &&
4065 func_id != BPF_FUNC_map_lookup_elem)
174a79ff
JF
4066 goto error;
4067 break;
81110384
JF
4068 case BPF_MAP_TYPE_SOCKHASH:
4069 if (func_id != BPF_FUNC_sk_redirect_hash &&
4070 func_id != BPF_FUNC_sock_hash_update &&
4071 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 4072 func_id != BPF_FUNC_msg_redirect_hash &&
64d85290
JS
4073 func_id != BPF_FUNC_sk_select_reuseport &&
4074 func_id != BPF_FUNC_map_lookup_elem)
81110384
JF
4075 goto error;
4076 break;
2dbb9b9e
MKL
4077 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
4078 if (func_id != BPF_FUNC_sk_select_reuseport)
4079 goto error;
4080 break;
f1a2e44a
MV
4081 case BPF_MAP_TYPE_QUEUE:
4082 case BPF_MAP_TYPE_STACK:
4083 if (func_id != BPF_FUNC_map_peek_elem &&
4084 func_id != BPF_FUNC_map_pop_elem &&
4085 func_id != BPF_FUNC_map_push_elem)
4086 goto error;
4087 break;
6ac99e8f
MKL
4088 case BPF_MAP_TYPE_SK_STORAGE:
4089 if (func_id != BPF_FUNC_sk_storage_get &&
4090 func_id != BPF_FUNC_sk_storage_delete)
4091 goto error;
4092 break;
6aff67c8
AS
4093 default:
4094 break;
4095 }
4096
4097 /* ... and second from the function itself. */
4098 switch (func_id) {
4099 case BPF_FUNC_tail_call:
4100 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
4101 goto error;
f910cefa 4102 if (env->subprog_cnt > 1) {
f4d7e40a
AS
4103 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
4104 return -EINVAL;
4105 }
6aff67c8
AS
4106 break;
4107 case BPF_FUNC_perf_event_read:
4108 case BPF_FUNC_perf_event_output:
908432ca 4109 case BPF_FUNC_perf_event_read_value:
a7658e1a 4110 case BPF_FUNC_skb_output:
d831ee84 4111 case BPF_FUNC_xdp_output:
6aff67c8
AS
4112 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
4113 goto error;
4114 break;
4115 case BPF_FUNC_get_stackid:
4116 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
4117 goto error;
4118 break;
60d20f91 4119 case BPF_FUNC_current_task_under_cgroup:
747ea55e 4120 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
4121 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
4122 goto error;
4123 break;
97f91a7c 4124 case BPF_FUNC_redirect_map:
9c270af3 4125 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 4126 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
4127 map->map_type != BPF_MAP_TYPE_CPUMAP &&
4128 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
4129 goto error;
4130 break;
174a79ff 4131 case BPF_FUNC_sk_redirect_map:
4f738adb 4132 case BPF_FUNC_msg_redirect_map:
81110384 4133 case BPF_FUNC_sock_map_update:
174a79ff
JF
4134 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
4135 goto error;
4136 break;
81110384
JF
4137 case BPF_FUNC_sk_redirect_hash:
4138 case BPF_FUNC_msg_redirect_hash:
4139 case BPF_FUNC_sock_hash_update:
4140 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
4141 goto error;
4142 break;
cd339431 4143 case BPF_FUNC_get_local_storage:
b741f163
RG
4144 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
4145 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
4146 goto error;
4147 break;
2dbb9b9e 4148 case BPF_FUNC_sk_select_reuseport:
9fed9000
JS
4149 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
4150 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
4151 map->map_type != BPF_MAP_TYPE_SOCKHASH)
2dbb9b9e
MKL
4152 goto error;
4153 break;
f1a2e44a
MV
4154 case BPF_FUNC_map_peek_elem:
4155 case BPF_FUNC_map_pop_elem:
4156 case BPF_FUNC_map_push_elem:
4157 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
4158 map->map_type != BPF_MAP_TYPE_STACK)
4159 goto error;
4160 break;
6ac99e8f
MKL
4161 case BPF_FUNC_sk_storage_get:
4162 case BPF_FUNC_sk_storage_delete:
4163 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
4164 goto error;
4165 break;
6aff67c8
AS
4166 default:
4167 break;
35578d79
KX
4168 }
4169
4170 return 0;
6aff67c8 4171error:
61bd5218 4172 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 4173 map->map_type, func_id_name(func_id), func_id);
6aff67c8 4174 return -EINVAL;
35578d79
KX
4175}
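/* Editorial example (not part of verifier.c): the two-way pairing enforced
 * above. A BPF_MAP_TYPE_PROG_ARRAY may only be passed to bpf_tail_call(),
 * and bpf_tail_call() only accepts a prog array. Sketch using libbpf's
 * BTF-defined map syntax (assumed available); the map name is illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

/* In program context:
 *	bpf_tail_call(ctx, &jmp_table, 0);	// accepted
 *	bpf_map_lookup_elem(&jmp_table, &k);	// rejected: "cannot pass
 *						// map_type 3 into func ..."
 */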
4176
90133415 4177static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
4178{
4179 int count = 0;
4180
39f19ebb 4181 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4182 count++;
39f19ebb 4183 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4184 count++;
39f19ebb 4185 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4186 count++;
39f19ebb 4187 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4188 count++;
39f19ebb 4189 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
4190 count++;
4191
90133415
DB
4192 /* We only support one arg being in raw mode at the moment,
4193 * which is sufficient for the helper functions we have
4194 * right now.
4195 */
4196 return count <= 1;
4197}
4198
4199static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
4200 enum bpf_arg_type arg_next)
4201{
4202 return (arg_type_is_mem_ptr(arg_curr) &&
4203 !arg_type_is_mem_size(arg_next)) ||
4204 (!arg_type_is_mem_ptr(arg_curr) &&
4205 arg_type_is_mem_size(arg_next));
4206}
4207
4208static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
4209{
4210 /* bpf_xxx(..., buf, len) call will access 'len'
4211 * bytes from memory 'buf'. Both arg types need
4212 * to be paired, so make sure there's no buggy
4213 * helper function specification.
4214 */
4215 if (arg_type_is_mem_size(fn->arg1_type) ||
4216 arg_type_is_mem_ptr(fn->arg5_type) ||
4217 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
4218 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
4219 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
4220 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
4221 return false;
4222
4223 return true;
4224}
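/* Editorial example (not part of verifier.c): a proto shape that passes
 * check_arg_pair_ok() -- every mem pointer is immediately followed by its
 * size. Modelled on bpf_probe_read_kernel's proto; the .func placeholder
 * marks this as a sketch, not a registered helper.
 */
static const struct bpf_func_proto example_proto = {
	.func		= NULL,				/* placeholder */
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,	/* mem ptr ...	 */
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,	/* ... then size */
	.arg3_type	= ARG_ANYTHING,
};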
4225
1b986589 4226static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
4227{
4228 int count = 0;
4229
1b986589 4230 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 4231 count++;
1b986589 4232 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 4233 count++;
1b986589 4234 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 4235 count++;
1b986589 4236 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 4237 count++;
1b986589 4238 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
4239 count++;
4240
1b986589
MKL
4241 /* A reference acquiring function cannot acquire
4242 * another refcounted ptr.
4243 */
64d85290 4244 if (may_be_acquire_function(func_id) && count)
1b986589
MKL
4245 return false;
4246
fd978bf7
JS
4247 /* We only support one arg being unreferenced at the moment,
4248 * which is sufficient for the helper functions we have right now.
4249 */
4250 return count <= 1;
4251}
4252
1b986589 4253static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
4254{
4255 return check_raw_mode_ok(fn) &&
fd978bf7 4256 check_arg_pair_ok(fn) &&
1b986589 4257 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
4258}
4259
de8f3a83
DB
4260/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
4261 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 4262 */
f4d7e40a
AS
4263static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
4264 struct bpf_func_state *state)
969bf05e 4265{
58e2af8b 4266 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
4267 int i;
4268
4269 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 4270 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 4271 mark_reg_unknown(env, regs, i);
969bf05e 4272
f3709f69
JS
4273 bpf_for_each_spilled_reg(i, state, reg) {
4274 if (!reg)
969bf05e 4275 continue;
de8f3a83 4276 if (reg_is_pkt_pointer_any(reg))
f54c7898 4277 __mark_reg_unknown(env, reg);
969bf05e
AS
4278 }
4279}
4280
f4d7e40a
AS
4281static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
4282{
4283 struct bpf_verifier_state *vstate = env->cur_state;
4284 int i;
4285
4286 for (i = 0; i <= vstate->curframe; i++)
4287 __clear_all_pkt_pointers(env, vstate->frame[i]);
4288}
4289
fd978bf7 4290static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
4291 struct bpf_func_state *state,
4292 int ref_obj_id)
fd978bf7
JS
4293{
4294 struct bpf_reg_state *regs = state->regs, *reg;
4295 int i;
4296
4297 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 4298 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
4299 mark_reg_unknown(env, regs, i);
4300
4301 bpf_for_each_spilled_reg(i, state, reg) {
4302 if (!reg)
4303 continue;
1b986589 4304 if (reg->ref_obj_id == ref_obj_id)
f54c7898 4305 __mark_reg_unknown(env, reg);
fd978bf7
JS
4306 }
4307}
4308
4309/* The pointer with the specified id has released its reference to kernel
4310 * resources. Identify all copies of the same pointer and clear the reference.
4311 */
4312static int release_reference(struct bpf_verifier_env *env,
1b986589 4313 int ref_obj_id)
fd978bf7
JS
4314{
4315 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 4316 int err;
fd978bf7
JS
4317 int i;
4318
1b986589
MKL
4319 err = release_reference_state(cur_func(env), ref_obj_id);
4320 if (err)
4321 return err;
4322
fd978bf7 4323 for (i = 0; i <= vstate->curframe; i++)
1b986589 4324 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 4325
1b986589 4326 return 0;
fd978bf7
JS
4327}
4328
51c39bb1
AS
4329static void clear_caller_saved_regs(struct bpf_verifier_env *env,
4330 struct bpf_reg_state *regs)
4331{
4332 int i;
4333
4334 /* after the call registers r0 - r5 were scratched */
4335 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4336 mark_reg_not_init(env, regs, caller_saved[i]);
4337 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4338 }
4339}
4340
f4d7e40a
AS
4341static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
4342 int *insn_idx)
4343{
4344 struct bpf_verifier_state *state = env->cur_state;
51c39bb1 4345 struct bpf_func_info_aux *func_info_aux;
f4d7e40a 4346 struct bpf_func_state *caller, *callee;
fd978bf7 4347 int i, err, subprog, target_insn;
51c39bb1 4348 bool is_global = false;
f4d7e40a 4349
aada9ce6 4350 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 4351 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 4352 state->curframe + 2);
f4d7e40a
AS
4353 return -E2BIG;
4354 }
4355
4356 target_insn = *insn_idx + insn->imm;
4357 subprog = find_subprog(env, target_insn + 1);
4358 if (subprog < 0) {
4359 verbose(env, "verifier bug. No program starts at insn %d\n",
4360 target_insn + 1);
4361 return -EFAULT;
4362 }
4363
4364 caller = state->frame[state->curframe];
4365 if (state->frame[state->curframe + 1]) {
4366 verbose(env, "verifier bug. Frame %d already allocated\n",
4367 state->curframe + 1);
4368 return -EFAULT;
4369 }
4370
51c39bb1
AS
4371 func_info_aux = env->prog->aux->func_info_aux;
4372 if (func_info_aux)
4373 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
4374 err = btf_check_func_arg_match(env, subprog, caller->regs);
4375 if (err == -EFAULT)
4376 return err;
4377 if (is_global) {
4378 if (err) {
4379 verbose(env, "Caller passes invalid args into func#%d\n",
4380 subprog);
4381 return err;
4382 } else {
4383 if (env->log.level & BPF_LOG_LEVEL)
4384 verbose(env,
4385 "Func#%d is global and valid. Skipping.\n",
4386 subprog);
4387 clear_caller_saved_regs(env, caller->regs);
4388
4389 /* All global functions return SCALAR_VALUE */
4390 mark_reg_unknown(env, caller->regs, BPF_REG_0);
4391
4392 /* continue with next insn after call */
4393 return 0;
4394 }
4395 }
4396
f4d7e40a
AS
4397 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
4398 if (!callee)
4399 return -ENOMEM;
4400 state->frame[state->curframe + 1] = callee;
4401
4402 /* callee cannot access r0, r6 - r9 for reading and has to write
4403 * into its own stack before reading from it.
4404 * callee can read/write into caller's stack
4405 */
4406 init_func_state(env, callee,
4407 /* remember the callsite, it will be used by bpf_exit */
4408 *insn_idx /* callsite */,
4409 state->curframe + 1 /* frameno within this callchain */,
f910cefa 4410 subprog /* subprog number within this prog */);
f4d7e40a 4411
fd978bf7
JS
4412 /* Transfer references to the callee */
4413 err = transfer_reference_state(callee, caller);
4414 if (err)
4415 return err;
4416
679c782d
EC
4417 /* copy r1 - r5 args that callee can access. The copy includes parent
4418 * pointers, which connects us up to the liveness chain
4419 */
f4d7e40a
AS
4420 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4421 callee->regs[i] = caller->regs[i];
4422
51c39bb1 4423 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
4424
4425 /* only increment it after check_reg_arg() finished */
4426 state->curframe++;
4427
4428 /* and go analyze first insn of the callee */
4429 *insn_idx = target_insn;
4430
06ee7115 4431 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4432 verbose(env, "caller:\n");
4433 print_verifier_state(env, caller);
4434 verbose(env, "callee:\n");
4435 print_verifier_state(env, callee);
4436 }
4437 return 0;
4438}
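/* Editorial example (not part of verifier.c): the program shape that drives
 * check_func_call(). A non-inlined static function becomes a subprog: the
 * verifier allocates a fresh frame, copies r1-r5 in, and scratches the
 * caller-saved registers on return. BPF C sketch assuming clang -target bpf
 * and libbpf's bpf_helpers.h.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

static __noinline int add_one(int x)	/* subprog with its own frame */
{
	return x + 1;
}

SEC("socket")
int entry(struct __sk_buff *skb)
{
	/* check_func_call() runs here; prepare_func_exit() on the return */
	return add_one(skb->len);
}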
4439
4440static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4441{
4442 struct bpf_verifier_state *state = env->cur_state;
4443 struct bpf_func_state *caller, *callee;
4444 struct bpf_reg_state *r0;
fd978bf7 4445 int err;
f4d7e40a
AS
4446
4447 callee = state->frame[state->curframe];
4448 r0 = &callee->regs[BPF_REG_0];
4449 if (r0->type == PTR_TO_STACK) {
4450 /* technically it's ok to return caller's stack pointer
4451 * (or caller's caller's pointer) back to the caller,
4452 * since these pointers are valid. Only the current stack
4453 * pointer will become invalid as soon as the function exits,
4454 * but let's be conservative
4455 */
4456 verbose(env, "cannot return stack pointer to the caller\n");
4457 return -EINVAL;
4458 }
4459
4460 state->curframe--;
4461 caller = state->frame[state->curframe];
4462 /* return to the caller whatever r0 had in the callee */
4463 caller->regs[BPF_REG_0] = *r0;
4464
fd978bf7
JS
4465 /* Transfer references to the caller */
4466 err = transfer_reference_state(caller, callee);
4467 if (err)
4468 return err;
4469
f4d7e40a 4470 *insn_idx = callee->callsite + 1;
06ee7115 4471 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4472 verbose(env, "returning from callee:\n");
4473 print_verifier_state(env, callee);
4474 verbose(env, "to caller at %d:\n", *insn_idx);
4475 print_verifier_state(env, caller);
4476 }
4477 /* clear everything in the callee */
4478 free_func_state(callee);
4479 state->frame[state->curframe + 1] = NULL;
4480 return 0;
4481}
4482
849fa506
YS
4483static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4484 int func_id,
4485 struct bpf_call_arg_meta *meta)
4486{
4487 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4488
4489 if (ret_type != RET_INTEGER ||
4490 (func_id != BPF_FUNC_get_stack &&
47cc0ed5
DB
4491 func_id != BPF_FUNC_probe_read_str &&
4492 func_id != BPF_FUNC_probe_read_kernel_str &&
4493 func_id != BPF_FUNC_probe_read_user_str))
849fa506
YS
4494 return;
4495
10060503 4496 ret_reg->smax_value = meta->msize_max_value;
fa123ac0 4497 ret_reg->s32_max_value = meta->msize_max_value;
849fa506
YS
4498 __reg_deduce_bounds(ret_reg);
4499 __reg_bound_offset(ret_reg);
10060503 4500 __update_reg_bounds(ret_reg);
849fa506
YS
4501}
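/* Editorial example (not part of verifier.c): why the refinement above
 * matters. bpf_probe_read_kernel_str() can return at most its 'size'
 * argument, so after the call r0's smax is clamped to that constant and the
 * value can index the buffer without an extra bound. BPF C sketch, usual
 * libbpf headers assumed; the attach point is illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int retval_demo(void *ctx)
{
	char buf[64];
	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), ctx);

	/* n's smax_value is now <= 64 (meta->msize_max_value) */
	if (n > 0)
		return buf[n - 1];	/* provably inside buf */
	return 0;
}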
4502
c93552c4
DB
4503static int
4504record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4505 int func_id, int insn_idx)
4506{
4507 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4508 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4509
4510 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4511 func_id != BPF_FUNC_map_lookup_elem &&
4512 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4513 func_id != BPF_FUNC_map_delete_elem &&
4514 func_id != BPF_FUNC_map_push_elem &&
4515 func_id != BPF_FUNC_map_pop_elem &&
4516 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4517 return 0;
09772d92 4518
591fe988 4519 if (map == NULL) {
c93552c4
DB
4520 verbose(env, "kernel subsystem misconfigured verifier\n");
4521 return -EINVAL;
4522 }
4523
591fe988
DB
4524 /* In case of read-only, some additional restrictions
4525 * need to be applied in order to prevent altering the
4526 * state of the map from program side.
4527 */
4528 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4529 (func_id == BPF_FUNC_map_delete_elem ||
4530 func_id == BPF_FUNC_map_update_elem ||
4531 func_id == BPF_FUNC_map_push_elem ||
4532 func_id == BPF_FUNC_map_pop_elem)) {
4533 verbose(env, "write into map forbidden\n");
4534 return -EACCES;
4535 }
4536
d2e4c1e6 4537 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4 4538 bpf_map_ptr_store(aux, meta->map_ptr,
2c78ee89 4539 !meta->map_ptr->bypass_spec_v1);
d2e4c1e6 4540 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4 4541 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2c78ee89 4542 !meta->map_ptr->bypass_spec_v1);
c93552c4
DB
4543 return 0;
4544}
4545
d2e4c1e6
DB
4546static int
4547record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4548 int func_id, int insn_idx)
4549{
4550 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4551 struct bpf_reg_state *regs = cur_regs(env), *reg;
4552 struct bpf_map *map = meta->map_ptr;
4553 struct tnum range;
4554 u64 val;
cc52d914 4555 int err;
d2e4c1e6
DB
4556
4557 if (func_id != BPF_FUNC_tail_call)
4558 return 0;
4559 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4560 verbose(env, "kernel subsystem misconfigured verifier\n");
4561 return -EINVAL;
4562 }
4563
4564 range = tnum_range(0, map->max_entries - 1);
4565 reg = &regs[BPF_REG_3];
4566
4567 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4568 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4569 return 0;
4570 }
4571
cc52d914
DB
4572 err = mark_chain_precision(env, BPF_REG_3);
4573 if (err)
4574 return err;
4575
d2e4c1e6
DB
4576 val = reg->var_off.value;
4577 if (bpf_map_key_unseen(aux))
4578 bpf_map_key_store(aux, val);
4579 else if (!bpf_map_key_poisoned(aux) &&
4580 bpf_map_key_immediate(aux) != val)
4581 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4582 return 0;
4583}
4584
fd978bf7
JS
4585static int check_reference_leak(struct bpf_verifier_env *env)
4586{
4587 struct bpf_func_state *state = cur_func(env);
4588 int i;
4589
4590 for (i = 0; i < state->acquired_refs; i++) {
4591 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4592 state->refs[i].id, state->refs[i].insn_idx);
4593 }
4594 return state->acquired_refs ? -EINVAL : 0;
4595}
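/* Editorial example (not part of verifier.c): the discipline that
 * check_reference_leak() and release_reference() enforce. An acquire
 * helper such as bpf_sk_lookup_tcp() sets ref_obj_id on r0; every path
 * must hand the pointer to bpf_sk_release() or verification fails with
 * "Unreleased reference id=N". BPF C sketch:
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
 *	if (!sk)
 *		return TC_ACT_OK;	// NULL branch: nothing acquired
 *	...
 *	bpf_sk_release(sk);		// clears ref_obj_id in every copy
 *	return TC_ACT_OK;
 */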
4596
f4d7e40a 4597static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4598{
17a52670 4599 const struct bpf_func_proto *fn = NULL;
638f5b90 4600 struct bpf_reg_state *regs;
33ff9823 4601 struct bpf_call_arg_meta meta;
969bf05e 4602 bool changes_data;
17a52670
AS
4603 int i, err;
4604
4605 /* find function prototype */
4606 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4607 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4608 func_id);
17a52670
AS
4609 return -EINVAL;
4610 }
4611
00176a34 4612 if (env->ops->get_func_proto)
5e43f899 4613 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4614 if (!fn) {
61bd5218
JK
4615 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4616 func_id);
17a52670
AS
4617 return -EINVAL;
4618 }
4619
4620 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4621 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4622 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4623 return -EINVAL;
4624 }
4625
04514d13 4626 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4627 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4628 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4629 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4630 func_id_name(func_id), func_id);
4631 return -EINVAL;
4632 }
969bf05e 4633
33ff9823 4634 memset(&meta, 0, sizeof(meta));
36bbef52 4635 meta.pkt_access = fn->pkt_access;
33ff9823 4636
1b986589 4637 err = check_func_proto(fn, func_id);
435faee1 4638 if (err) {
61bd5218 4639 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 4640 func_id_name(func_id), func_id);
435faee1
DB
4641 return err;
4642 }
4643
d83525ca 4644 meta.func_id = func_id;
17a52670 4645 /* check args */
a7658e1a 4646 for (i = 0; i < 5; i++) {
9cc31b3a
AS
4647 err = btf_resolve_helper_id(&env->log, fn, i);
4648 if (err > 0)
4649 meta.btf_id = err;
a7658e1a
AS
4650 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4651 if (err)
4652 return err;
4653 }
17a52670 4654
c93552c4
DB
4655 err = record_func_map(env, &meta, func_id, insn_idx);
4656 if (err)
4657 return err;
4658
d2e4c1e6
DB
4659 err = record_func_key(env, &meta, func_id, insn_idx);
4660 if (err)
4661 return err;
4662
435faee1
DB
4663 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4664 * is inferred from register state.
4665 */
4666 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
4667 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4668 BPF_WRITE, -1, false);
435faee1
DB
4669 if (err)
4670 return err;
4671 }
4672
fd978bf7
JS
4673 if (func_id == BPF_FUNC_tail_call) {
4674 err = check_reference_leak(env);
4675 if (err) {
4676 verbose(env, "tail_call would lead to reference leak\n");
4677 return err;
4678 }
4679 } else if (is_release_function(func_id)) {
1b986589 4680 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
4681 if (err) {
4682 verbose(env, "func %s#%d reference has not been acquired before\n",
4683 func_id_name(func_id), func_id);
fd978bf7 4684 return err;
46f8bc92 4685 }
fd978bf7
JS
4686 }
4687
638f5b90 4688 regs = cur_regs(env);
cd339431
RG
4689
4690 /* check that flags argument in get_local_storage(map, flags) is 0,
4691 * this is required because get_local_storage() can't return an error.
4692 */
4693 if (func_id == BPF_FUNC_get_local_storage &&
4694 !register_is_null(&regs[BPF_REG_2])) {
4695 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4696 return -EINVAL;
4697 }
4698
17a52670 4699 /* reset caller saved regs */
dc503a8a 4700 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 4701 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
4702 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4703 }
17a52670 4704
5327ed3d
JW
4705 /* helper call returns 64-bit value. */
4706 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4707
dc503a8a 4708 /* update return register (already marked as written above) */
17a52670 4709 if (fn->ret_type == RET_INTEGER) {
f1174f77 4710 /* sets type to SCALAR_VALUE */
61bd5218 4711 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
4712 } else if (fn->ret_type == RET_VOID) {
4713 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
4714 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4715 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 4716 /* There is no offset yet applied, variable or fixed */
61bd5218 4717 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
4718 /* remember map_ptr, so that check_map_access()
4719 * can check 'value_size' boundary of memory access
4720 * to map element returned from bpf_map_lookup_elem()
4721 */
33ff9823 4722 if (meta.map_ptr == NULL) {
61bd5218
JK
4723 verbose(env,
4724 "kernel subsystem misconfigured verifier\n");
17a52670
AS
4725 return -EINVAL;
4726 }
33ff9823 4727 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
4728 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4729 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
4730 if (map_value_has_spin_lock(meta.map_ptr))
4731 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
4732 } else {
4733 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4734 regs[BPF_REG_0].id = ++env->id_gen;
4735 }
c64b7983
JS
4736 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4737 mark_reg_known_zero(env, regs, BPF_REG_0);
4738 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 4739 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
4740 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4741 mark_reg_known_zero(env, regs, BPF_REG_0);
4742 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4743 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
4744 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4745 mark_reg_known_zero(env, regs, BPF_REG_0);
4746 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4747 regs[BPF_REG_0].id = ++env->id_gen;
457f4436
AN
4748 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
4749 mark_reg_known_zero(env, regs, BPF_REG_0);
4750 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
4751 regs[BPF_REG_0].id = ++env->id_gen;
4752 regs[BPF_REG_0].mem_size = meta.mem_size;
17a52670 4753 } else {
61bd5218 4754 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 4755 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
4756 return -EINVAL;
4757 }
04fd61ab 4758
0f3adc28 4759 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
4760 /* For release_reference() */
4761 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
64d85290 4762 } else if (is_acquire_function(func_id, meta.map_ptr)) {
0f3adc28
LB
4763 int id = acquire_reference_state(env, insn_idx);
4764
4765 if (id < 0)
4766 return id;
4767 /* For mark_ptr_or_null_reg() */
4768 regs[BPF_REG_0].id = id;
4769 /* For release_reference() */
4770 regs[BPF_REG_0].ref_obj_id = id;
4771 }
1b986589 4772
849fa506
YS
4773 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4774
61bd5218 4775 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
4776 if (err)
4777 return err;
04fd61ab 4778
c195651e
YS
4779 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4780 const char *err_str;
4781
4782#ifdef CONFIG_PERF_EVENTS
4783 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4784 err_str = "cannot get callchain buffer for func %s#%d\n";
4785#else
4786 err = -ENOTSUPP;
4787 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4788#endif
4789 if (err) {
4790 verbose(env, err_str, func_id_name(func_id), func_id);
4791 return err;
4792 }
4793
4794 env->prog->has_callchain_buf = true;
4795 }
4796
969bf05e
AS
4797 if (changes_data)
4798 clear_all_pkt_pointers(env);
4799 return 0;
4800}
4801
b03c9f9f
EC
4802static bool signed_add_overflows(s64 a, s64 b)
4803{
4804 /* Do the add in u64, where overflow is well-defined */
4805 s64 res = (s64)((u64)a + (u64)b);
4806
4807 if (b < 0)
4808 return res > a;
4809 return res < a;
4810}
4811
3f50f132
JF
4812static bool signed_add32_overflows(s32 a, s32 b)
4813{
4814 /* Do the add in u32, where overflow is well-defined */
4815 s32 res = (s32)((u32)a + (u32)b);
4816
4817 if (b < 0)
4818 return res > a;
4819 return res < a;
4820}
4821
4822static bool signed_sub_overflows(s64 a, s64 b)
b03c9f9f
EC
4823{
4824 /* Do the sub in u64, where overflow is well-defined */
4825 s64 res = (s64)((u64)a - (u64)b);
4826
4827 if (b < 0)
4828 return res < a;
4829 return res > a;
969bf05e
AS
4830}
4831
3f50f132
JF
4832static bool signed_sub32_overflows(s32 a, s32 b)
4833{
4834 /* Do the sub in u32, where overflow is well-defined */
4835 s32 res = (s32)((u32)a - (u32)b);
4836
4837 if (b < 0)
4838 return res < a;
4839 return res > a;
4840}
4841
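/* Editor's illustration, not part of the original source (the helper name
 * below is invented): concrete inputs for the overflow helpers above.
 */
static void __maybe_unused signed_overflow_examples(void)
{
	/* S64_MAX + 1 wraps to S64_MIN; b >= 0 and res < a, so overflow */
	WARN_ON(!signed_add_overflows(S64_MAX, 1));
	/* -1 + -1 == -2 stays in range; b < 0 and res < a, no overflow */
	WARN_ON(signed_add_overflows(-1, -1));
	/* S64_MIN - 1 wraps to S64_MAX; b >= 0 and res > a, so overflow */
	WARN_ON(!signed_sub_overflows(S64_MIN, 1));
}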
bb7f0f98
AS
4842static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4843 const struct bpf_reg_state *reg,
4844 enum bpf_reg_type type)
4845{
4846 bool known = tnum_is_const(reg->var_off);
4847 s64 val = reg->var_off.value;
4848 s64 smin = reg->smin_value;
4849
4850 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4851 verbose(env, "math between %s pointer and %lld is not allowed\n",
4852 reg_type_str[type], val);
4853 return false;
4854 }
4855
4856 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4857 verbose(env, "%s pointer offset %d is not allowed\n",
4858 reg_type_str[type], reg->off);
4859 return false;
4860 }
4861
4862 if (smin == S64_MIN) {
4863 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4864 reg_type_str[type]);
4865 return false;
4866 }
4867
4868 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4869 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4870 smin, reg_type_str[type]);
4871 return false;
4872 }
4873
4874 return true;
4875}
4876
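/* Editor's note, not part of the original source: BPF_MAX_VAR_OFF is
 * 1 << 29, so e.g. a known constant offset of 0x20000000 (536870912)
 * fails the first check above with "math between map_value pointer and
 * 536870912 is not allowed"; the same bound caps reg->off and smin.
 */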
979d63d5
DB
4877static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4878{
4879 return &env->insn_aux_data[env->insn_idx];
4880}
4881
4882static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4883 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4884{
4885 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4886 (opcode == BPF_SUB && !off_is_neg);
4887 u32 off;
4888
4889 switch (ptr_reg->type) {
4890 case PTR_TO_STACK:
088ec26d
AI
4891 /* Indirect variable offset stack access is prohibited in
4892 * unprivileged mode so it's not handled here.
4893 */
979d63d5
DB
4894 off = ptr_reg->off + ptr_reg->var_off.value;
4895 if (mask_to_left)
4896 *ptr_limit = MAX_BPF_STACK + off;
4897 else
4898 *ptr_limit = -off;
4899 return 0;
4900 case PTR_TO_MAP_VALUE:
4901 if (mask_to_left) {
4902 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4903 } else {
4904 off = ptr_reg->smin_value + ptr_reg->off;
4905 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4906 }
4907 return 0;
4908 default:
4909 return -EINVAL;
4910 }
4911}
4912
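/* Editor's illustration, not part of the original source (the helper name
 * is invented): ptr_limit for a stack pointer at fp-64 with no variable
 * part. An ADD with a negative offset masks "to the left", limited by the
 * distance to the bottom of the stack; with a non-negative offset the
 * limit is the distance back up to the frame pointer.
 */
static void __maybe_unused ptr_limit_example(void)
{
	struct bpf_reg_state stack_ptr = {
		.type = PTR_TO_STACK,
		.off  = -64,
	};
	u32 limit;

	retrieve_ptr_limit(&stack_ptr, &limit, BPF_ADD, true);
	/* limit == MAX_BPF_STACK + (-64) == 512 - 64 == 448 */
	retrieve_ptr_limit(&stack_ptr, &limit, BPF_ADD, false);
	/* limit == -(-64) == 64 */
}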
d3bd7413
DB
4913static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4914 const struct bpf_insn *insn)
4915{
2c78ee89 4916 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
d3bd7413
DB
4917}
4918
4919static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4920 u32 alu_state, u32 alu_limit)
4921{
4922 /* If we arrived here from different branches with different
4923 * state or limits to sanitize, then this won't work.
4924 */
4925 if (aux->alu_state &&
4926 (aux->alu_state != alu_state ||
4927 aux->alu_limit != alu_limit))
4928 return -EACCES;
4929
4930 /* Corresponding fixup done in fixup_bpf_calls(). */
4931 aux->alu_state = alu_state;
4932 aux->alu_limit = alu_limit;
4933 return 0;
4934}
4935
4936static int sanitize_val_alu(struct bpf_verifier_env *env,
4937 struct bpf_insn *insn)
4938{
4939 struct bpf_insn_aux_data *aux = cur_aux(env);
4940
4941 if (can_skip_alu_sanitation(env, insn))
4942 return 0;
4943
4944 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4945}
4946
979d63d5
DB
4947static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4948 struct bpf_insn *insn,
4949 const struct bpf_reg_state *ptr_reg,
4950 struct bpf_reg_state *dst_reg,
4951 bool off_is_neg)
4952{
4953 struct bpf_verifier_state *vstate = env->cur_state;
4954 struct bpf_insn_aux_data *aux = cur_aux(env);
4955 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4956 u8 opcode = BPF_OP(insn->code);
4957 u32 alu_state, alu_limit;
4958 struct bpf_reg_state tmp;
4959 bool ret;
4960
d3bd7413 4961 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
4962 return 0;
4963
4964 /* We already marked aux for masking from non-speculative
4965 * paths, thus we got here in the first place. We only care
4966 * to explore bad access from here.
4967 */
4968 if (vstate->speculative)
4969 goto do_sim;
4970
4971 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4972 alu_state |= ptr_is_dst_reg ?
4973 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4974
4975 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4976 return 0;
d3bd7413 4977 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 4978 return -EACCES;
979d63d5
DB
4979do_sim:
4980 /* Simulate and find potential out-of-bounds access under
4981 * speculative execution from truncation as a result of
4982 * masking when off was not within expected range. If off
4983 * sits in dst, then we temporarily need to move ptr there
4984 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4985 * for cases where we use K-based arithmetic in one direction
4986 * and truncated reg-based in the other in order to explore
4987 * bad access.
4988 */
4989 if (!ptr_is_dst_reg) {
4990 tmp = *dst_reg;
4991 *dst_reg = *ptr_reg;
4992 }
4993 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 4994 if (!ptr_is_dst_reg && ret)
979d63d5
DB
4995 *dst_reg = tmp;
4996 return !ret ? -EFAULT : 0;
4997}
4998
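/* Editor's sketch, not part of the original source: the state recorded in
 * aux->alu_state/alu_limit is consumed later by fixup_bpf_calls(), which
 * rewrites the pointer ALU roughly as the following masking sequence
 * (using the auxiliary register BPF_REG_AX):
 *
 *	BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit)
 *	BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg)
 *	BPF_ALU64_REG(BPF_OR,  BPF_REG_AX, off_reg)
 *	BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0)
 *	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63)
 *	BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg)
 *
 * so an out-of-range offset is forced to zero under speculation instead
 * of reaching out of bounds.
 */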
f1174f77 4999/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
5000 * Caller should also handle BPF_MOV case separately.
5001 * If we return -EACCES, caller may want to try again treating pointer as a
5002 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
5003 */
5004static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
5005 struct bpf_insn *insn,
5006 const struct bpf_reg_state *ptr_reg,
5007 const struct bpf_reg_state *off_reg)
969bf05e 5008{
f4d7e40a
AS
5009 struct bpf_verifier_state *vstate = env->cur_state;
5010 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5011 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 5012 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
5013 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
5014 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
5015 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
5016 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 5017 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 5018 u8 opcode = BPF_OP(insn->code);
979d63d5 5019 int ret;
969bf05e 5020
f1174f77 5021 dst_reg = &regs[dst];
969bf05e 5022
6f16101e
DB
5023 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
5024 smin_val > smax_val || umin_val > umax_val) {
5025 /* Taint dst register if offset had invalid bounds derived from
5026 * e.g. dead branches.
5027 */
f54c7898 5028 __mark_reg_unknown(env, dst_reg);
6f16101e 5029 return 0;
f1174f77
EC
5030 }
5031
5032 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5033 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
82abbf8d
AS
5034 verbose(env,
5035 "R%d 32-bit pointer arithmetic prohibited\n",
5036 dst);
f1174f77 5037 return -EACCES;
969bf05e
AS
5038 }
5039
aad2eeaf
JS
5040 switch (ptr_reg->type) {
5041 case PTR_TO_MAP_VALUE_OR_NULL:
5042 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
5043 dst, reg_type_str[ptr_reg->type]);
f1174f77 5044 return -EACCES;
aad2eeaf
JS
5045 case CONST_PTR_TO_MAP:
5046 case PTR_TO_PACKET_END:
c64b7983
JS
5047 case PTR_TO_SOCKET:
5048 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
5049 case PTR_TO_SOCK_COMMON:
5050 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
5051 case PTR_TO_TCP_SOCK:
5052 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 5053 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
5054 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
5055 dst, reg_type_str[ptr_reg->type]);
f1174f77 5056 return -EACCES;
9d7eceed
DB
5057 case PTR_TO_MAP_VALUE:
5058 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
5059 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
5060 off_reg == dst_reg ? dst : src);
5061 return -EACCES;
5062 }
5063 /* fall-through */
aad2eeaf
JS
5064 default:
5065 break;
f1174f77
EC
5066 }
5067
5068 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
5069 * The id may be overwritten later if we create a new variable offset.
969bf05e 5070 */
f1174f77
EC
5071 dst_reg->type = ptr_reg->type;
5072 dst_reg->id = ptr_reg->id;
969bf05e 5073
bb7f0f98
AS
5074 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
5075 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
5076 return -EINVAL;
5077
3f50f132
JF
5078 /* pointer types do not carry 32-bit bounds at the moment. */
5079 __mark_reg32_unbounded(dst_reg);
5080
f1174f77
EC
5081 switch (opcode) {
5082 case BPF_ADD:
979d63d5
DB
5083 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
5084 if (ret < 0) {
5085 verbose(env, "R%d tried to add from different maps or paths\n", dst);
5086 return ret;
5087 }
f1174f77
EC
5088 /* We can take a fixed offset as long as it doesn't overflow
5089 * the s32 'off' field
969bf05e 5090 */
b03c9f9f
EC
5091 if (known && (ptr_reg->off + smin_val ==
5092 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 5093 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
5094 dst_reg->smin_value = smin_ptr;
5095 dst_reg->smax_value = smax_ptr;
5096 dst_reg->umin_value = umin_ptr;
5097 dst_reg->umax_value = umax_ptr;
f1174f77 5098 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 5099 dst_reg->off = ptr_reg->off + smin_val;
0962590e 5100 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
5101 break;
5102 }
f1174f77
EC
5103 /* A new variable offset is created. Note that off_reg->off
5104 * == 0, since it's a scalar.
5105 * dst_reg gets the pointer type and since some positive
5106 * integer value was added to the pointer, give it a new 'id'
5107 * if it's a PTR_TO_PACKET.
5108 * this creates a new 'base' pointer, off_reg (variable) gets
5109 * added into the variable offset, and we copy the fixed offset
5110 * from ptr_reg.
969bf05e 5111 */
b03c9f9f
EC
5112 if (signed_add_overflows(smin_ptr, smin_val) ||
5113 signed_add_overflows(smax_ptr, smax_val)) {
5114 dst_reg->smin_value = S64_MIN;
5115 dst_reg->smax_value = S64_MAX;
5116 } else {
5117 dst_reg->smin_value = smin_ptr + smin_val;
5118 dst_reg->smax_value = smax_ptr + smax_val;
5119 }
5120 if (umin_ptr + umin_val < umin_ptr ||
5121 umax_ptr + umax_val < umax_ptr) {
5122 dst_reg->umin_value = 0;
5123 dst_reg->umax_value = U64_MAX;
5124 } else {
5125 dst_reg->umin_value = umin_ptr + umin_val;
5126 dst_reg->umax_value = umax_ptr + umax_val;
5127 }
f1174f77
EC
5128 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
5129 dst_reg->off = ptr_reg->off;
0962590e 5130 dst_reg->raw = ptr_reg->raw;
de8f3a83 5131 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
5132 dst_reg->id = ++env->id_gen;
5133 /* something was added to pkt_ptr, set range to zero */
0962590e 5134 dst_reg->raw = 0;
f1174f77
EC
5135 }
5136 break;
5137 case BPF_SUB:
979d63d5
DB
5138 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
5139 if (ret < 0) {
5140 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
5141 return ret;
5142 }
f1174f77
EC
5143 if (dst_reg == off_reg) {
5144 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
5145 verbose(env, "R%d tried to subtract pointer from scalar\n",
5146 dst);
f1174f77
EC
5147 return -EACCES;
5148 }
5149 /* We don't allow subtraction from FP, because (according to
5150 * test_verifier.c test "invalid fp arithmetic", JITs might not
5151 * be able to deal with it.
969bf05e 5152 */
f1174f77 5153 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
5154 verbose(env, "R%d subtraction from stack pointer prohibited\n",
5155 dst);
f1174f77
EC
5156 return -EACCES;
5157 }
b03c9f9f
EC
5158 if (known && (ptr_reg->off - smin_val ==
5159 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 5160 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
5161 dst_reg->smin_value = smin_ptr;
5162 dst_reg->smax_value = smax_ptr;
5163 dst_reg->umin_value = umin_ptr;
5164 dst_reg->umax_value = umax_ptr;
f1174f77
EC
5165 dst_reg->var_off = ptr_reg->var_off;
5166 dst_reg->id = ptr_reg->id;
b03c9f9f 5167 dst_reg->off = ptr_reg->off - smin_val;
0962590e 5168 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
5169 break;
5170 }
f1174f77
EC
5171 /* A new variable offset is created. If the subtrahend is known
5172 * nonnegative, then any reg->range we had before is still good.
969bf05e 5173 */
b03c9f9f
EC
5174 if (signed_sub_overflows(smin_ptr, smax_val) ||
5175 signed_sub_overflows(smax_ptr, smin_val)) {
5176 /* Overflow possible, we know nothing */
5177 dst_reg->smin_value = S64_MIN;
5178 dst_reg->smax_value = S64_MAX;
5179 } else {
5180 dst_reg->smin_value = smin_ptr - smax_val;
5181 dst_reg->smax_value = smax_ptr - smin_val;
5182 }
5183 if (umin_ptr < umax_val) {
5184 /* Overflow possible, we know nothing */
5185 dst_reg->umin_value = 0;
5186 dst_reg->umax_value = U64_MAX;
5187 } else {
5188 /* Cannot overflow (as long as bounds are consistent) */
5189 dst_reg->umin_value = umin_ptr - umax_val;
5190 dst_reg->umax_value = umax_ptr - umin_val;
5191 }
f1174f77
EC
5192 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
5193 dst_reg->off = ptr_reg->off;
0962590e 5194 dst_reg->raw = ptr_reg->raw;
de8f3a83 5195 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
5196 dst_reg->id = ++env->id_gen;
5197 /* something could have been added to pkt_ptr, set range to zero */
b03c9f9f 5198 if (smin_val < 0)
0962590e 5199 dst_reg->raw = 0;
43188702 5200 }
f1174f77
EC
5201 break;
5202 case BPF_AND:
5203 case BPF_OR:
5204 case BPF_XOR:
82abbf8d
AS
5205 /* bitwise ops on pointers are troublesome, prohibit. */
5206 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
5207 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
5208 return -EACCES;
5209 default:
5210 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
5211 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
5212 dst, bpf_alu_string[opcode >> 4]);
f1174f77 5213 return -EACCES;
43188702
JF
5214 }
5215
bb7f0f98
AS
5216 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
5217 return -EINVAL;
5218
b03c9f9f
EC
5219 __update_reg_bounds(dst_reg);
5220 __reg_deduce_bounds(dst_reg);
5221 __reg_bound_offset(dst_reg);
0d6303db
DB
5222
5223 /* For unprivileged we require that resulting offset must be in bounds
5224 * in order to be able to sanitize access later on.
5225 */
2c78ee89 5226 if (!env->bypass_spec_v1) {
e4298d25
DB
5227 if (dst_reg->type == PTR_TO_MAP_VALUE &&
5228 check_map_access(env, dst, dst_reg->off, 1, false)) {
5229 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
5230 "prohibited for !root\n", dst);
5231 return -EACCES;
5232 } else if (dst_reg->type == PTR_TO_STACK &&
5233 check_stack_access(env, dst_reg, dst_reg->off +
5234 dst_reg->var_off.value, 1)) {
5235 verbose(env, "R%d stack pointer arithmetic goes out of range, "
5236 "prohibited for !root\n", dst);
5237 return -EACCES;
5238 }
0d6303db
DB
5239 }
5240
43188702
JF
5241 return 0;
5242}
5243
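/* Editor's worked example, not part of the original source: for
 * "r1 = r10; r1 += -20" the offset is a known constant that fits in the
 * s32 'off' field, so it folds into the fixed offset: r1 becomes
 * PTR_TO_STACK with off == -20 and unchanged var_off. For "r1 += r2"
 * with r2 a scalar in [0, 16], a variable offset is created instead:
 * off is copied from the base pointer, umin/umax/var_off absorb [0, 16],
 * and a packet pointer would additionally get a fresh id and range 0.
 */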
3f50f132
JF
5244static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
5245 struct bpf_reg_state *src_reg)
5246{
5247 s32 smin_val = src_reg->s32_min_value;
5248 s32 smax_val = src_reg->s32_max_value;
5249 u32 umin_val = src_reg->u32_min_value;
5250 u32 umax_val = src_reg->u32_max_value;
5251
5252 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
5253 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
5254 dst_reg->s32_min_value = S32_MIN;
5255 dst_reg->s32_max_value = S32_MAX;
5256 } else {
5257 dst_reg->s32_min_value += smin_val;
5258 dst_reg->s32_max_value += smax_val;
5259 }
5260 if (dst_reg->u32_min_value + umin_val < umin_val ||
5261 dst_reg->u32_max_value + umax_val < umax_val) {
5262 dst_reg->u32_min_value = 0;
5263 dst_reg->u32_max_value = U32_MAX;
5264 } else {
5265 dst_reg->u32_min_value += umin_val;
5266 dst_reg->u32_max_value += umax_val;
5267 }
5268}
5269
07cd2631
JF
5270static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
5271 struct bpf_reg_state *src_reg)
5272{
5273 s64 smin_val = src_reg->smin_value;
5274 s64 smax_val = src_reg->smax_value;
5275 u64 umin_val = src_reg->umin_value;
5276 u64 umax_val = src_reg->umax_value;
5277
5278 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
5279 signed_add_overflows(dst_reg->smax_value, smax_val)) {
5280 dst_reg->smin_value = S64_MIN;
5281 dst_reg->smax_value = S64_MAX;
5282 } else {
5283 dst_reg->smin_value += smin_val;
5284 dst_reg->smax_value += smax_val;
5285 }
5286 if (dst_reg->umin_value + umin_val < umin_val ||
5287 dst_reg->umax_value + umax_val < umax_val) {
5288 dst_reg->umin_value = 0;
5289 dst_reg->umax_value = U64_MAX;
5290 } else {
5291 dst_reg->umin_value += umin_val;
5292 dst_reg->umax_value += umax_val;
5293 }
3f50f132
JF
5294}
5295
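/* Editor's illustration, not part of the original source (the helper name
 * is invented): the 64-bit add rule above on concrete bounds. When no
 * endpoint overflows, the interval arithmetic stays exact.
 */
static void __maybe_unused scalar_add_bounds_example(void)
{
	struct bpf_reg_state dst = {}, src = {};

	dst.smin_value = 10; dst.smax_value = 20;
	dst.umin_value = 10; dst.umax_value = 20;
	src.smin_value = 5;  src.smax_value = 5;
	src.umin_value = 5;  src.umax_value = 5;

	scalar_min_max_add(&dst, &src);
	/* dst is now [15, 25] in both the signed and unsigned domains;
	 * had dst.smax_value been S64_MAX, the signed bounds would have
	 * collapsed to [S64_MIN, S64_MAX] instead.
	 */
}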
5296static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
5297 struct bpf_reg_state *src_reg)
5298{
5299 s32 smin_val = src_reg->s32_min_value;
5300 s32 smax_val = src_reg->s32_max_value;
5301 u32 umin_val = src_reg->u32_min_value;
5302 u32 umax_val = src_reg->u32_max_value;
5303
5304 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
5305 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
5306 /* Overflow possible, we know nothing */
5307 dst_reg->s32_min_value = S32_MIN;
5308 dst_reg->s32_max_value = S32_MAX;
5309 } else {
5310 dst_reg->s32_min_value -= smax_val;
5311 dst_reg->s32_max_value -= smin_val;
5312 }
5313 if (dst_reg->u32_min_value < umax_val) {
5314 /* Overflow possible, we know nothing */
5315 dst_reg->u32_min_value = 0;
5316 dst_reg->u32_max_value = U32_MAX;
5317 } else {
5318 /* Cannot overflow (as long as bounds are consistent) */
5319 dst_reg->u32_min_value -= umax_val;
5320 dst_reg->u32_max_value -= umin_val;
5321 }
07cd2631
JF
5322}
5323
5324static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
5325 struct bpf_reg_state *src_reg)
5326{
5327 s64 smin_val = src_reg->smin_value;
5328 s64 smax_val = src_reg->smax_value;
5329 u64 umin_val = src_reg->umin_value;
5330 u64 umax_val = src_reg->umax_value;
5331
5332 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
5333 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
5334 /* Overflow possible, we know nothing */
5335 dst_reg->smin_value = S64_MIN;
5336 dst_reg->smax_value = S64_MAX;
5337 } else {
5338 dst_reg->smin_value -= smax_val;
5339 dst_reg->smax_value -= smin_val;
5340 }
5341 if (dst_reg->umin_value < umax_val) {
5342 /* Overflow possible, we know nothing */
5343 dst_reg->umin_value = 0;
5344 dst_reg->umax_value = U64_MAX;
5345 } else {
5346 /* Cannot overflow (as long as bounds are consistent) */
5347 dst_reg->umin_value -= umax_val;
5348 dst_reg->umax_value -= umin_val;
5349 }
3f50f132
JF
5350}
5351
5352static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
5353 struct bpf_reg_state *src_reg)
5354{
5355 s32 smin_val = src_reg->s32_min_value;
5356 u32 umin_val = src_reg->u32_min_value;
5357 u32 umax_val = src_reg->u32_max_value;
5358
5359 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
5360 /* Ain't nobody got time to multiply that sign */
5361 __mark_reg32_unbounded(dst_reg);
5362 return;
5363 }
5364 /* Both values are positive, so we can work with unsigned and
5365 * copy the result to signed (unless it exceeds S32_MAX).
5366 */
5367 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
5368 /* Potential overflow, we know nothing */
5369 __mark_reg32_unbounded(dst_reg);
5370 return;
5371 }
5372 dst_reg->u32_min_value *= umin_val;
5373 dst_reg->u32_max_value *= umax_val;
5374 if (dst_reg->u32_max_value > S32_MAX) {
5375 /* Overflow possible, we know nothing */
5376 dst_reg->s32_min_value = S32_MIN;
5377 dst_reg->s32_max_value = S32_MAX;
5378 } else {
5379 dst_reg->s32_min_value = dst_reg->u32_min_value;
5380 dst_reg->s32_max_value = dst_reg->u32_max_value;
5381 }
07cd2631
JF
5382}
5383
5384static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
5385 struct bpf_reg_state *src_reg)
5386{
5387 s64 smin_val = src_reg->smin_value;
5388 u64 umin_val = src_reg->umin_value;
5389 u64 umax_val = src_reg->umax_value;
5390
07cd2631
JF
5391 if (smin_val < 0 || dst_reg->smin_value < 0) {
5392 /* Ain't nobody got time to multiply that sign */
3f50f132 5393 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
5394 return;
5395 }
5396 /* Both values are positive, so we can work with unsigned and
5397 * copy the result to signed (unless it exceeds S64_MAX).
5398 */
5399 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
5400 /* Potential overflow, we know nothing */
3f50f132 5401 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
5402 return;
5403 }
5404 dst_reg->umin_value *= umin_val;
5405 dst_reg->umax_value *= umax_val;
5406 if (dst_reg->umax_value > S64_MAX) {
5407 /* Overflow possible, we know nothing */
5408 dst_reg->smin_value = S64_MIN;
5409 dst_reg->smax_value = S64_MAX;
5410 } else {
5411 dst_reg->smin_value = dst_reg->umin_value;
5412 dst_reg->smax_value = dst_reg->umax_value;
5413 }
5414}
5415
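/* Editor's worked example, not part of the original source: with dst in
 * [3, 5] and src in [2, 4], both nonnegative and both maxima <= U32_MAX,
 * the unsigned product range is [6, 20]; since 20 <= S64_MAX the signed
 * bounds become [6, 20] as well. If either operand could be negative, or
 * either maximum exceeded U32_MAX, the register would be marked fully
 * unbounded instead.
 */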
3f50f132
JF
5416static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
5417 struct bpf_reg_state *src_reg)
5418{
5419 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5420 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5421 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5422 s32 smin_val = src_reg->s32_min_value;
5423 u32 umax_val = src_reg->u32_max_value;
5424
5425 /* Assuming scalar_min_max_and() will be called so it's safe
5426 * to skip updating the register for the known 32-bit case.
5427 */
5428 if (src_known && dst_known)
5429 return;
5430
5431 /* We get our minimum from the var_off, since that's inherently
5432 * bitwise. Our maximum is the minimum of the operands' maxima.
5433 */
5434 dst_reg->u32_min_value = var32_off.value;
5435 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
5436 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
5437 /* Lose signed bounds when ANDing negative numbers,
5438 * ain't nobody got time for that.
5439 */
5440 dst_reg->s32_min_value = S32_MIN;
5441 dst_reg->s32_max_value = S32_MAX;
5442 } else {
5443 /* ANDing two positives gives a positive, so safe to
5444 * cast result into s32.
5445 */
5446 dst_reg->s32_min_value = dst_reg->u32_min_value;
5447 dst_reg->s32_max_value = dst_reg->u32_max_value;
5448 }
5449
5450}
5451
07cd2631
JF
5452static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
5453 struct bpf_reg_state *src_reg)
5454{
3f50f132
JF
5455 bool src_known = tnum_is_const(src_reg->var_off);
5456 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
5457 s64 smin_val = src_reg->smin_value;
5458 u64 umax_val = src_reg->umax_value;
5459
3f50f132
JF
5460 if (src_known && dst_known) {
5461 __mark_reg_known(dst_reg, dst_reg->var_off.value &
5462 src_reg->var_off.value);
5463 return;
5464 }
5465
07cd2631
JF
5466 /* We get our minimum from the var_off, since that's inherently
5467 * bitwise. Our maximum is the minimum of the operands' maxima.
5468 */
07cd2631
JF
5469 dst_reg->umin_value = dst_reg->var_off.value;
5470 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
5471 if (dst_reg->smin_value < 0 || smin_val < 0) {
5472 /* Lose signed bounds when ANDing negative numbers,
5473 * ain't nobody got time for that.
5474 */
5475 dst_reg->smin_value = S64_MIN;
5476 dst_reg->smax_value = S64_MAX;
5477 } else {
5478 /* ANDing two positives gives a positive, so safe to
5479 * cast result into s64.
5480 */
5481 dst_reg->smin_value = dst_reg->umin_value;
5482 dst_reg->smax_value = dst_reg->umax_value;
5483 }
5484 /* We may learn something more from the var_off */
5485 __update_reg_bounds(dst_reg);
5486}
5487
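/* Editor's illustration, not part of the original source (the helper name
 * is invented): AND bounds from the tnum. In adjust_scalar_min_max_vals()
 * the caller ANDs the tnums before calling scalar_min_max_and(), which is
 * reproduced here.
 */
static void __maybe_unused scalar_and_bounds_example(void)
{
	struct bpf_reg_state dst = {}, src = {};

	/* dst: unknown low byte, upper bits known zero */
	dst.var_off = (struct tnum){ .value = 0, .mask = 0xff };
	dst.umin_value = 0;    dst.umax_value = 0xff;
	dst.smin_value = 0;    dst.smax_value = 0xff;
	/* src: constant 0x0f */
	src.var_off = tnum_const(0x0f);
	src.umin_value = 0x0f; src.umax_value = 0x0f;
	src.smin_value = 0x0f; src.smax_value = 0x0f;

	dst.var_off = tnum_and(dst.var_off, src.var_off);
	scalar_min_max_and(&dst, &src);
	/* dst is now [0, 0x0f]: the min comes from var_off.value, the
	 * max is the smaller of the two operands' maxima.
	 */
}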
3f50f132
JF
5488static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
5489 struct bpf_reg_state *src_reg)
5490{
5491 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5492 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5493 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5494 s32 smin_val = src_reg->s32_min_value;
5495 u32 umin_val = src_reg->u32_min_value;
5496
5497 /* Assuming scalar_min_max_or() will be called so it is safe
5498 * to skip updating the register for the known case.
5499 */
5500 if (src_known && dst_known)
5501 return;
5502
5503 /* We get our maximum from the var_off, and our minimum is the
5504 * maximum of the operands' minima
5505 */
5506 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
5507 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
5508 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
5509 /* Lose signed bounds when ORing negative numbers,
5510 * ain't nobody got time for that.
5511 */
5512 dst_reg->s32_min_value = S32_MIN;
5513 dst_reg->s32_max_value = S32_MAX;
5514 } else {
5515 /* ORing two positives gives a positive, so safe to
5516 * cast result into s32.
5517 */
5518 dst_reg->s32_min_value = dst_reg->u32_min_value;
5519 dst_reg->s32_max_value = dst_reg->u32_max_value;
5520 }
5521}
5522
07cd2631
JF
5523static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
5524 struct bpf_reg_state *src_reg)
5525{
3f50f132
JF
5526 bool src_known = tnum_is_const(src_reg->var_off);
5527 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
5528 s64 smin_val = src_reg->smin_value;
5529 u64 umin_val = src_reg->umin_value;
5530
3f50f132
JF
5531 if (src_known && dst_known) {
5532 __mark_reg_known(dst_reg, dst_reg->var_off.value |
5533 src_reg->var_off.value);
5534 return;
5535 }
5536
07cd2631
JF
5537 /* We get our maximum from the var_off, and our minimum is the
5538 * maximum of the operands' minima
5539 */
07cd2631
JF
5540 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
5541 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
5542 if (dst_reg->smin_value < 0 || smin_val < 0) {
5543 /* Lose signed bounds when ORing negative numbers,
5544 * ain't nobody got time for that.
5545 */
5546 dst_reg->smin_value = S64_MIN;
5547 dst_reg->smax_value = S64_MAX;
5548 } else {
5549 /* ORing two positives gives a positive, so safe to
5550 * cast result into s64.
5551 */
5552 dst_reg->smin_value = dst_reg->umin_value;
5553 dst_reg->smax_value = dst_reg->umax_value;
5554 }
5555 /* We may learn something more from the var_off */
5556 __update_reg_bounds(dst_reg);
5557}
5558
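/* Editor's worked example, not part of the original source: OR is the
 * dual of AND above. With dst unknown in its low byte (var_off {0, 0xff})
 * and src the constant 0x10, the minimum is max(umin, 0x10) = 0x10 and
 * the maximum comes from the ORed var_off: value | mask = 0xff.
 */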
3f50f132
JF
5559static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
5560 u64 umin_val, u64 umax_val)
07cd2631 5561{
07cd2631
JF
5562 /* We lose all sign bit information (except what we can pick
5563 * up from var_off)
5564 */
3f50f132
JF
5565 dst_reg->s32_min_value = S32_MIN;
5566 dst_reg->s32_max_value = S32_MAX;
5567 /* If we might shift our top bit out, then we know nothing */
5568 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
5569 dst_reg->u32_min_value = 0;
5570 dst_reg->u32_max_value = U32_MAX;
5571 } else {
5572 dst_reg->u32_min_value <<= umin_val;
5573 dst_reg->u32_max_value <<= umax_val;
5574 }
5575}
5576
5577static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
5578 struct bpf_reg_state *src_reg)
5579{
5580 u32 umax_val = src_reg->u32_max_value;
5581 u32 umin_val = src_reg->u32_min_value;
5582 /* u32 alu operation will zext upper bits */
5583 struct tnum subreg = tnum_subreg(dst_reg->var_off);
5584
5585 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
5586 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
5587 /* Not strictly required, but to be careful mark the reg64 bounds as
5588 * unknown so that we are forced to pick them up from the tnum and zext
5589 * later; if some path skips this step we are still safe.
5590 */
5591 __mark_reg64_unbounded(dst_reg);
5592 __update_reg32_bounds(dst_reg);
5593}
5594
5595static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
5596 u64 umin_val, u64 umax_val)
5597{
5598 /* Special case <<32 because it is a common compiler pattern to sign
5599 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
5600 * positive we know this shift will also be positive so we can track
5601 * bounds correctly. Otherwise we lose all sign bit information except
5602 * what we can pick up from var_off. Perhaps we can generalize this
5603 * later to shifts of any length.
5604 */
5605 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
5606 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
5607 else
5608 dst_reg->smax_value = S64_MAX;
5609
5610 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
5611 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
5612 else
5613 dst_reg->smin_value = S64_MIN;
5614
07cd2631
JF
5615 /* If we might shift our top bit out, then we know nothing */
5616 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
5617 dst_reg->umin_value = 0;
5618 dst_reg->umax_value = U64_MAX;
5619 } else {
5620 dst_reg->umin_value <<= umin_val;
5621 dst_reg->umax_value <<= umax_val;
5622 }
3f50f132
JF
5623}
5624
5625static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
5626 struct bpf_reg_state *src_reg)
5627{
5628 u64 umax_val = src_reg->umax_value;
5629 u64 umin_val = src_reg->umin_value;
5630
5631 /* scalar64 calc uses 32bit unshifted bounds so must be called first */
5632 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
5633 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
5634
07cd2631
JF
5635 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
5636 /* We may learn something more from the var_off */
5637 __update_reg_bounds(dst_reg);
5638}
5639
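/* Editor's worked example, not part of the original source: the special
 * case in __scalar64_min_max_lsh() covers the common "sign extend a
 * subreg" idiom r <<= 32; r s>>= 32. With 32-bit bounds [0, 100] and a
 * shift known to be exactly 32, the signed bounds become
 * [0, 100ULL << 32] instead of collapsing to [S64_MIN, S64_MAX], so the
 * following arithmetic right shift can recover tight bounds.
 */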
3f50f132
JF
5640static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
5641 struct bpf_reg_state *src_reg)
5642{
5643 struct tnum subreg = tnum_subreg(dst_reg->var_off);
5644 u32 umax_val = src_reg->u32_max_value;
5645 u32 umin_val = src_reg->u32_min_value;
5646
5647 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5648 * be negative, then either:
5649 * 1) src_reg might be zero, so the sign bit of the result is
5650 * unknown, so we lose our signed bounds
5651 * 2) it's known negative, thus the unsigned bounds capture the
5652 * signed bounds
5653 * 3) the signed bounds cross zero, so they tell us nothing
5654 * about the result
5655 * If the value in dst_reg is known nonnegative, then again the
5656 * unsigned bounds capture the signed bounds.
5657 * Thus, in all cases it suffices to blow away our signed bounds
5658 * and rely on inferring new ones from the unsigned bounds and
5659 * var_off of the result.
5660 */
5661 dst_reg->s32_min_value = S32_MIN;
5662 dst_reg->s32_max_value = S32_MAX;
5663
5664 dst_reg->var_off = tnum_rshift(subreg, umin_val);
5665 dst_reg->u32_min_value >>= umax_val;
5666 dst_reg->u32_max_value >>= umin_val;
5667
5668 __mark_reg64_unbounded(dst_reg);
5669 __update_reg32_bounds(dst_reg);
5670}
5671
07cd2631
JF
5672static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
5673 struct bpf_reg_state *src_reg)
5674{
5675 u64 umax_val = src_reg->umax_value;
5676 u64 umin_val = src_reg->umin_value;
5677
5678 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5679 * be negative, then either:
5680 * 1) src_reg might be zero, so the sign bit of the result is
5681 * unknown, so we lose our signed bounds
5682 * 2) it's known negative, thus the unsigned bounds capture the
5683 * signed bounds
5684 * 3) the signed bounds cross zero, so they tell us nothing
5685 * about the result
5686 * If the value in dst_reg is known nonnegative, then again the
5687 * unsigned bounds capture the signed bounds.
5688 * Thus, in all cases it suffices to blow away our signed bounds
5689 * and rely on inferring new ones from the unsigned bounds and
5690 * var_off of the result.
5691 */
5692 dst_reg->smin_value = S64_MIN;
5693 dst_reg->smax_value = S64_MAX;
5694 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
5695 dst_reg->umin_value >>= umax_val;
5696 dst_reg->umax_value >>= umin_val;
3f50f132
JF
5697
5698 /* It's not easy to operate on alu32 bounds here because it depends
5699 * on bits being shifted in. Take the easy way out and mark unbounded
5700 * so we can recalculate later from tnum.
5701 */
5702 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
5703 __update_reg_bounds(dst_reg);
5704}
5705
3f50f132
JF
5706static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
5707 struct bpf_reg_state *src_reg)
07cd2631 5708{
3f50f132 5709 u64 umin_val = src_reg->u32_min_value;
07cd2631
JF
5710
5711 /* Upon reaching here, src_known is true and
5712 * umax_val is equal to umin_val.
5713 */
3f50f132
JF
5714 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
5715 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
07cd2631 5716
3f50f132
JF
5717 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
5718
5719 /* blow away the dst_reg umin_value/umax_value and rely on
5720 * dst_reg var_off to refine the result.
5721 */
5722 dst_reg->u32_min_value = 0;
5723 dst_reg->u32_max_value = U32_MAX;
5724
5725 __mark_reg64_unbounded(dst_reg);
5726 __update_reg32_bounds(dst_reg);
5727}
5728
5729static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
5730 struct bpf_reg_state *src_reg)
5731{
5732 u64 umin_val = src_reg->umin_value;
5733
5734 /* Upon reaching here, src_known is true and umax_val is equal
5735 * to umin_val.
5736 */
5737 dst_reg->smin_value >>= umin_val;
5738 dst_reg->smax_value >>= umin_val;
5739
5740 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
07cd2631
JF
5741
5742 /* blow away the dst_reg umin_value/umax_value and rely on
5743 * dst_reg var_off to refine the result.
5744 */
5745 dst_reg->umin_value = 0;
5746 dst_reg->umax_value = U64_MAX;
3f50f132
JF
5747
5748 /* It's not easy to operate on alu32 bounds here because it depends
5749 * on bits being shifted in from the upper 32 bits. Take the easy way
5750 * out and mark unbounded so we can recalculate later from tnum.
5751 */
5752 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
5753 __update_reg_bounds(dst_reg);
5754}
5755
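/* Editor's worked example, not part of the original source: BPF_ARSH only
 * reaches these helpers with a known shift amount (src_known), so for dst
 * in [-8, 8] and a constant shift of 2 the signed bounds become
 * [-8 >> 2, 8 >> 2] = [-2, 2], while the unsigned bounds are discarded
 * and later refined from var_off.
 */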
468f6eaf
JH
5756/* WARNING: This function does calculations on 64-bit values, but the actual
5757 * execution may occur on 32-bit values. Therefore, things like bitshifts
5758 * need extra checks in the 32-bit case.
5759 */
f1174f77
EC
5760static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
5761 struct bpf_insn *insn,
5762 struct bpf_reg_state *dst_reg,
5763 struct bpf_reg_state src_reg)
969bf05e 5764{
638f5b90 5765 struct bpf_reg_state *regs = cur_regs(env);
48461135 5766 u8 opcode = BPF_OP(insn->code);
b0b3fb67 5767 bool src_known;
b03c9f9f
EC
5768 s64 smin_val, smax_val;
5769 u64 umin_val, umax_val;
3f50f132
JF
5770 s32 s32_min_val, s32_max_val;
5771 u32 u32_min_val, u32_max_val;
468f6eaf 5772 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
5773 u32 dst = insn->dst_reg;
5774 int ret;
3f50f132 5775 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
b799207e 5776
b03c9f9f
EC
5777 smin_val = src_reg.smin_value;
5778 smax_val = src_reg.smax_value;
5779 umin_val = src_reg.umin_value;
5780 umax_val = src_reg.umax_value;
f23cc643 5781
3f50f132
JF
5782 s32_min_val = src_reg.s32_min_value;
5783 s32_max_val = src_reg.s32_max_value;
5784 u32_min_val = src_reg.u32_min_value;
5785 u32_max_val = src_reg.u32_max_value;
5786
5787 if (alu32) {
5788 src_known = tnum_subreg_is_const(src_reg.var_off);
3f50f132
JF
5789 if ((src_known &&
5790 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
5791 s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
5792 /* Taint dst register if offset had invalid bounds
5793 * derived from e.g. dead branches.
5794 */
5795 __mark_reg_unknown(env, dst_reg);
5796 return 0;
5797 }
5798 } else {
5799 src_known = tnum_is_const(src_reg.var_off);
3f50f132
JF
5800 if ((src_known &&
5801 (smin_val != smax_val || umin_val != umax_val)) ||
5802 smin_val > smax_val || umin_val > umax_val) {
5803 /* Taint dst register if offset had invalid bounds
5804 * derived from e.g. dead branches.
5805 */
5806 __mark_reg_unknown(env, dst_reg);
5807 return 0;
5808 }
6f16101e
DB
5809 }
5810
bb7f0f98
AS
5811 if (!src_known &&
5812 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 5813 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
5814 return 0;
5815 }
5816
3f50f132
JF
5817 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
5818 * There are two classes of instructions: for the first class we track both
5819 * alu32 and alu64 sign/unsigned bounds independently; this provides the
5820 * greatest amount of precision when alu operations are mixed with jmp32
5821 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
5822 * and BPF_OR. This is possible because these ops have fairly easy to
5823 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
5824 * See alu32 verifier tests for examples. The second class of
5825 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
5826 * with regards to tracking sign/unsigned bounds because the bits may
5827 * cross subreg boundaries in the alu64 case. When this happens we mark
5828 * the reg unbounded in the subreg bound space and use the resulting
5829 * tnum to calculate an approximation of the sign/unsigned bounds.
5830 */
48461135
JB
5831 switch (opcode) {
5832 case BPF_ADD:
d3bd7413
DB
5833 ret = sanitize_val_alu(env, insn);
5834 if (ret < 0) {
5835 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
5836 return ret;
5837 }
3f50f132 5838 scalar32_min_max_add(dst_reg, &src_reg);
07cd2631 5839 scalar_min_max_add(dst_reg, &src_reg);
3f50f132 5840 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
5841 break;
5842 case BPF_SUB:
d3bd7413
DB
5843 ret = sanitize_val_alu(env, insn);
5844 if (ret < 0) {
5845 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
5846 return ret;
5847 }
3f50f132 5848 scalar32_min_max_sub(dst_reg, &src_reg);
07cd2631 5849 scalar_min_max_sub(dst_reg, &src_reg);
3f50f132 5850 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
5851 break;
5852 case BPF_MUL:
3f50f132
JF
5853 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
5854 scalar32_min_max_mul(dst_reg, &src_reg);
07cd2631 5855 scalar_min_max_mul(dst_reg, &src_reg);
48461135
JB
5856 break;
5857 case BPF_AND:
3f50f132
JF
5858 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
5859 scalar32_min_max_and(dst_reg, &src_reg);
07cd2631 5860 scalar_min_max_and(dst_reg, &src_reg);
f1174f77
EC
5861 break;
5862 case BPF_OR:
3f50f132
JF
5863 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
5864 scalar32_min_max_or(dst_reg, &src_reg);
07cd2631 5865 scalar_min_max_or(dst_reg, &src_reg);
48461135
JB
5866 break;
5867 case BPF_LSH:
468f6eaf
JH
5868 if (umax_val >= insn_bitness) {
5869 /* Shifts greater than 31 or 63 are undefined.
5870 * This includes shifts by a negative number.
b03c9f9f 5871 */
61bd5218 5872 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5873 break;
5874 }
3f50f132
JF
5875 if (alu32)
5876 scalar32_min_max_lsh(dst_reg, &src_reg);
5877 else
5878 scalar_min_max_lsh(dst_reg, &src_reg);
48461135
JB
5879 break;
5880 case BPF_RSH:
468f6eaf
JH
5881 if (umax_val >= insn_bitness) {
5882 /* Shifts greater than 31 or 63 are undefined.
5883 * This includes shifts by a negative number.
b03c9f9f 5884 */
61bd5218 5885 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5886 break;
5887 }
3f50f132
JF
5888 if (alu32)
5889 scalar32_min_max_rsh(dst_reg, &src_reg);
5890 else
5891 scalar_min_max_rsh(dst_reg, &src_reg);
48461135 5892 break;
9cbe1f5a
YS
5893 case BPF_ARSH:
5894 if (umax_val >= insn_bitness) {
5895 /* Shifts greater than 31 or 63 are undefined.
5896 * This includes shifts by a negative number.
5897 */
5898 mark_reg_unknown(env, regs, insn->dst_reg);
5899 break;
5900 }
3f50f132
JF
5901 if (alu32)
5902 scalar32_min_max_arsh(dst_reg, &src_reg);
5903 else
5904 scalar_min_max_arsh(dst_reg, &src_reg);
9cbe1f5a 5905 break;
48461135 5906 default:
61bd5218 5907 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
5908 break;
5909 }
5910
3f50f132
JF
5911 /* ALU32 ops are zero extended into 64bit register */
5912 if (alu32)
5913 zext_32_to_64(dst_reg);
468f6eaf 5914
294f2fc6 5915 __update_reg_bounds(dst_reg);
b03c9f9f
EC
5916 __reg_deduce_bounds(dst_reg);
5917 __reg_bound_offset(dst_reg);
f1174f77
EC
5918 return 0;
5919}
5920
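/* Editor's worked example, not part of the original source: for an alu32
 * op such as w1 += w2, only the 32-bit bounds are tracked through
 * scalar32_min_max_add(), and zext_32_to_64() then zero-extends the
 * result: a w1 in [0xfffffff0, 0xffffffff] yields the 64-bit range
 * [0xfffffff0, 0xffffffff] with the upper 32 bits known to be zero.
 */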
5921/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5922 * and var_off.
5923 */
5924static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5925 struct bpf_insn *insn)
5926{
f4d7e40a
AS
5927 struct bpf_verifier_state *vstate = env->cur_state;
5928 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5929 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
5930 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5931 u8 opcode = BPF_OP(insn->code);
b5dc0163 5932 int err;
f1174f77
EC
5933
5934 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
5935 src_reg = NULL;
5936 if (dst_reg->type != SCALAR_VALUE)
5937 ptr_reg = dst_reg;
5938 if (BPF_SRC(insn->code) == BPF_X) {
5939 src_reg = &regs[insn->src_reg];
f1174f77
EC
5940 if (src_reg->type != SCALAR_VALUE) {
5941 if (dst_reg->type != SCALAR_VALUE) {
5942 /* Combining two pointers by any ALU op yields
82abbf8d
AS
5943 * an arbitrary scalar. Disallow all math except
5944 * pointer subtraction
f1174f77 5945 */
dd066823 5946 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
5947 mark_reg_unknown(env, regs, insn->dst_reg);
5948 return 0;
f1174f77 5949 }
82abbf8d
AS
5950 verbose(env, "R%d pointer %s pointer prohibited\n",
5951 insn->dst_reg,
5952 bpf_alu_string[opcode >> 4]);
5953 return -EACCES;
f1174f77
EC
5954 } else {
5955 /* scalar += pointer
5956 * This is legal, but we have to reverse our
5957 * src/dest handling in computing the range
5958 */
b5dc0163
AS
5959 err = mark_chain_precision(env, insn->dst_reg);
5960 if (err)
5961 return err;
82abbf8d
AS
5962 return adjust_ptr_min_max_vals(env, insn,
5963 src_reg, dst_reg);
f1174f77
EC
5964 }
5965 } else if (ptr_reg) {
5966 /* pointer += scalar */
b5dc0163
AS
5967 err = mark_chain_precision(env, insn->src_reg);
5968 if (err)
5969 return err;
82abbf8d
AS
5970 return adjust_ptr_min_max_vals(env, insn,
5971 dst_reg, src_reg);
f1174f77
EC
5972 }
5973 } else {
5974 /* Pretend the src is a reg with a known value, since we only
5975 * need to be able to read from this state.
5976 */
5977 off_reg.type = SCALAR_VALUE;
b03c9f9f 5978 __mark_reg_known(&off_reg, insn->imm);
f1174f77 5979 src_reg = &off_reg;
82abbf8d
AS
5980 if (ptr_reg) /* pointer += K */
5981 return adjust_ptr_min_max_vals(env, insn,
5982 ptr_reg, src_reg);
f1174f77
EC
5983 }
5984
5985 /* Got here implies adding two SCALAR_VALUEs */
5986 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 5987 print_verifier_state(env, state);
61bd5218 5988 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
5989 return -EINVAL;
5990 }
5991 if (WARN_ON(!src_reg)) {
f4d7e40a 5992 print_verifier_state(env, state);
61bd5218 5993 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
5994 return -EINVAL;
5995 }
5996 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
5997}
5998
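/* Editor's worked example, not part of the original source: for
 * "r1 = r10; r1 += r2" the dst is the pointer, so the call above is
 * adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg). For the legal
 * "r2 += r1" (scalar += pointer) the operands are passed reversed so the
 * pointer is still treated as the base, and r2 ends up as PTR_TO_STACK
 * with a variable offset.
 */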
17a52670 5999/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 6000static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6001{
638f5b90 6002 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
6003 u8 opcode = BPF_OP(insn->code);
6004 int err;
6005
6006 if (opcode == BPF_END || opcode == BPF_NEG) {
6007 if (opcode == BPF_NEG) {
6008 if (BPF_SRC(insn->code) != 0 ||
6009 insn->src_reg != BPF_REG_0 ||
6010 insn->off != 0 || insn->imm != 0) {
61bd5218 6011 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
6012 return -EINVAL;
6013 }
6014 } else {
6015 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
6016 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
6017 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 6018 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
6019 return -EINVAL;
6020 }
6021 }
6022
6023 /* check src operand */
dc503a8a 6024 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6025 if (err)
6026 return err;
6027
1be7f75d 6028 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 6029 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
6030 insn->dst_reg);
6031 return -EACCES;
6032 }
6033
17a52670 6034 /* check dest operand */
dc503a8a 6035 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
6036 if (err)
6037 return err;
6038
6039 } else if (opcode == BPF_MOV) {
6040
6041 if (BPF_SRC(insn->code) == BPF_X) {
6042 if (insn->imm != 0 || insn->off != 0) {
61bd5218 6043 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
6044 return -EINVAL;
6045 }
6046
6047 /* check src operand */
dc503a8a 6048 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6049 if (err)
6050 return err;
6051 } else {
6052 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 6053 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
6054 return -EINVAL;
6055 }
6056 }
6057
fbeb1603
AF
6058 /* check dest operand, mark as required later */
6059 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
6060 if (err)
6061 return err;
6062
6063 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
6064 struct bpf_reg_state *src_reg = regs + insn->src_reg;
6065 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
6066
17a52670
AS
6067 if (BPF_CLASS(insn->code) == BPF_ALU64) {
6068 /* case: R1 = R2
6069 * copy register state to dest reg
6070 */
e434b8cd
JW
6071 *dst_reg = *src_reg;
6072 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 6073 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 6074 } else {
f1174f77 6075 /* R1 = (u32) R2 */
1be7f75d 6076 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
6077 verbose(env,
6078 "R%d partial copy of pointer\n",
1be7f75d
AS
6079 insn->src_reg);
6080 return -EACCES;
e434b8cd
JW
6081 } else if (src_reg->type == SCALAR_VALUE) {
6082 *dst_reg = *src_reg;
6083 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 6084 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
6085 } else {
6086 mark_reg_unknown(env, regs,
6087 insn->dst_reg);
1be7f75d 6088 }
3f50f132 6089 zext_32_to_64(dst_reg);
17a52670
AS
6090 }
6091 } else {
6092 /* case: R = imm
6093 * remember the value we stored into this reg
6094 */
fbeb1603
AF
6095 /* clear any state __mark_reg_known doesn't set */
6096 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 6097 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
6098 if (BPF_CLASS(insn->code) == BPF_ALU64) {
6099 __mark_reg_known(regs + insn->dst_reg,
6100 insn->imm);
6101 } else {
6102 __mark_reg_known(regs + insn->dst_reg,
6103 (u32)insn->imm);
6104 }
17a52670
AS
6105 }
6106
6107 } else if (opcode > BPF_END) {
61bd5218 6108 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
6109 return -EINVAL;
6110
6111 } else { /* all other ALU ops: and, sub, xor, add, ... */
6112
17a52670
AS
6113 if (BPF_SRC(insn->code) == BPF_X) {
6114 if (insn->imm != 0 || insn->off != 0) {
61bd5218 6115 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
6116 return -EINVAL;
6117 }
6118 /* check src1 operand */
dc503a8a 6119 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6120 if (err)
6121 return err;
6122 } else {
6123 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 6124 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
6125 return -EINVAL;
6126 }
6127 }
6128
6129 /* check src2 operand */
dc503a8a 6130 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6131 if (err)
6132 return err;
6133
6134 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
6135 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 6136 verbose(env, "div by zero\n");
17a52670
AS
6137 return -EINVAL;
6138 }
6139
229394e8
RV
6140 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
6141 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
6142 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
6143
6144 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 6145 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
6146 return -EINVAL;
6147 }
6148 }
6149
1a0dc1ac 6150 /* check dest operand */
dc503a8a 6151 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
6152 if (err)
6153 return err;
6154
f1174f77 6155 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
6156 }
6157
6158 return 0;
6159}
6160
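/* Editor's worked examples, not part of the original source: encodings
 * rejected by the checks above include BPF_END with BPF_CLASS(insn->code)
 * == BPF_ALU64 ("BPF_END uses reserved fields"), a register-form BPF_MOV
 * with a nonzero imm or off ("BPF_MOV uses reserved fields"), BPF_DIV or
 * BPF_MOD by the constant 0 ("div by zero"), and a BPF_K shift whose
 * immediate is not below the operand width, e.g. a 32-bit left shift by
 * 32 ("invalid shift 32").
 */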
c6a9efa1
PC
6161static void __find_good_pkt_pointers(struct bpf_func_state *state,
6162 struct bpf_reg_state *dst_reg,
6163 enum bpf_reg_type type, u16 new_range)
6164{
6165 struct bpf_reg_state *reg;
6166 int i;
6167
6168 for (i = 0; i < MAX_BPF_REG; i++) {
6169 reg = &state->regs[i];
6170 if (reg->type == type && reg->id == dst_reg->id)
6171 /* keep the maximum range already checked */
6172 reg->range = max(reg->range, new_range);
6173 }
6174
6175 bpf_for_each_spilled_reg(i, state, reg) {
6176 if (!reg)
6177 continue;
6178 if (reg->type == type && reg->id == dst_reg->id)
6179 reg->range = max(reg->range, new_range);
6180 }
6181}
6182
f4d7e40a 6183static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 6184 struct bpf_reg_state *dst_reg,
f8ddadc4 6185 enum bpf_reg_type type,
fb2a311a 6186 bool range_right_open)
969bf05e 6187{
fb2a311a 6188 u16 new_range;
c6a9efa1 6189 int i;
2d2be8ca 6190
fb2a311a
DB
6191 if (dst_reg->off < 0 ||
6192 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
6193 /* This doesn't give us any range */
6194 return;
6195
b03c9f9f
EC
6196 if (dst_reg->umax_value > MAX_PACKET_OFF ||
6197 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
6198 /* Risk of overflow. For instance, ptr + (1<<63) may be less
6199 * than pkt_end, but that's because it's also less than pkt.
6200 */
6201 return;
6202
fb2a311a
DB
6203 new_range = dst_reg->off;
6204 if (range_right_open)
6205 new_range--;
6206
6207 /* Examples for register markings:
2d2be8ca 6208 *
fb2a311a 6209 * pkt_data in dst register:
2d2be8ca
DB
6210 *
6211 * r2 = r3;
6212 * r2 += 8;
6213 * if (r2 > pkt_end) goto <handle exception>
6214 * <access okay>
6215 *
b4e432f1
DB
6216 * r2 = r3;
6217 * r2 += 8;
6218 * if (r2 < pkt_end) goto <access okay>
6219 * <handle exception>
6220 *
2d2be8ca
DB
6221 * Where:
6222 * r2 == dst_reg, pkt_end == src_reg
6223 * r2=pkt(id=n,off=8,r=0)
6224 * r3=pkt(id=n,off=0,r=0)
6225 *
fb2a311a 6226 * pkt_data in src register:
2d2be8ca
DB
6227 *
6228 * r2 = r3;
6229 * r2 += 8;
6230 * if (pkt_end >= r2) goto <access okay>
6231 * <handle exception>
6232 *
b4e432f1
DB
6233 * r2 = r3;
6234 * r2 += 8;
6235 * if (pkt_end <= r2) goto <handle exception>
6236 * <access okay>
6237 *
2d2be8ca
DB
6238 * Where:
6239 * pkt_end == dst_reg, r2 == src_reg
6240 * r2=pkt(id=n,off=8,r=0)
6241 * r3=pkt(id=n,off=0,r=0)
6242 *
6243 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
6244 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
6245 * and [r3, r3 + 8-1) respectively is safe to access depending on
6246 * the check.
969bf05e 6247 */
2d2be8ca 6248
f1174f77
EC
6249 /* If our ids match, then we must have the same max_value. And we
6250 * don't care about the other reg's fixed offset, since if it's too big
6251 * the range won't allow anything.
6252 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
6253 */
c6a9efa1
PC
6254 for (i = 0; i <= vstate->curframe; i++)
6255 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
6256 new_range);
969bf05e
AS
6257}
6258
3f50f132 6259static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
4f7b3e82 6260{
3f50f132
JF
6261 struct tnum subreg = tnum_subreg(reg->var_off);
6262 s32 sval = (s32)val;
a72dafaf 6263
3f50f132
JF
6264 switch (opcode) {
6265 case BPF_JEQ:
6266 if (tnum_is_const(subreg))
6267 return !!tnum_equals_const(subreg, val);
6268 break;
6269 case BPF_JNE:
6270 if (tnum_is_const(subreg))
6271 return !tnum_equals_const(subreg, val);
6272 break;
6273 case BPF_JSET:
6274 if ((~subreg.mask & subreg.value) & val)
6275 return 1;
6276 if (!((subreg.mask | subreg.value) & val))
6277 return 0;
6278 break;
6279 case BPF_JGT:
6280 if (reg->u32_min_value > val)
6281 return 1;
6282 else if (reg->u32_max_value <= val)
6283 return 0;
6284 break;
6285 case BPF_JSGT:
6286 if (reg->s32_min_value > sval)
6287 return 1;
6288 else if (reg->s32_max_value < sval)
6289 return 0;
6290 break;
6291 case BPF_JLT:
6292 if (reg->u32_max_value < val)
6293 return 1;
6294 else if (reg->u32_min_value >= val)
6295 return 0;
6296 break;
6297 case BPF_JSLT:
6298 if (reg->s32_max_value < sval)
6299 return 1;
6300 else if (reg->s32_min_value >= sval)
6301 return 0;
6302 break;
6303 case BPF_JGE:
6304 if (reg->u32_min_value >= val)
6305 return 1;
6306 else if (reg->u32_max_value < val)
6307 return 0;
6308 break;
6309 case BPF_JSGE:
6310 if (reg->s32_min_value >= sval)
6311 return 1;
6312 else if (reg->s32_max_value < sval)
6313 return 0;
6314 break;
6315 case BPF_JLE:
6316 if (reg->u32_max_value <= val)
6317 return 1;
6318 else if (reg->u32_min_value > val)
6319 return 0;
6320 break;
6321 case BPF_JSLE:
6322 if (reg->s32_max_value <= sval)
6323 return 1;
6324 else if (reg->s32_min_value > sval)
6325 return 0;
6326 break;
6327 }
4f7b3e82 6328
3f50f132
JF
6329 return -1;
6330}
092ed096 6331
3f50f132
JF
6332
6333static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
6334{
6335 s64 sval = (s64)val;
a72dafaf 6336
4f7b3e82
AS
6337 switch (opcode) {
6338 case BPF_JEQ:
6339 if (tnum_is_const(reg->var_off))
6340 return !!tnum_equals_const(reg->var_off, val);
6341 break;
6342 case BPF_JNE:
6343 if (tnum_is_const(reg->var_off))
6344 return !tnum_equals_const(reg->var_off, val);
6345 break;
960ea056
JK
6346 case BPF_JSET:
6347 if ((~reg->var_off.mask & reg->var_off.value) & val)
6348 return 1;
6349 if (!((reg->var_off.mask | reg->var_off.value) & val))
6350 return 0;
6351 break;
4f7b3e82
AS
6352 case BPF_JGT:
6353 if (reg->umin_value > val)
6354 return 1;
6355 else if (reg->umax_value <= val)
6356 return 0;
6357 break;
6358 case BPF_JSGT:
a72dafaf 6359 if (reg->smin_value > sval)
4f7b3e82 6360 return 1;
a72dafaf 6361 else if (reg->smax_value < sval)
4f7b3e82
AS
6362 return 0;
6363 break;
6364 case BPF_JLT:
6365 if (reg->umax_value < val)
6366 return 1;
6367 else if (reg->umin_value >= val)
6368 return 0;
6369 break;
6370 case BPF_JSLT:
a72dafaf 6371 if (reg->smax_value < sval)
4f7b3e82 6372 return 1;
a72dafaf 6373 else if (reg->smin_value >= sval)
4f7b3e82
AS
6374 return 0;
6375 break;
6376 case BPF_JGE:
6377 if (reg->umin_value >= val)
6378 return 1;
6379 else if (reg->umax_value < val)
6380 return 0;
6381 break;
6382 case BPF_JSGE:
a72dafaf 6383 if (reg->smin_value >= sval)
4f7b3e82 6384 return 1;
a72dafaf 6385 else if (reg->smax_value < sval)
4f7b3e82
AS
6386 return 0;
6387 break;
6388 case BPF_JLE:
6389 if (reg->umax_value <= val)
6390 return 1;
6391 else if (reg->umin_value > val)
6392 return 0;
6393 break;
6394 case BPF_JSLE:
a72dafaf 6395 if (reg->smax_value <= sval)
4f7b3e82 6396 return 1;
a72dafaf 6397 else if (reg->smin_value > sval)
4f7b3e82
AS
6398 return 0;
6399 break;
6400 }
6401
6402 return -1;
6403}
6404
3f50f132
JF
6405/* compute branch direction of the expression "if (reg opcode val) goto target;"
6406 * and return:
6407 * 1 - branch will be taken and "goto target" will be executed
6408 * 0 - branch will not be taken and fall-through to next insn
6409 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
6410 * range [0,10]
604dca5e 6411 */
3f50f132
JF
6412static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
6413 bool is_jmp32)
604dca5e 6414{
cac616db
JF
6415 if (__is_pointer_value(false, reg)) {
6416 if (!reg_type_not_null(reg->type))
6417 return -1;
6418
6419 /* If pointer is valid tests against zero will fail so we can
6420 * use this to direct branch taken.
6421 */
6422 if (val != 0)
6423 return -1;
6424
6425 switch (opcode) {
6426 case BPF_JEQ:
6427 return 0;
6428 case BPF_JNE:
6429 return 1;
6430 default:
6431 return -1;
6432 }
6433 }
604dca5e 6434
3f50f132
JF
6435 if (is_jmp32)
6436 return is_branch32_taken(reg, val, opcode);
6437 return is_branch64_taken(reg, val, opcode);
604dca5e
JH
6438}
6439
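/* Editor's illustration, not part of the original source (the helper name
 * is invented): branch prediction on a scalar known to lie in [0, 10].
 */
static void __maybe_unused branch_taken_example(void)
{
	struct bpf_reg_state reg = {};

	reg.type = SCALAR_VALUE;
	reg.umin_value = 0;  reg.umax_value = 10;
	reg.smin_value = 0;  reg.smax_value = 10;
	reg.var_off = tnum_range(0, 10);

	WARN_ON(is_branch_taken(&reg, 10, BPF_JGT, false) != 0); /* never */
	WARN_ON(is_branch_taken(&reg, 10, BPF_JLE, false) != 1); /* always */
	WARN_ON(is_branch_taken(&reg, 5, BPF_JLT, false) != -1); /* unknown */
}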
48461135
JB
6440/* Adjusts the register min/max values in the case that the dst_reg is the
6441 * variable register that we are working on, and src_reg is a constant or we're
6442 * simply doing a BPF_K check.
f1174f77 6443 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
6444 */
6445static void reg_set_min_max(struct bpf_reg_state *true_reg,
3f50f132
JF
6446 struct bpf_reg_state *false_reg,
6447 u64 val, u32 val32,
092ed096 6448 u8 opcode, bool is_jmp32)
48461135 6449{
3f50f132
JF
6450 struct tnum false_32off = tnum_subreg(false_reg->var_off);
6451 struct tnum false_64off = false_reg->var_off;
6452 struct tnum true_32off = tnum_subreg(true_reg->var_off);
6453 struct tnum true_64off = true_reg->var_off;
6454 s64 sval = (s64)val;
6455 s32 sval32 = (s32)val32;
a72dafaf 6456
f1174f77
EC
6457 /* If the dst_reg is a pointer, we can't learn anything about its
6458 * variable offset from the compare (unless src_reg were a pointer into
6459 * the same object, but we don't bother with that).
6460 * Since false_reg and true_reg have the same type by construction, we
6461 * only need to check one of them for pointerness.
6462 */
6463 if (__is_pointer_value(false, false_reg))
6464 return;
4cabc5b1 6465
48461135
JB
6466 switch (opcode) {
6467 case BPF_JEQ:
48461135 6468 case BPF_JNE:
a72dafaf
JW
6469 {
6470 struct bpf_reg_state *reg =
6471 opcode == BPF_JEQ ? true_reg : false_reg;
6472
6473 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
6474 * if it is true we know the value for sure. Likewise for
6475 * BPF_JNE.
48461135 6476 */
3f50f132
JF
6477 if (is_jmp32)
6478 __mark_reg32_known(reg, val32);
6479 else
092ed096 6480 __mark_reg_known(reg, val);
48461135 6481 break;
a72dafaf 6482 }
960ea056 6483 case BPF_JSET:
3f50f132
JF
6484 if (is_jmp32) {
6485 false_32off = tnum_and(false_32off, tnum_const(~val32));
6486 if (is_power_of_2(val32))
6487 true_32off = tnum_or(true_32off,
6488 tnum_const(val32));
6489 } else {
6490 false_64off = tnum_and(false_64off, tnum_const(~val));
6491 if (is_power_of_2(val))
6492 true_64off = tnum_or(true_64off,
6493 tnum_const(val));
6494 }
960ea056 6495 break;
48461135 6496 case BPF_JGE:
a72dafaf
JW
6497 case BPF_JGT:
6498 {
3f50f132
JF
6499 if (is_jmp32) {
6500 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
6501 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
6502
6503 false_reg->u32_max_value = min(false_reg->u32_max_value,
6504 false_umax);
6505 true_reg->u32_min_value = max(true_reg->u32_min_value,
6506 true_umin);
6507 } else {
6508 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
6509 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
6510
6511 false_reg->umax_value = min(false_reg->umax_value, false_umax);
6512 true_reg->umin_value = max(true_reg->umin_value, true_umin);
6513 }
b03c9f9f 6514 break;
a72dafaf 6515 }
48461135 6516 case BPF_JSGE:
a72dafaf
JW
6517 case BPF_JSGT:
6518 {
3f50f132
JF
6519 if (is_jmp32) {
6520 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
6521 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
a72dafaf 6522
3f50f132
JF
6523 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
6524 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
6525 } else {
6526 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
6527 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
6528
6529 false_reg->smax_value = min(false_reg->smax_value, false_smax);
6530 true_reg->smin_value = max(true_reg->smin_value, true_smin);
6531 }
48461135 6532 break;
a72dafaf 6533 }
b4e432f1 6534 case BPF_JLE:
a72dafaf
JW
6535 case BPF_JLT:
6536 {
3f50f132
JF
6537 if (is_jmp32) {
6538 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
6539 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
6540
6541 false_reg->u32_min_value = max(false_reg->u32_min_value,
6542 false_umin);
6543 true_reg->u32_max_value = min(true_reg->u32_max_value,
6544 true_umax);
6545 } else {
6546 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
6547 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
6548
6549 false_reg->umin_value = max(false_reg->umin_value, false_umin);
6550 true_reg->umax_value = min(true_reg->umax_value, true_umax);
6551 }
b4e432f1 6552 break;
a72dafaf 6553 }
b4e432f1 6554 case BPF_JSLE:
a72dafaf
JW
6555 case BPF_JSLT:
6556 {
3f50f132
JF
6557 if (is_jmp32) {
6558 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
6559 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
a72dafaf 6560
3f50f132
JF
6561 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
6562 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
6563 } else {
6564 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
6565 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
6566
6567 false_reg->smin_value = max(false_reg->smin_value, false_smin);
6568 true_reg->smax_value = min(true_reg->smax_value, true_smax);
6569 }
b4e432f1 6570 break;
a72dafaf 6571 }
48461135 6572 default:
0fc31b10 6573 return;
48461135
JB
6574 }
6575
3f50f132
JF
6576 if (is_jmp32) {
6577 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
6578 tnum_subreg(false_32off));
6579 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
6580 tnum_subreg(true_32off));
6581 __reg_combine_32_into_64(false_reg);
6582 __reg_combine_32_into_64(true_reg);
6583 } else {
6584 false_reg->var_off = false_64off;
6585 true_reg->var_off = true_64off;
6586 __reg_combine_64_into_32(false_reg);
6587 __reg_combine_64_into_32(true_reg);
6588 }
48461135
JB
6589}
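/* Editor's worked example (not from verifier.c): for "if (r1 > 10)"
 * with r1 a scalar, the BPF_JGT branch of reg_set_min_max() computes
 *
 *	true branch:  umin = max(umin, 11)
 *	false branch: umax = min(umax, 10)
 *
 * so a register that entered the test with range [0, 255] leaves the
 * true branch as [11, 255] and the false branch as [0, 10].
 */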
6590
f1174f77
EC
6591/* Same as above, but for the case that dst_reg holds a constant and src_reg is
6592 * the variable reg.
48461135
JB
6593 */
6594static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3f50f132
JF
6595 struct bpf_reg_state *false_reg,
6596 u64 val, u32 val32,
092ed096 6597 u8 opcode, bool is_jmp32)
48461135 6598{
0fc31b10
JH
6599 /* How can we transform "a <op> b" into "b <op> a"? */
6600 static const u8 opcode_flip[16] = {
6601 /* these stay the same */
6602 [BPF_JEQ >> 4] = BPF_JEQ,
6603 [BPF_JNE >> 4] = BPF_JNE,
6604 [BPF_JSET >> 4] = BPF_JSET,
6605 /* these swap "lesser" and "greater" (L and G in the opcodes) */
6606 [BPF_JGE >> 4] = BPF_JLE,
6607 [BPF_JGT >> 4] = BPF_JLT,
6608 [BPF_JLE >> 4] = BPF_JGE,
6609 [BPF_JLT >> 4] = BPF_JGT,
6610 [BPF_JSGE >> 4] = BPF_JSLE,
6611 [BPF_JSGT >> 4] = BPF_JSLT,
6612 [BPF_JSLE >> 4] = BPF_JSGE,
6613 [BPF_JSLT >> 4] = BPF_JSGT
6614 };
6615 opcode = opcode_flip[opcode >> 4];
6616 /* This uses zero as "not present in table"; luckily the zero opcode,
6617 * BPF_JA, can't get here.
b03c9f9f 6618 */
0fc31b10 6619 if (opcode)
3f50f132 6620 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
f1174f77
EC
6621}
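/* Editor's example: for "if (10 > r1) goto ..." the constant sits in
 * dst_reg, so the comparison is flipped to "r1 < 10" via opcode_flip
 * above and then handled by the regular reg_set_min_max().
 */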
6622
6623/* Regs are known to be equal, so intersect their min/max/var_off */
6624static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
6625 struct bpf_reg_state *dst_reg)
6626{
b03c9f9f
EC
6627 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
6628 dst_reg->umin_value);
6629 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
6630 dst_reg->umax_value);
6631 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
6632 dst_reg->smin_value);
6633 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
6634 dst_reg->smax_value);
f1174f77
EC
6635 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
6636 dst_reg->var_off);
b03c9f9f
EC
6637 /* We might have learned new bounds from the var_off. */
6638 __update_reg_bounds(src_reg);
6639 __update_reg_bounds(dst_reg);
6640 /* We might have learned something about the sign bit. */
6641 __reg_deduce_bounds(src_reg);
6642 __reg_deduce_bounds(dst_reg);
6643 /* We might have learned some bits from the bounds. */
6644 __reg_bound_offset(src_reg);
6645 __reg_bound_offset(dst_reg);
6646 /* Intersecting with the old var_off might have improved our bounds
6647 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
6648 * then new var_off is (0; 0x7f...fc) which improves our umax.
6649 */
6650 __update_reg_bounds(src_reg);
6651 __update_reg_bounds(dst_reg);
f1174f77
EC
6652}
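/* Editor's example: after "if (r1 == r2)" is taken, both registers must
 * satisfy the intersection of their knowledge: r1 in [0, 100] and r2 in
 * [50, 200] combine to [50, 100] for both, and their var_offs are
 * intersected the same way.
 */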
6653
6654static void reg_combine_min_max(struct bpf_reg_state *true_src,
6655 struct bpf_reg_state *true_dst,
6656 struct bpf_reg_state *false_src,
6657 struct bpf_reg_state *false_dst,
6658 u8 opcode)
6659{
6660 switch (opcode) {
6661 case BPF_JEQ:
6662 __reg_combine_min_max(true_src, true_dst);
6663 break;
6664 case BPF_JNE:
6665 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 6666 break;
4cabc5b1 6667 }
48461135
JB
6668}
6669
fd978bf7
JS
6670static void mark_ptr_or_null_reg(struct bpf_func_state *state,
6671 struct bpf_reg_state *reg, u32 id,
840b9615 6672 bool is_null)
57a09bf0 6673{
840b9615 6674 if (reg_type_may_be_null(reg->type) && reg->id == id) {
f1174f77
EC
6675 /* Old offset (both fixed and variable parts) should
6676 * have been known-zero, because we don't allow pointer
6677 * arithmetic on pointers that might be NULL.
6678 */
b03c9f9f
EC
6679 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
6680 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 6681 reg->off)) {
b03c9f9f
EC
6682 __mark_reg_known_zero(reg);
6683 reg->off = 0;
f1174f77
EC
6684 }
6685 if (is_null) {
6686 reg->type = SCALAR_VALUE;
840b9615 6687 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
64d85290
JS
6688 const struct bpf_map *map = reg->map_ptr;
6689
6690 if (map->inner_map_meta) {
840b9615 6691 reg->type = CONST_PTR_TO_MAP;
64d85290
JS
6692 reg->map_ptr = map->inner_map_meta;
6693 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
fada7fdc 6694 reg->type = PTR_TO_XDP_SOCK;
64d85290
JS
6695 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
6696 map->map_type == BPF_MAP_TYPE_SOCKHASH) {
6697 reg->type = PTR_TO_SOCKET;
840b9615
JS
6698 } else {
6699 reg->type = PTR_TO_MAP_VALUE;
6700 }
c64b7983
JS
6701 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
6702 reg->type = PTR_TO_SOCKET;
46f8bc92
MKL
6703 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
6704 reg->type = PTR_TO_SOCK_COMMON;
655a51e5
MKL
6705 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
6706 reg->type = PTR_TO_TCP_SOCK;
b121b341
YS
6707 } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
6708 reg->type = PTR_TO_BTF_ID;
457f4436
AN
6709 } else if (reg->type == PTR_TO_MEM_OR_NULL) {
6710 reg->type = PTR_TO_MEM;
56f668df 6711 }
1b986589
MKL
6712 if (is_null) {
6713 /* We don't need id and ref_obj_id from this point
6714 * onwards anymore, so reset them both to give state
6715 * pruning a chance to take effect.
6716 */
6717 reg->id = 0;
6718 reg->ref_obj_id = 0;
6719 } else if (!reg_may_point_to_spin_lock(reg)) {
6720 /* For not-NULL ptr, reg->ref_obj_id will be reset
6721 * in release_reg_references().
6722 *
6723 * reg->id is still used by spin_lock ptr. Other
6724 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
6725 */
6726 reg->id = 0;
56f668df 6727 }
57a09bf0
TG
6728 }
6729}
6730
c6a9efa1
PC
6731static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
6732 bool is_null)
6733{
6734 struct bpf_reg_state *reg;
6735 int i;
6736
6737 for (i = 0; i < MAX_BPF_REG; i++)
6738 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
6739
6740 bpf_for_each_spilled_reg(i, state, reg) {
6741 if (!reg)
6742 continue;
6743 mark_ptr_or_null_reg(state, reg, id, is_null);
6744 }
6745}
6746
57a09bf0
TG
6747/* The logic is similar to find_good_pkt_pointers(), both could eventually
6748 * be folded together at some point.
6749 */
840b9615
JS
6750static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
6751 bool is_null)
57a09bf0 6752{
f4d7e40a 6753 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 6754 struct bpf_reg_state *regs = state->regs;
1b986589 6755 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 6756 u32 id = regs[regno].id;
c6a9efa1 6757 int i;
57a09bf0 6758
1b986589
MKL
6759 if (ref_obj_id && ref_obj_id == id && is_null)
6760 /* regs[regno] is in the " == NULL" branch.
6761 * No one could have freed the reference state before
6762 * doing the NULL check.
6763 */
6764 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 6765
c6a9efa1
PC
6766 for (i = 0; i <= vstate->curframe; i++)
6767 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
6768}
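/* Editor's illustrative BPF C fragment (a sketch; "vals" and "key" are
 * hypothetical): the pattern the PTR_TO_MAP_VALUE_OR_NULL handling
 * above serves. bpf_map_lookup_elem() returns a register of type
 * PTR_TO_MAP_VALUE_OR_NULL:
 *
 *	long *v = bpf_map_lookup_elem(&vals, &key);
 *	if (!v)
 *		return 0;
 *	*v += 1;
 *
 * In the "v == NULL" branch the register collapses to SCALAR_VALUE; in
 * the fall-through branch it becomes PTR_TO_MAP_VALUE, so the final
 * increment is a legal dereference.
 */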
6769
5beca081
DB
6770static bool try_match_pkt_pointers(const struct bpf_insn *insn,
6771 struct bpf_reg_state *dst_reg,
6772 struct bpf_reg_state *src_reg,
6773 struct bpf_verifier_state *this_branch,
6774 struct bpf_verifier_state *other_branch)
6775{
6776 if (BPF_SRC(insn->code) != BPF_X)
6777 return false;
6778
092ed096
JW
6779 /* Pointers are always 64-bit. */
6780 if (BPF_CLASS(insn->code) == BPF_JMP32)
6781 return false;
6782
5beca081
DB
6783 switch (BPF_OP(insn->code)) {
6784 case BPF_JGT:
6785 if ((dst_reg->type == PTR_TO_PACKET &&
6786 src_reg->type == PTR_TO_PACKET_END) ||
6787 (dst_reg->type == PTR_TO_PACKET_META &&
6788 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6789 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
6790 find_good_pkt_pointers(this_branch, dst_reg,
6791 dst_reg->type, false);
6792 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6793 src_reg->type == PTR_TO_PACKET) ||
6794 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6795 src_reg->type == PTR_TO_PACKET_META)) {
6796 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
6797 find_good_pkt_pointers(other_branch, src_reg,
6798 src_reg->type, true);
6799 } else {
6800 return false;
6801 }
6802 break;
6803 case BPF_JLT:
6804 if ((dst_reg->type == PTR_TO_PACKET &&
6805 src_reg->type == PTR_TO_PACKET_END) ||
6806 (dst_reg->type == PTR_TO_PACKET_META &&
6807 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6808 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
6809 find_good_pkt_pointers(other_branch, dst_reg,
6810 dst_reg->type, true);
6811 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6812 src_reg->type == PTR_TO_PACKET) ||
6813 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6814 src_reg->type == PTR_TO_PACKET_META)) {
6815 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
6816 find_good_pkt_pointers(this_branch, src_reg,
6817 src_reg->type, false);
6818 } else {
6819 return false;
6820 }
6821 break;
6822 case BPF_JGE:
6823 if ((dst_reg->type == PTR_TO_PACKET &&
6824 src_reg->type == PTR_TO_PACKET_END) ||
6825 (dst_reg->type == PTR_TO_PACKET_META &&
6826 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6827 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
6828 find_good_pkt_pointers(this_branch, dst_reg,
6829 dst_reg->type, true);
6830 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6831 src_reg->type == PTR_TO_PACKET) ||
6832 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6833 src_reg->type == PTR_TO_PACKET_META)) {
6834 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
6835 find_good_pkt_pointers(other_branch, src_reg,
6836 src_reg->type, false);
6837 } else {
6838 return false;
6839 }
6840 break;
6841 case BPF_JLE:
6842 if ((dst_reg->type == PTR_TO_PACKET &&
6843 src_reg->type == PTR_TO_PACKET_END) ||
6844 (dst_reg->type == PTR_TO_PACKET_META &&
6845 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6846 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6847 find_good_pkt_pointers(other_branch, dst_reg,
6848 dst_reg->type, false);
6849 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6850 src_reg->type == PTR_TO_PACKET) ||
6851 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6852 src_reg->type == PTR_TO_PACKET_META)) {
6853 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6854 find_good_pkt_pointers(this_branch, src_reg,
6855 src_reg->type, true);
6856 } else {
6857 return false;
6858 }
6859 break;
6860 default:
6861 return false;
6862 }
6863
6864 return true;
6865}
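/* Editor's illustrative XDP C fragment (a sketch; ctx is the program's
 * struct xdp_md argument): the canonical bounds check that the BPF_JGT
 * case above recognizes.
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	if (data + 4 > data_end)
 *		return XDP_DROP;
 *	return ((unsigned char *)data)[3] ? XDP_PASS : XDP_DROP;
 *
 * The taken branch is the out-of-bounds one; in the fall-through branch
 * the verifier has proven data + 4 <= data_end, so find_good_pkt_pointers()
 * grants the packet pointer a range of 4 and the byte load is accepted.
 */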
6866
58e2af8b 6867static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
6868 struct bpf_insn *insn, int *insn_idx)
6869{
f4d7e40a
AS
6870 struct bpf_verifier_state *this_branch = env->cur_state;
6871 struct bpf_verifier_state *other_branch;
6872 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 6873 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 6874 u8 opcode = BPF_OP(insn->code);
092ed096 6875 bool is_jmp32;
fb8d251e 6876 int pred = -1;
17a52670
AS
6877 int err;
6878
092ed096
JW
6879 /* Only conditional jumps are expected to reach here. */
6880 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6881 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
6882 return -EINVAL;
6883 }
6884
6885 if (BPF_SRC(insn->code) == BPF_X) {
6886 if (insn->imm != 0) {
092ed096 6887 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6888 return -EINVAL;
6889 }
6890
6891 /* check src1 operand */
dc503a8a 6892 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6893 if (err)
6894 return err;
1be7f75d
AS
6895
6896 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6897 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
6898 insn->src_reg);
6899 return -EACCES;
6900 }
fb8d251e 6901 src_reg = &regs[insn->src_reg];
17a52670
AS
6902 } else {
6903 if (insn->src_reg != BPF_REG_0) {
092ed096 6904 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6905 return -EINVAL;
6906 }
6907 }
6908
6909 /* check src2 operand */
dc503a8a 6910 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6911 if (err)
6912 return err;
6913
1a0dc1ac 6914 dst_reg = &regs[insn->dst_reg];
092ed096 6915 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 6916
3f50f132
JF
6917 if (BPF_SRC(insn->code) == BPF_K) {
6918 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
6919 } else if (src_reg->type == SCALAR_VALUE &&
6920 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
6921 pred = is_branch_taken(dst_reg,
6922 tnum_subreg(src_reg->var_off).value,
6923 opcode,
6924 is_jmp32);
6925 } else if (src_reg->type == SCALAR_VALUE &&
6926 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
6927 pred = is_branch_taken(dst_reg,
6928 src_reg->var_off.value,
6929 opcode,
6930 is_jmp32);
6931 }
6932
b5dc0163 6933 if (pred >= 0) {
cac616db
JF
6934 /* If we get here with a dst_reg pointer type it is because
6935 * above is_branch_taken() special cased the 0 comparison.
6936 */
6937 if (!__is_pointer_value(false, dst_reg))
6938 err = mark_chain_precision(env, insn->dst_reg);
b5dc0163
AS
6939 if (BPF_SRC(insn->code) == BPF_X && !err)
6940 err = mark_chain_precision(env, insn->src_reg);
6941 if (err)
6942 return err;
6943 }
fb8d251e
AS
6944 if (pred == 1) {
6945 /* only follow the goto, ignore fall-through */
6946 *insn_idx += insn->off;
6947 return 0;
6948 } else if (pred == 0) {
6949 /* only follow fall-through branch, since
6950 * that's where the program will go
6951 */
6952 return 0;
17a52670
AS
6953 }
6954
979d63d5
DB
6955 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6956 false);
17a52670
AS
6957 if (!other_branch)
6958 return -EFAULT;
f4d7e40a 6959 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 6960
48461135
JB
6961 /* detect if we are comparing against a constant value so we can adjust
6962 * our min/max values for our dst register.
f1174f77
EC
6963 * this is only legit if both are scalars (or pointers to the same
6964 * object, I suppose, but we don't support that right now), because
6965 * otherwise the different base pointers mean the offsets aren't
6966 * comparable.
48461135
JB
6967 */
6968 if (BPF_SRC(insn->code) == BPF_X) {
092ed096 6969 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
092ed096 6970
f1174f77 6971 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
6972 src_reg->type == SCALAR_VALUE) {
6973 if (tnum_is_const(src_reg->var_off) ||
3f50f132
JF
6974 (is_jmp32 &&
6975 tnum_is_const(tnum_subreg(src_reg->var_off))))
f4d7e40a 6976 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 6977 dst_reg,
3f50f132
JF
6978 src_reg->var_off.value,
6979 tnum_subreg(src_reg->var_off).value,
092ed096
JW
6980 opcode, is_jmp32);
6981 else if (tnum_is_const(dst_reg->var_off) ||
3f50f132
JF
6982 (is_jmp32 &&
6983 tnum_is_const(tnum_subreg(dst_reg->var_off))))
f4d7e40a 6984 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096 6985 src_reg,
3f50f132
JF
6986 dst_reg->var_off.value,
6987 tnum_subreg(dst_reg->var_off).value,
092ed096
JW
6988 opcode, is_jmp32);
6989 else if (!is_jmp32 &&
6990 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 6991 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
6992 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6993 &other_branch_regs[insn->dst_reg],
092ed096 6994 src_reg, dst_reg, opcode);
f1174f77
EC
6995 }
6996 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 6997 reg_set_min_max(&other_branch_regs[insn->dst_reg],
3f50f132
JF
6998 dst_reg, insn->imm, (u32)insn->imm,
6999 opcode, is_jmp32);
48461135
JB
7000 }
7001
092ed096
JW
7002 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
7003 * NOTE: the optimizations below relate to pointer comparison,
7004 * which will never be JMP32.
7005 */
7006 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 7007 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
840b9615
JS
7008 reg_type_may_be_null(dst_reg->type)) {
7009 /* Mark all identical registers in each branch as either
57a09bf0
TG
7010 * safe or unknown depending on the R == 0 or R != 0 conditional.
7011 */
840b9615
JS
7012 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
7013 opcode == BPF_JNE);
7014 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
7015 opcode == BPF_JEQ);
5beca081
DB
7016 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
7017 this_branch, other_branch) &&
7018 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
7019 verbose(env, "R%d pointer comparison prohibited\n",
7020 insn->dst_reg);
1be7f75d 7021 return -EACCES;
17a52670 7022 }
06ee7115 7023 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 7024 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
7025 return 0;
7026}
7027
17a52670 7028/* verify BPF_LD_IMM64 instruction */
58e2af8b 7029static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 7030{
d8eca5bb 7031 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 7032 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 7033 struct bpf_map *map;
17a52670
AS
7034 int err;
7035
7036 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 7037 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
7038 return -EINVAL;
7039 }
7040 if (insn->off != 0) {
61bd5218 7041 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
7042 return -EINVAL;
7043 }
7044
dc503a8a 7045 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
7046 if (err)
7047 return err;
7048
6b173873 7049 if (insn->src_reg == 0) {
6b173873
JK
7050 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
7051
f1174f77 7052 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 7053 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 7054 return 0;
6b173873 7055 }
17a52670 7056
d8eca5bb
DB
7057 map = env->used_maps[aux->map_index];
7058 mark_reg_known_zero(env, regs, insn->dst_reg);
7059 regs[insn->dst_reg].map_ptr = map;
7060
7061 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
7062 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
7063 regs[insn->dst_reg].off = aux->map_off;
7064 if (map_value_has_spin_lock(map))
7065 regs[insn->dst_reg].id = ++env->id_gen;
7066 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
7067 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
7068 } else {
7069 verbose(env, "bpf verifier is misconfigured\n");
7070 return -EINVAL;
7071 }
17a52670 7072
17a52670
AS
7073 return 0;
7074}
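/* Editor's sketch: the two ways check_ld_imm() above is exercised.
 * With src_reg == 0 the two-insn BPF_LD_IMM64 form loads a 64-bit
 * constant; with src_reg == BPF_PSEUDO_MAP_FD (map_fd below is a
 * hypothetical file descriptor resolved at load time) the register
 * becomes a CONST_PTR_TO_MAP:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0x123456789abcdefULL),
 *	BPF_LD_MAP_FD(BPF_REG_2, map_fd),
 */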
7075
96be4325
DB
7076static bool may_access_skb(enum bpf_prog_type type)
7077{
7078 switch (type) {
7079 case BPF_PROG_TYPE_SOCKET_FILTER:
7080 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 7081 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
7082 return true;
7083 default:
7084 return false;
7085 }
7086}
7087
ddd872bc
AS
7088/* verify safety of LD_ABS|LD_IND instructions:
7089 * - they can only appear in the programs where ctx == skb
7090 * - since they are wrappers of function calls, they scratch R1-R5 registers,
7091 * preserve R6-R9, and store return value into R0
7092 *
7093 * Implicit input:
7094 * ctx == skb == R6 == CTX
7095 *
7096 * Explicit input:
7097 * SRC == any register
7098 * IMM == 32-bit immediate
7099 *
7100 * Output:
7101 * R0 - 8/16/32-bit skb data converted to cpu endianness
7102 */
58e2af8b 7103static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 7104{
638f5b90 7105 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 7106 static const int ctx_reg = BPF_REG_6;
ddd872bc 7107 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
7108 int i, err;
7109
24701ece 7110 if (!may_access_skb(env->prog->type)) {
61bd5218 7111 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
7112 return -EINVAL;
7113 }
7114
e0cea7ce
DB
7115 if (!env->ops->gen_ld_abs) {
7116 verbose(env, "bpf verifier is misconfigured\n");
7117 return -EINVAL;
7118 }
7119
f910cefa 7120 if (env->subprog_cnt > 1) {
f4d7e40a
AS
7121 /* when a program has an LD_ABS insn, JITs and the interpreter
7122 * assume that r1 == ctx == skb, which is not the case for callees
7123 * that can have arbitrary arguments. It's problematic
7124 * for the main prog as well, since JITs would need to analyze
7125 * all functions in order to make proper register save/restore
7126 * decisions in the main prog. Hence disallow LD_ABS with calls.
7127 */
7128 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
7129 return -EINVAL;
7130 }
7131
ddd872bc 7132 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 7133 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 7134 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 7135 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
7136 return -EINVAL;
7137 }
7138
7139 /* check whether implicit source operand (register R6) is readable */
6d4f151a 7140 err = check_reg_arg(env, ctx_reg, SRC_OP);
ddd872bc
AS
7141 if (err)
7142 return err;
7143
fd978bf7
JS
7144 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
7145 * gen_ld_abs() may terminate the program at runtime, leading to
7146 * reference leak.
7147 */
7148 err = check_reference_leak(env);
7149 if (err) {
7150 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
7151 return err;
7152 }
7153
d83525ca
AS
7154 if (env->cur_state->active_spin_lock) {
7155 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
7156 return -EINVAL;
7157 }
7158
6d4f151a 7159 if (regs[ctx_reg].type != PTR_TO_CTX) {
61bd5218
JK
7160 verbose(env,
7161 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
7162 return -EINVAL;
7163 }
7164
7165 if (mode == BPF_IND) {
7166 /* check explicit source operand */
dc503a8a 7167 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
7168 if (err)
7169 return err;
7170 }
7171
6d4f151a
DB
7172 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
7173 if (err < 0)
7174 return err;
7175
ddd872bc 7176 /* reset caller saved regs to unreadable */
dc503a8a 7177 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 7178 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
7179 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7180 }
ddd872bc
AS
7181
7182 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
7183 * the value fetched from the packet.
7184 * Already marked as written above.
ddd872bc 7185 */
61bd5218 7186 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
7187 /* ld_abs loads up to 32 bits of skb data. */
7188 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
7189 return 0;
7190}
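/* Editor's sketch: a socket-filter style sequence that satisfies the
 * LD_ABS checks above. R6 must hold the ctx (the skb), and R0 receives
 * the byte at skb offset 9 (the IPv4 protocol field, assuming an IP
 * header at offset 0):
 *
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
 *	BPF_LD_ABS(BPF_B, 9),
 *	BPF_EXIT_INSN(),
 */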
7191
390ee7e2
AS
7192static int check_return_code(struct bpf_verifier_env *env)
7193{
5cf1e914 7194 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 7195 const struct bpf_prog *prog = env->prog;
390ee7e2
AS
7196 struct bpf_reg_state *reg;
7197 struct tnum range = tnum_range(0, 1);
27ae7997
MKL
7198 int err;
7199
9e4e01df
KS
7200 /* LSM and struct_ops func-ptr's return type could be "void" */
7201 if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
7202 env->prog->type == BPF_PROG_TYPE_LSM) &&
27ae7997
MKL
7203 !prog->aux->attach_func_proto->type)
7204 return 0;
7205
7206 /* The eBPF calling convention is such that R0 is used
7207 * to return the value from an eBPF program.
7208 * Make sure that it's readable at the time
7209 * of bpf_exit, which means that the program wrote
7210 * something into it earlier.
7211 */
7212 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
7213 if (err)
7214 return err;
7215
7216 if (is_pointer_value(env, BPF_REG_0)) {
7217 verbose(env, "R0 leaks addr as return value\n");
7218 return -EACCES;
7219 }
390ee7e2
AS
7220
7221 switch (env->prog->type) {
983695fa
DB
7222 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
7223 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
1b66d253
DB
7224 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
7225 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
7226 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
7227 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
7228 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
983695fa 7229 range = tnum_range(1, 1);
ed4ed404 7230 break;
390ee7e2 7231 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 7232 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
7233 range = tnum_range(0, 3);
7234 enforce_attach_type_range = tnum_range(2, 3);
7235 }
ed4ed404 7236 break;
390ee7e2
AS
7237 case BPF_PROG_TYPE_CGROUP_SOCK:
7238 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 7239 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 7240 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 7241 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 7242 break;
15ab09bd
AS
7243 case BPF_PROG_TYPE_RAW_TRACEPOINT:
7244 if (!env->prog->aux->attach_btf_id)
7245 return 0;
7246 range = tnum_const(0);
7247 break;
15d83c4d 7248 case BPF_PROG_TYPE_TRACING:
e92888c7
YS
7249 switch (env->prog->expected_attach_type) {
7250 case BPF_TRACE_FENTRY:
7251 case BPF_TRACE_FEXIT:
7252 range = tnum_const(0);
7253 break;
7254 case BPF_TRACE_RAW_TP:
7255 case BPF_MODIFY_RETURN:
15d83c4d 7256 return 0;
2ec0616e
DB
7257 case BPF_TRACE_ITER:
7258 break;
e92888c7
YS
7259 default:
7260 return -ENOTSUPP;
7261 }
15d83c4d 7262 break;
e92888c7
YS
7263 case BPF_PROG_TYPE_EXT:
7264 /* freplace program can return anything as its return value
7265 * depends on the to-be-replaced kernel func or bpf program.
7266 */
390ee7e2
AS
7267 default:
7268 return 0;
7269 }
7270
638f5b90 7271 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 7272 if (reg->type != SCALAR_VALUE) {
61bd5218 7273 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
390ee7e2
AS
7274 reg_type_str[reg->type]);
7275 return -EINVAL;
7276 }
7277
7278 if (!tnum_in(range, reg->var_off)) {
5cf1e914 7279 char tn_buf[48];
7280
61bd5218 7281 verbose(env, "At program exit the register R0 ");
390ee7e2 7282 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 7283 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 7284 verbose(env, "has value %s", tn_buf);
390ee7e2 7285 } else {
61bd5218 7286 verbose(env, "has unknown scalar value");
390ee7e2 7287 }
5cf1e914 7288 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 7289 verbose(env, " should have been in %s\n", tn_buf);
390ee7e2
AS
7290 return -EINVAL;
7291 }
5cf1e914 7292
7293 if (!tnum_is_unknown(enforce_attach_type_range) &&
7294 tnum_in(enforce_attach_type_range, reg->var_off))
7295 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
7296 return 0;
7297}
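/* Editor's example: for a BPF_PROG_TYPE_CGROUP_SKB program attached at
 * BPF_CGROUP_INET_EGRESS the accepted range above is tnum_range(0, 3),
 * so "return 2;" verifies, while "return 7;" fails with "At program
 * exit the register R0 has value ... should have been in ...".
 */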
7298
475fb78f
AS
7299/* non-recursive DFS pseudo code
7300 * 1 procedure DFS-iterative(G,v):
7301 * 2 label v as discovered
7302 * 3 let S be a stack
7303 * 4 S.push(v)
7304 * 5 while S is not empty
7305 * 6 t <- S.pop()
7306 * 7 if t is what we're looking for:
7307 * 8 return t
7308 * 9 for all edges e in G.adjacentEdges(t) do
7309 * 10 if edge e is already labelled
7310 * 11 continue with the next edge
7311 * 12 w <- G.adjacentVertex(t,e)
7312 * 13 if vertex w is not discovered and not explored
7313 * 14 label e as tree-edge
7314 * 15 label w as discovered
7315 * 16 S.push(w)
7316 * 17 continue at 5
7317 * 18 else if vertex w is discovered
7318 * 19 label e as back-edge
7319 * 20 else
7320 * 21 // vertex w is explored
7321 * 22 label e as forward- or cross-edge
7322 * 23 label t as explored
7323 * 24 S.pop()
7324 *
7325 * convention:
7326 * 0x10 - discovered
7327 * 0x11 - discovered and fall-through edge labelled
7328 * 0x12 - discovered and fall-through and branch edges labelled
7329 * 0x20 - explored
7330 */
7331
7332enum {
7333 DISCOVERED = 0x10,
7334 EXPLORED = 0x20,
7335 FALLTHROUGH = 1,
7336 BRANCH = 2,
7337};
7338
dc2a4ebc
AS
7339static u32 state_htab_size(struct bpf_verifier_env *env)
7340{
7341 return env->prog->len;
7342}
7343
5d839021
AS
7344static struct bpf_verifier_state_list **explored_state(
7345 struct bpf_verifier_env *env,
7346 int idx)
7347{
dc2a4ebc
AS
7348 struct bpf_verifier_state *cur = env->cur_state;
7349 struct bpf_func_state *state = cur->frame[cur->curframe];
7350
7351 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
7352}
7353
7354static void init_explored_state(struct bpf_verifier_env *env, int idx)
7355{
a8f500af 7356 env->insn_aux_data[idx].prune_point = true;
5d839021 7357}
f1bca824 7358
475fb78f
AS
7359/* t, w, e - match pseudo-code above:
7360 * t - index of current instruction
7361 * w - next instruction
7362 * e - edge
7363 */
2589726d
AS
7364static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
7365 bool loop_ok)
475fb78f 7366{
7df737e9
AS
7367 int *insn_stack = env->cfg.insn_stack;
7368 int *insn_state = env->cfg.insn_state;
7369
475fb78f
AS
7370 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
7371 return 0;
7372
7373 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
7374 return 0;
7375
7376 if (w < 0 || w >= env->prog->len) {
d9762e84 7377 verbose_linfo(env, t, "%d: ", t);
61bd5218 7378 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
7379 return -EINVAL;
7380 }
7381
f1bca824
AS
7382 if (e == BRANCH)
7383 /* mark branch target for state pruning */
5d839021 7384 init_explored_state(env, w);
f1bca824 7385
475fb78f
AS
7386 if (insn_state[w] == 0) {
7387 /* tree-edge */
7388 insn_state[t] = DISCOVERED | e;
7389 insn_state[w] = DISCOVERED;
7df737e9 7390 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 7391 return -E2BIG;
7df737e9 7392 insn_stack[env->cfg.cur_stack++] = w;
475fb78f
AS
7393 return 1;
7394 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2c78ee89 7395 if (loop_ok && env->bpf_capable)
2589726d 7396 return 0;
d9762e84
MKL
7397 verbose_linfo(env, t, "%d: ", t);
7398 verbose_linfo(env, w, "%d: ", w);
61bd5218 7399 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
7400 return -EINVAL;
7401 } else if (insn_state[w] == EXPLORED) {
7402 /* forward- or cross-edge */
7403 insn_state[t] = DISCOVERED | e;
7404 } else {
61bd5218 7405 verbose(env, "insn state internal bug\n");
475fb78f
AS
7406 return -EFAULT;
7407 }
7408 return 0;
7409}
7410
7411/* non-recursive depth-first-search to detect loops in BPF program
7412 * loop == back-edge in directed graph
7413 */
58e2af8b 7414static int check_cfg(struct bpf_verifier_env *env)
475fb78f
AS
7415{
7416 struct bpf_insn *insns = env->prog->insnsi;
7417 int insn_cnt = env->prog->len;
7df737e9 7418 int *insn_stack, *insn_state;
475fb78f
AS
7419 int ret = 0;
7420 int i, t;
7421
7df737e9 7422 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
7423 if (!insn_state)
7424 return -ENOMEM;
7425
7df737e9 7426 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 7427 if (!insn_stack) {
71dde681 7428 kvfree(insn_state);
475fb78f
AS
7429 return -ENOMEM;
7430 }
7431
7432 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
7433 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 7434 env->cfg.cur_stack = 1;
475fb78f
AS
7435
7436peek_stack:
7df737e9 7437 if (env->cfg.cur_stack == 0)
475fb78f 7438 goto check_state;
7df737e9 7439 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 7440
092ed096
JW
7441 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
7442 BPF_CLASS(insns[t].code) == BPF_JMP32) {
475fb78f
AS
7443 u8 opcode = BPF_OP(insns[t].code);
7444
7445 if (opcode == BPF_EXIT) {
7446 goto mark_explored;
7447 } else if (opcode == BPF_CALL) {
2589726d 7448 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
7449 if (ret == 1)
7450 goto peek_stack;
7451 else if (ret < 0)
7452 goto err_free;
07016151 7453 if (t + 1 < insn_cnt)
5d839021 7454 init_explored_state(env, t + 1);
cc8b0b92 7455 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 7456 init_explored_state(env, t);
2589726d
AS
7457 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
7458 env, false);
cc8b0b92
AS
7459 if (ret == 1)
7460 goto peek_stack;
7461 else if (ret < 0)
7462 goto err_free;
7463 }
475fb78f
AS
7464 } else if (opcode == BPF_JA) {
7465 if (BPF_SRC(insns[t].code) != BPF_K) {
7466 ret = -EINVAL;
7467 goto err_free;
7468 }
7469 /* unconditional jump with single edge */
7470 ret = push_insn(t, t + insns[t].off + 1,
2589726d 7471 FALLTHROUGH, env, true);
475fb78f
AS
7472 if (ret == 1)
7473 goto peek_stack;
7474 else if (ret < 0)
7475 goto err_free;
b5dc0163
AS
7476 /* unconditional jmp is not a good pruning point,
7477 * but it's marked, since backtracking needs
7478 * to record jmp history in is_state_visited().
7479 */
7480 init_explored_state(env, t + insns[t].off + 1);
f1bca824
AS
7481 /* tell verifier to check for equivalent states
7482 * after every call and jump
7483 */
c3de6317 7484 if (t + 1 < insn_cnt)
5d839021 7485 init_explored_state(env, t + 1);
475fb78f
AS
7486 } else {
7487 /* conditional jump with two edges */
5d839021 7488 init_explored_state(env, t);
2589726d 7489 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
475fb78f
AS
7490 if (ret == 1)
7491 goto peek_stack;
7492 else if (ret < 0)
7493 goto err_free;
7494
2589726d 7495 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
475fb78f
AS
7496 if (ret == 1)
7497 goto peek_stack;
7498 else if (ret < 0)
7499 goto err_free;
7500 }
7501 } else {
7502 /* all other non-branch instructions with single
7503 * fall-through edge
7504 */
2589726d 7505 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
7506 if (ret == 1)
7507 goto peek_stack;
7508 else if (ret < 0)
7509 goto err_free;
7510 }
7511
7512mark_explored:
7513 insn_state[t] = EXPLORED;
7df737e9 7514 if (env->cfg.cur_stack-- <= 0) {
61bd5218 7515 verbose(env, "pop stack internal bug\n");
475fb78f
AS
7516 ret = -EFAULT;
7517 goto err_free;
7518 }
7519 goto peek_stack;
7520
7521check_state:
7522 for (i = 0; i < insn_cnt; i++) {
7523 if (insn_state[i] != EXPLORED) {
61bd5218 7524 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
7525 ret = -EINVAL;
7526 goto err_free;
7527 }
7528 }
7529 ret = 0; /* cfg looks good */
7530
7531err_free:
71dde681
AS
7532 kvfree(insn_state);
7533 kvfree(insn_stack);
7df737e9 7534 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
7535 return ret;
7536}
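/* Editor's sketch: a two-insn cycle that the DFS above reports as
 * "back-edge from insn 1 to 0" (unless the env is bpf_capable and the
 * edge was pushed with loop_ok, in which case the bounded-loop logic
 * takes over later):
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_JMP_A(-2),
 *	BPF_EXIT_INSN(),
 */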
7537
838e9690
YS
7538/* The minimum supported BTF func info size */
7539#define MIN_BPF_FUNCINFO_SIZE 8
7540#define MAX_FUNCINFO_REC_SIZE 252
7541
c454a46b
MKL
7542static int check_btf_func(struct bpf_verifier_env *env,
7543 const union bpf_attr *attr,
7544 union bpf_attr __user *uattr)
838e9690 7545{
d0b2818e 7546 u32 i, nfuncs, urec_size, min_size;
838e9690 7547 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 7548 struct bpf_func_info *krecord;
8c1b6e69 7549 struct bpf_func_info_aux *info_aux = NULL;
838e9690 7550 const struct btf_type *type;
c454a46b
MKL
7551 struct bpf_prog *prog;
7552 const struct btf *btf;
838e9690 7553 void __user *urecord;
d0b2818e 7554 u32 prev_offset = 0;
e7ed83d6 7555 int ret = -ENOMEM;
838e9690
YS
7556
7557 nfuncs = attr->func_info_cnt;
7558 if (!nfuncs)
7559 return 0;
7560
7561 if (nfuncs != env->subprog_cnt) {
7562 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
7563 return -EINVAL;
7564 }
7565
7566 urec_size = attr->func_info_rec_size;
7567 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
7568 urec_size > MAX_FUNCINFO_REC_SIZE ||
7569 urec_size % sizeof(u32)) {
7570 verbose(env, "invalid func info rec size %u\n", urec_size);
7571 return -EINVAL;
7572 }
7573
c454a46b
MKL
7574 prog = env->prog;
7575 btf = prog->aux->btf;
838e9690
YS
7576
7577 urecord = u64_to_user_ptr(attr->func_info);
7578 min_size = min_t(u32, krec_size, urec_size);
7579
ba64e7d8 7580 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
7581 if (!krecord)
7582 return -ENOMEM;
8c1b6e69
AS
7583 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
7584 if (!info_aux)
7585 goto err_free;
ba64e7d8 7586
838e9690
YS
7587 for (i = 0; i < nfuncs; i++) {
7588 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
7589 if (ret) {
7590 if (ret == -E2BIG) {
7591 verbose(env, "nonzero tailing record in func info");
7592 /* set the size kernel expects so loader can zero
7593 * out the rest of the record.
7594 */
7595 if (put_user(min_size, &uattr->func_info_rec_size))
7596 ret = -EFAULT;
7597 }
c454a46b 7598 goto err_free;
838e9690
YS
7599 }
7600
ba64e7d8 7601 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 7602 ret = -EFAULT;
c454a46b 7603 goto err_free;
838e9690
YS
7604 }
7605
d30d42e0 7606 /* check insn_off */
838e9690 7607 if (i == 0) {
d30d42e0 7608 if (krecord[i].insn_off) {
838e9690 7609 verbose(env,
d30d42e0
MKL
7610 "nonzero insn_off %u for the first func info record",
7611 krecord[i].insn_off);
838e9690 7612 ret = -EINVAL;
c454a46b 7613 goto err_free;
838e9690 7614 }
d30d42e0 7615 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
7616 verbose(env,
7617 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 7618 krecord[i].insn_off, prev_offset);
838e9690 7619 ret = -EINVAL;
c454a46b 7620 goto err_free;
838e9690
YS
7621 }
7622
d30d42e0 7623 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690
YS
7624 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
7625 ret = -EINVAL;
c454a46b 7626 goto err_free;
838e9690
YS
7627 }
7628
7629 /* check type_id */
ba64e7d8 7630 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 7631 if (!type || !btf_type_is_func(type)) {
838e9690 7632 verbose(env, "invalid type id %d in func info",
ba64e7d8 7633 krecord[i].type_id);
838e9690 7634 ret = -EINVAL;
c454a46b 7635 goto err_free;
838e9690 7636 }
51c39bb1 7637 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
d30d42e0 7638 prev_offset = krecord[i].insn_off;
838e9690
YS
7639 urecord += urec_size;
7640 }
7641
ba64e7d8
YS
7642 prog->aux->func_info = krecord;
7643 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 7644 prog->aux->func_info_aux = info_aux;
838e9690
YS
7645 return 0;
7646
c454a46b 7647err_free:
ba64e7d8 7648 kvfree(krecord);
8c1b6e69 7649 kfree(info_aux);
838e9690
YS
7650 return ret;
7651}
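/* Editor's note: each record validated above is a
 * struct bpf_func_info { __u32 insn_off; __u32 type_id; } from
 * uapi/linux/bpf.h -- 8 bytes, hence MIN_BPF_FUNCINFO_SIZE. The
 * insn_off values must start at 0, strictly increase, and line up
 * one-to-one with the subprog starts discovered earlier.
 */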
7652
ba64e7d8
YS
7653static void adjust_btf_func(struct bpf_verifier_env *env)
7654{
8c1b6e69 7655 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
7656 int i;
7657
8c1b6e69 7658 if (!aux->func_info)
ba64e7d8
YS
7659 return;
7660
7661 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 7662 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
7663}
7664
c454a46b
MKL
7665#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
7666 sizeof(((struct bpf_line_info *)(0))->line_col))
7667#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
7668
7669static int check_btf_line(struct bpf_verifier_env *env,
7670 const union bpf_attr *attr,
7671 union bpf_attr __user *uattr)
7672{
7673 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
7674 struct bpf_subprog_info *sub;
7675 struct bpf_line_info *linfo;
7676 struct bpf_prog *prog;
7677 const struct btf *btf;
7678 void __user *ulinfo;
7679 int err;
7680
7681 nr_linfo = attr->line_info_cnt;
7682 if (!nr_linfo)
7683 return 0;
7684
7685 rec_size = attr->line_info_rec_size;
7686 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
7687 rec_size > MAX_LINEINFO_REC_SIZE ||
7688 rec_size & (sizeof(u32) - 1))
7689 return -EINVAL;
7690
7691 /* Need to zero it in case userspace passes
7692 * in a smaller bpf_line_info object.
7693 */
7694 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
7695 GFP_KERNEL | __GFP_NOWARN);
7696 if (!linfo)
7697 return -ENOMEM;
7698
7699 prog = env->prog;
7700 btf = prog->aux->btf;
7701
7702 s = 0;
7703 sub = env->subprog_info;
7704 ulinfo = u64_to_user_ptr(attr->line_info);
7705 expected_size = sizeof(struct bpf_line_info);
7706 ncopy = min_t(u32, expected_size, rec_size);
7707 for (i = 0; i < nr_linfo; i++) {
7708 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
7709 if (err) {
7710 if (err == -E2BIG) {
7711 verbose(env, "nonzero tailing record in line_info");
7712 if (put_user(expected_size,
7713 &uattr->line_info_rec_size))
7714 err = -EFAULT;
7715 }
7716 goto err_free;
7717 }
7718
7719 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
7720 err = -EFAULT;
7721 goto err_free;
7722 }
7723
7724 /*
7725 * Check insn_off to ensure
7726 * 1) strictly increasing AND
7727 * 2) bounded by prog->len
7728 *
7729 * The linfo[0].insn_off == 0 check logically falls into
7730 * the later "missing bpf_line_info for func..." case
7731 * because the first linfo[i].insn_off must also belong
7732 * to the first subprog, and the first subprog must have
7733 * subprog_info[0].start == 0.
7734 */
7735 if ((i && linfo[i].insn_off <= prev_offset) ||
7736 linfo[i].insn_off >= prog->len) {
7737 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
7738 i, linfo[i].insn_off, prev_offset,
7739 prog->len);
7740 err = -EINVAL;
7741 goto err_free;
7742 }
7743
fdbaa0be
MKL
7744 if (!prog->insnsi[linfo[i].insn_off].code) {
7745 verbose(env,
7746 "Invalid insn code at line_info[%u].insn_off\n",
7747 i);
7748 err = -EINVAL;
7749 goto err_free;
7750 }
7751
23127b33
MKL
7752 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
7753 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
7754 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
7755 err = -EINVAL;
7756 goto err_free;
7757 }
7758
7759 if (s != env->subprog_cnt) {
7760 if (linfo[i].insn_off == sub[s].start) {
7761 sub[s].linfo_idx = i;
7762 s++;
7763 } else if (sub[s].start < linfo[i].insn_off) {
7764 verbose(env, "missing bpf_line_info for func#%u\n", s);
7765 err = -EINVAL;
7766 goto err_free;
7767 }
7768 }
7769
7770 prev_offset = linfo[i].insn_off;
7771 ulinfo += rec_size;
7772 }
7773
7774 if (s != env->subprog_cnt) {
7775 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
7776 env->subprog_cnt - s, s);
7777 err = -EINVAL;
7778 goto err_free;
7779 }
7780
7781 prog->aux->linfo = linfo;
7782 prog->aux->nr_linfo = nr_linfo;
7783
7784 return 0;
7785
7786err_free:
7787 kvfree(linfo);
7788 return err;
7789}
7790
7791static int check_btf_info(struct bpf_verifier_env *env,
7792 const union bpf_attr *attr,
7793 union bpf_attr __user *uattr)
7794{
7795 struct btf *btf;
7796 int err;
7797
7798 if (!attr->func_info_cnt && !attr->line_info_cnt)
7799 return 0;
7800
7801 btf = btf_get_by_fd(attr->prog_btf_fd);
7802 if (IS_ERR(btf))
7803 return PTR_ERR(btf);
7804 env->prog->aux->btf = btf;
7805
7806 err = check_btf_func(env, attr, uattr);
7807 if (err)
7808 return err;
7809
7810 err = check_btf_line(env, attr, uattr);
7811 if (err)
7812 return err;
7813
7814 return 0;
ba64e7d8
YS
7815}
7816
f1174f77
EC
7817/* check %cur's range satisfies %old's */
7818static bool range_within(struct bpf_reg_state *old,
7819 struct bpf_reg_state *cur)
7820{
b03c9f9f
EC
7821 return old->umin_value <= cur->umin_value &&
7822 old->umax_value >= cur->umax_value &&
7823 old->smin_value <= cur->smin_value &&
7824 old->smax_value >= cur->smax_value;
f1174f77
EC
7825}
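/* Editor's example: range_within(old, cur) asks whether cur's range is
 * contained in old's. old = [0, 10] vs cur = [2, 5] is safe; old =
 * [0, 10] vs cur = [0, 20] is not, since cur could reach values the
 * already-verified old state never exercised.
 */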
7826
7827/* Maximum number of register states that can exist at once */
7828#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
7829struct idpair {
7830 u32 old;
7831 u32 cur;
7832};
7833
7834/* If in the old state two registers had the same id, then they need to have
7835 * the same id in the new state as well. But that id could be different from
7836 * the old state, so we need to track the mapping from old to new ids.
7837 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
7838 * regs with old id 5 must also have new id 9 for the new state to be safe. But
7840 * regs with a different old id could still have new id 9; we don't care
7841 * about that.
7841 * So we look through our idmap to see if this old id has been seen before. If
7842 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 7843 */
f1174f77 7844static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 7845{
f1174f77 7846 unsigned int i;
969bf05e 7847
f1174f77
EC
7848 for (i = 0; i < ID_MAP_SIZE; i++) {
7849 if (!idmap[i].old) {
7850 /* Reached an empty slot; haven't seen this id before */
7851 idmap[i].old = old_id;
7852 idmap[i].cur = cur_id;
7853 return true;
7854 }
7855 if (idmap[i].old == old_id)
7856 return idmap[i].cur == cur_id;
7857 }
7858 /* We ran out of idmap slots, which should be impossible */
7859 WARN_ON_ONCE(1);
7860 return false;
7861}
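/* Editor's example: if in the old state r1.id == r2.id == 5 (say both
 * came from the same map lookup), then in the new state r1 and r2 must
 * again share one id, e.g. 9 for both. check_ids() records the 5 -> 9
 * pair on first sight and enforces it for every later occurrence of
 * old id 5.
 */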
7862
9242b5f5
AS
7863static void clean_func_state(struct bpf_verifier_env *env,
7864 struct bpf_func_state *st)
7865{
7866 enum bpf_reg_liveness live;
7867 int i, j;
7868
7869 for (i = 0; i < BPF_REG_FP; i++) {
7870 live = st->regs[i].live;
7871 /* liveness must not touch this register anymore */
7872 st->regs[i].live |= REG_LIVE_DONE;
7873 if (!(live & REG_LIVE_READ))
7874 /* since the register is unused, clear its state
7875 * to make further comparison simpler
7876 */
f54c7898 7877 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
7878 }
7879
7880 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
7881 live = st->stack[i].spilled_ptr.live;
7882 /* liveness must not touch this stack slot anymore */
7883 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
7884 if (!(live & REG_LIVE_READ)) {
f54c7898 7885 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
7886 for (j = 0; j < BPF_REG_SIZE; j++)
7887 st->stack[i].slot_type[j] = STACK_INVALID;
7888 }
7889 }
7890}
7891
7892static void clean_verifier_state(struct bpf_verifier_env *env,
7893 struct bpf_verifier_state *st)
7894{
7895 int i;
7896
7897 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7898 /* all regs in this state in all frames were already marked */
7899 return;
7900
7901 for (i = 0; i <= st->curframe; i++)
7902 clean_func_state(env, st->frame[i]);
7903}
7904
7905/* the parentage chains form a tree.
7906 * the verifier states are added to state lists at given insn and
7907 * pushed into state stack for future exploration.
7908 * when the verifier reaches the bpf_exit insn, some of the verifier states
7909 * stored in the state lists have their final liveness state already,
7910 * but a lot of states will get revised from a liveness point of view when
7911 * the verifier explores other branches.
7912 * Example:
7913 * 1: r0 = 1
7914 * 2: if r1 == 100 goto pc+1
7915 * 3: r0 = 2
7916 * 4: exit
7917 * when the verifier reaches exit insn the register r0 in the state list of
7918 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7919 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7920 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7921 *
7922 * Since the verifier pushes the branch states as it sees them while exploring
7923 * the program, walking the branch instruction for the second
7924 * time means that all states below this branch were already explored and
7925 * their final liveness marks are already propagated.
7926 * Hence when the verifier completes the search of state list in is_state_visited()
7927 * we can call this clean_live_states() function to mark all liveness states
7928 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7929 * will not be used.
7930 * This function also clears the registers and stack for states that !READ
7931 * to simplify state merging.
7932 *
7933 * An important note here: walking the same branch instruction in the callee
7934 * doesn't mean that the states are DONE. The verifier has to compare
7935 * the callsites.
7936 */
7937static void clean_live_states(struct bpf_verifier_env *env, int insn,
7938 struct bpf_verifier_state *cur)
7939{
7940 struct bpf_verifier_state_list *sl;
7941 int i;
7942
5d839021 7943 sl = *explored_state(env, insn);
a8f500af 7944 while (sl) {
2589726d
AS
7945 if (sl->state.branches)
7946 goto next;
dc2a4ebc
AS
7947 if (sl->state.insn_idx != insn ||
7948 sl->state.curframe != cur->curframe)
9242b5f5
AS
7949 goto next;
7950 for (i = 0; i <= cur->curframe; i++)
7951 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7952 goto next;
7953 clean_verifier_state(env, &sl->state);
7954next:
7955 sl = sl->next;
7956 }
7957}
7958
f1174f77 7959/* Returns true if (rold safe implies rcur safe) */
1b688a19
EC
7960static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7961 struct idpair *idmap)
f1174f77 7962{
f4d7e40a
AS
7963 bool equal;
7964
dc503a8a
EC
7965 if (!(rold->live & REG_LIVE_READ))
7966 /* explored state didn't use this */
7967 return true;
7968
679c782d 7969 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
7970
7971 if (rold->type == PTR_TO_STACK)
7972 /* two stack pointers are equal only if they're pointing to
7973 * the same stack frame, since fp-8 in foo != fp-8 in bar
7974 */
7975 return equal && rold->frameno == rcur->frameno;
7976
7977 if (equal)
969bf05e
AS
7978 return true;
7979
f1174f77
EC
7980 if (rold->type == NOT_INIT)
7981 /* explored state can't have used this */
969bf05e 7982 return true;
f1174f77
EC
7983 if (rcur->type == NOT_INIT)
7984 return false;
7985 switch (rold->type) {
7986 case SCALAR_VALUE:
7987 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
7988 if (!rold->precise && !rcur->precise)
7989 return true;
f1174f77
EC
7990 /* new val must satisfy old val knowledge */
7991 return range_within(rold, rcur) &&
7992 tnum_in(rold->var_off, rcur->var_off);
7993 } else {
179d1c56
JH
7994 /* We're trying to use a pointer in place of a scalar.
7995 * Even if the scalar was unbounded, this could lead to
7996 * pointer leaks because scalars are allowed to leak
7997 * while pointers are not. We could make this safe in
7998 * special cases if root is calling us, but it's
7999 * probably not worth the hassle.
f1174f77 8000 */
179d1c56 8001 return false;
f1174f77
EC
8002 }
8003 case PTR_TO_MAP_VALUE:
1b688a19
EC
8004 /* If the new min/max/var_off satisfy the old ones and
8005 * everything else matches, we are OK.
d83525ca
AS
8006 * 'id' is not compared, since it's only used for maps with
8007 * bpf_spin_lock inside a map element, and in such cases, if
8008 * the rest of the prog is valid for one map element, then
8009 * it's valid for all map elements regardless of the key
8010 * used in bpf_map_lookup().
1b688a19
EC
8011 */
8012 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
8013 range_within(rold, rcur) &&
8014 tnum_in(rold->var_off, rcur->var_off);
f1174f77
EC
8015 case PTR_TO_MAP_VALUE_OR_NULL:
8016 /* a PTR_TO_MAP_VALUE could be safe to use as a
8017 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
8018 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
8019 * checked, doing so could have affected others with the same
8020 * id, and we can't check for that because we lost the id when
8021 * we converted to a PTR_TO_MAP_VALUE.
8022 */
8023 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
8024 return false;
8025 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
8026 return false;
8027 /* Check our ids match any regs they're supposed to */
8028 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 8029 case PTR_TO_PACKET_META:
f1174f77 8030 case PTR_TO_PACKET:
de8f3a83 8031 if (rcur->type != rold->type)
f1174f77
EC
8032 return false;
8033 /* We must have at least as much range as the old ptr
8034 * did, so that any accesses which were safe before are
8035 * still safe. This is true even if old range < old off,
8036 * since someone could have accessed through (ptr - k), or
8037 * even done ptr -= k in a register, to get a safe access.
8038 */
8039 if (rold->range > rcur->range)
8040 return false;
8041 /* If the offsets don't match, we can't trust our alignment;
8042 * nor can we be sure that we won't fall out of range.
8043 */
8044 if (rold->off != rcur->off)
8045 return false;
8046 /* id relations must be preserved */
8047 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
8048 return false;
8049 /* new val must satisfy old val knowledge */
8050 return range_within(rold, rcur) &&
8051 tnum_in(rold->var_off, rcur->var_off);
8052 case PTR_TO_CTX:
8053 case CONST_PTR_TO_MAP:
f1174f77 8054 case PTR_TO_PACKET_END:
d58e468b 8055 case PTR_TO_FLOW_KEYS:
c64b7983
JS
8056 case PTR_TO_SOCKET:
8057 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
8058 case PTR_TO_SOCK_COMMON:
8059 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
8060 case PTR_TO_TCP_SOCK:
8061 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 8062 case PTR_TO_XDP_SOCK:
f1174f77
EC
8063 /* Only valid matches are exact, which memcmp() above
8064 * would have accepted
8065 */
8066 default:
8067 /* Don't know what's going on, just say it's not safe */
8068 return false;
8069 }
969bf05e 8070
f1174f77
EC
8071 /* Shouldn't get here; if we do, say it's not safe */
8072 WARN_ON_ONCE(1);
969bf05e
AS
8073 return false;
8074}
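
/* A minimal userspace sketch of the tnum containment that regsafe()
 * relies on above, assuming the (value, mask) encoding from
 * include/linux/tnum.h: a tnum (v, m) denotes every x with
 * (x & ~m) == v, i.e. mask bits are unknown. The old register must
 * allow every concrete value the new register can take.
 */
#include <stdbool.h>
#include <stdint.h>

struct tnum_sketch { uint64_t value; uint64_t mask; };

static bool tnum_in_sketch(struct tnum_sketch a, struct tnum_sketch b)
{
	/* b must not be unknown in bits where a is known */
	if (b.mask & ~a.mask)
		return false;
	/* where a is known, b's known bits must agree with a */
	b.value &= ~a.mask;
	return a.value == b.value;
}

/* Example: a = {0x0, 0xff} covers [0, 255] and b = {0x40, 0x7} covers
 * 0x40..0x47, so tnum_in_sketch(a, b) is true: the old knowledge still
 * holds for the new register.
 */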
8075
f4d7e40a
AS
8076static bool stacksafe(struct bpf_func_state *old,
8077 struct bpf_func_state *cur,
638f5b90
AS
8078 struct idpair *idmap)
8079{
8080 int i, spi;
8081
638f5b90
AS
8082 /* walk slots of the explored stack and ignore any additional
8083 * slots in the current stack, since the explored (safe) state
8084 * didn't use them
8085 */
8086 for (i = 0; i < old->allocated_stack; i++) {
8087 spi = i / BPF_REG_SIZE;
8088
b233920c
AS
8089 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
8090 i += BPF_REG_SIZE - 1;
cc2b14d5 8091 /* explored state didn't use this */
fd05e57b 8092 continue;
b233920c 8093 }
cc2b14d5 8094
638f5b90
AS
8095 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
8096 continue;
19e2dbb7
AS
8097
8098 /* explored stack has more populated slots than current stack
8099 * and these slots were used
8100 */
8101 if (i >= cur->allocated_stack)
8102 return false;
8103
cc2b14d5
AS
8104 /* if old state was safe with misc data in the stack
8105 * it will be safe with zero-initialized stack.
8106 * The opposite is not true
8107 */
8108 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
8109 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
8110 continue;
638f5b90
AS
8111 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
8112 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
8113 /* Ex: old explored (safe) state has STACK_SPILL in
8114 * this stack slot, but current has STACK_MISC ->
8115 * these verifier states are not equivalent,
8116 * return false to continue verification of this path
8117 */
8118 return false;
8119 if (i % BPF_REG_SIZE)
8120 continue;
8121 if (old->stack[spi].slot_type[0] != STACK_SPILL)
8122 continue;
8123 if (!regsafe(&old->stack[spi].spilled_ptr,
8124 &cur->stack[spi].spilled_ptr,
8125 idmap))
8126 /* when explored and current stack slot are both storing
8127 * spilled registers, check that the stored pointer types
8128 * are the same as well.
8129 * Ex: explored safe path could have stored
8130 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
8131 * but current path has stored:
8132 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
8133 * such verifier states are not equivalent.
8134 * return false to continue verification of this path
8135 */
8136 return false;
8137 }
8138 return true;
8139}
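
/* A worked example for the loop above, assuming BPF_REG_SIZE == 8:
 * byte index i selects slot spi = i / 8, so bytes 0..7 live in slot 0
 * (fp-8) and bytes 8..15 in slot 1 (fp-16). slot_type[] is tracked per
 * byte, which is why STACK_MISC/STACK_ZERO are compared byte by byte,
 * while the spilled-register check via regsafe() runs only once per
 * slot, at i % BPF_REG_SIZE == 0, and only for STACK_SPILL slots.
 */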
8140
fd978bf7
JS
8141static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
8142{
8143 if (old->acquired_refs != cur->acquired_refs)
8144 return false;
8145 return !memcmp(old->refs, cur->refs,
8146 sizeof(*old->refs) * old->acquired_refs);
8147}
8148
f1bca824
AS
8149/* compare two verifier states
8150 *
8151 * all states stored in state_list are known to be valid, since
8152 * verifier reached 'bpf_exit' instruction through them
8153 *
8154 * this function is called when the verifier explores different branches of
8155 * execution popped from the state stack. If it sees an old state that has
8156 * a stricter register state and a stricter stack state, then this execution
8157 * branch doesn't need to be explored further, since verifier already
8158 * concluded that more strict state leads to valid finish.
8159 *
8160 * Therefore two states are equivalent if register state is more conservative
8161 * and explored stack state is more conservative than the current one.
8162 * Example:
8163 * explored current
8164 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
8165 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
8166 *
8167 * In other words, if the current stack state (the one being explored) has
8168 * more valid slots than the old one that already passed validation, it means
8169 * the verifier can stop exploring and conclude that current state is valid too
8170 *
8171 * Similarly with registers. If explored state has register type as invalid
8172 * whereas register type in current state is meaningful, it means that
8173 * the current state will reach 'bpf_exit' instruction safely
8174 */
f4d7e40a
AS
8175static bool func_states_equal(struct bpf_func_state *old,
8176 struct bpf_func_state *cur)
f1bca824 8177{
f1174f77
EC
8178 struct idpair *idmap;
8179 bool ret = false;
f1bca824
AS
8180 int i;
8181
f1174f77
EC
8182 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
8183 /* If we failed to allocate the idmap, just say it's not safe */
8184 if (!idmap)
1a0dc1ac 8185 return false;
f1174f77
EC
8186
8187 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 8188 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 8189 goto out_free;
f1bca824
AS
8190 }
8191
638f5b90
AS
8192 if (!stacksafe(old, cur, idmap))
8193 goto out_free;
fd978bf7
JS
8194
8195 if (!refsafe(old, cur))
8196 goto out_free;
f1174f77
EC
8197 ret = true;
8198out_free:
8199 kfree(idmap);
8200 return ret;
f1bca824
AS
8201}
8202
f4d7e40a
AS
8203static bool states_equal(struct bpf_verifier_env *env,
8204 struct bpf_verifier_state *old,
8205 struct bpf_verifier_state *cur)
8206{
8207 int i;
8208
8209 if (old->curframe != cur->curframe)
8210 return false;
8211
979d63d5
DB
8212 /* Verification state from speculative execution simulation
8213 * must never prune a non-speculative execution one.
8214 */
8215 if (old->speculative && !cur->speculative)
8216 return false;
8217
d83525ca
AS
8218 if (old->active_spin_lock != cur->active_spin_lock)
8219 return false;
8220
f4d7e40a
AS
8221 /* for states to be equal callsites have to be the same
8222 * and all frame states need to be equivalent
8223 */
8224 for (i = 0; i <= old->curframe; i++) {
8225 if (old->frame[i]->callsite != cur->frame[i]->callsite)
8226 return false;
8227 if (!func_states_equal(old->frame[i], cur->frame[i]))
8228 return false;
8229 }
8230 return true;
8231}
8232
5327ed3d
JW
8233/* Return 0 if no propagation happened. Return negative error code if error
8234 * happened. Otherwise, return the propagated bit.
8235 */
55e7f3b5
JW
8236static int propagate_liveness_reg(struct bpf_verifier_env *env,
8237 struct bpf_reg_state *reg,
8238 struct bpf_reg_state *parent_reg)
8239{
5327ed3d
JW
8240 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
8241 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
8242 int err;
8243
5327ed3d
JW
8244 /* When we get here, the read flags of PARENT_REG or REG could be any of
8245 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
8246 * for propagation if PARENT_REG has the strongest, REG_LIVE_READ64.
8247 */
8248 if (parent_flag == REG_LIVE_READ64 ||
8249 /* Or if there is no read flag from REG. */
8250 !flag ||
8251 /* Or if the read flag from REG is the same as PARENT_REG. */
8252 parent_flag == flag)
55e7f3b5
JW
8253 return 0;
8254
5327ed3d 8255 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
8256 if (err)
8257 return err;
8258
5327ed3d 8259 return flag;
55e7f3b5
JW
8260}
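
/* A worked table for the checks above, assuming the flag values in
 * include/linux/bpf_verifier.h (REG_LIVE_READ32 = 0x1,
 * REG_LIVE_READ64 = 0x2):
 *
 *	parent_flag	flag		result
 *	READ64		anything	0 (parent already strongest)
 *	anything	NONE		0 (nothing to propagate)
 *	READ32		READ32		0 (already propagated)
 *	NONE		READ32		mark parent READ32, return READ32
 *	NONE/READ32	READ64		mark parent READ64, return READ64
 *
 * Only the REG_LIVE_READ64 return value makes the caller issue
 * mark_insn_zext() on the parent register.
 */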
8261
8e9cd9ce 8262/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
8263 * straight-line code between a state and its parent. When we arrive at an
8264 * equivalent state (jump target or such) we didn't arrive by the straight-line
8265 * code, so read marks in the state must propagate to the parent regardless
8266 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 8267 * in mark_reg_read() is for.
8e9cd9ce 8268 */
f4d7e40a
AS
8269static int propagate_liveness(struct bpf_verifier_env *env,
8270 const struct bpf_verifier_state *vstate,
8271 struct bpf_verifier_state *vparent)
dc503a8a 8272{
3f8cafa4 8273 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 8274 struct bpf_func_state *state, *parent;
3f8cafa4 8275 int i, frame, err = 0;
dc503a8a 8276
f4d7e40a
AS
8277 if (vparent->curframe != vstate->curframe) {
8278 WARN(1, "propagate_live: parent frame %d current frame %d\n",
8279 vparent->curframe, vstate->curframe);
8280 return -EFAULT;
8281 }
dc503a8a
EC
8282 /* Propagate read liveness of registers... */
8283 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 8284 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
8285 parent = vparent->frame[frame];
8286 state = vstate->frame[frame];
8287 parent_reg = parent->regs;
8288 state_reg = state->regs;
83d16312
JK
8289 /* We don't need to worry about FP liveness, it's read-only */
8290 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
8291 err = propagate_liveness_reg(env, &state_reg[i],
8292 &parent_reg[i]);
5327ed3d 8293 if (err < 0)
3f8cafa4 8294 return err;
5327ed3d
JW
8295 if (err == REG_LIVE_READ64)
8296 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 8297 }
f4d7e40a 8298
1b04aee7 8299 /* Propagate stack slots. */
f4d7e40a
AS
8300 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
8301 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
8302 parent_reg = &parent->stack[i].spilled_ptr;
8303 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
8304 err = propagate_liveness_reg(env, state_reg,
8305 parent_reg);
5327ed3d 8306 if (err < 0)
3f8cafa4 8307 return err;
dc503a8a
EC
8308 }
8309 }
5327ed3d 8310 return 0;
dc503a8a
EC
8311}
8312
a3ce685d
AS
8313/* find precise scalars in the previous equivalent state and
8314 * propagate them into the current state
8315 */
8316static int propagate_precision(struct bpf_verifier_env *env,
8317 const struct bpf_verifier_state *old)
8318{
8319 struct bpf_reg_state *state_reg;
8320 struct bpf_func_state *state;
8321 int i, err = 0;
8322
8323 state = old->frame[old->curframe];
8324 state_reg = state->regs;
8325 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
8326 if (state_reg->type != SCALAR_VALUE ||
8327 !state_reg->precise)
8328 continue;
8329 if (env->log.level & BPF_LOG_LEVEL2)
8330 verbose(env, "propagating r%d\n", i);
8331 err = mark_chain_precision(env, i);
8332 if (err < 0)
8333 return err;
8334 }
8335
8336 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
8337 if (state->stack[i].slot_type[0] != STACK_SPILL)
8338 continue;
8339 state_reg = &state->stack[i].spilled_ptr;
8340 if (state_reg->type != SCALAR_VALUE ||
8341 !state_reg->precise)
8342 continue;
8343 if (env->log.level & BPF_LOG_LEVEL2)
8344 verbose(env, "propagating fp%d\n",
8345 (-i - 1) * BPF_REG_SIZE);
8346 err = mark_chain_precision_stack(env, i);
8347 if (err < 0)
8348 return err;
8349 }
8350 return 0;
8351}
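
/* A quick check of the fp offset logged above: stack slot i covers
 * bytes [fp - (i + 1) * BPF_REG_SIZE, fp - i * BPF_REG_SIZE), so with
 * BPF_REG_SIZE == 8, i == 0 prints "propagating fp-8" and i == 1
 * prints "propagating fp-16", matching (-i - 1) * BPF_REG_SIZE.
 */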
8352
2589726d
AS
8353static bool states_maybe_looping(struct bpf_verifier_state *old,
8354 struct bpf_verifier_state *cur)
8355{
8356 struct bpf_func_state *fold, *fcur;
8357 int i, fr = cur->curframe;
8358
8359 if (old->curframe != fr)
8360 return false;
8361
8362 fold = old->frame[fr];
8363 fcur = cur->frame[fr];
8364 for (i = 0; i < MAX_BPF_REG; i++)
8365 if (memcmp(&fold->regs[i], &fcur->regs[i],
8366 offsetof(struct bpf_reg_state, parent)))
8367 return false;
8368 return true;
8369}
8370
8371
58e2af8b 8372static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 8373{
58e2af8b 8374 struct bpf_verifier_state_list *new_sl;
9f4686c4 8375 struct bpf_verifier_state_list *sl, **pprev;
679c782d 8376 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 8377 int i, j, err, states_cnt = 0;
10d274e8 8378 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 8379
b5dc0163 8380 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 8381 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
8382 /* this 'insn_idx' instruction wasn't marked, so we will not
8383 * be doing state search here
8384 */
8385 return 0;
8386
2589726d
AS
8387 /* bpf progs typically have pruning point every 4 instructions
8388 * http://vger.kernel.org/bpfconf2019.html#session-1
8389 * Do not add new state for future pruning if the verifier hasn't seen
8390 * at least 2 jumps and at least 8 instructions.
8391 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
8392 * In tests that amounts to up to 50% reduction into total verifier
8393 * memory consumption and 20% verifier time speedup.
8394 */
8395 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
8396 env->insn_processed - env->prev_insn_processed >= 8)
8397 add_new_state = true;
8398
a8f500af
AS
8399 pprev = explored_state(env, insn_idx);
8400 sl = *pprev;
8401
9242b5f5
AS
8402 clean_live_states(env, insn_idx, cur);
8403
a8f500af 8404 while (sl) {
dc2a4ebc
AS
8405 states_cnt++;
8406 if (sl->state.insn_idx != insn_idx)
8407 goto next;
2589726d
AS
8408 if (sl->state.branches) {
8409 if (states_maybe_looping(&sl->state, cur) &&
8410 states_equal(env, &sl->state, cur)) {
8411 verbose_linfo(env, insn_idx, "; ");
8412 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
8413 return -EINVAL;
8414 }
8415 /* if the verifier is processing a loop, avoid adding new state
8416 * too often, since different loop iterations have distinct
8417 * states and may not help future pruning.
8418 * This threshold shouldn't be too low, to make sure that
8419 * a loop with a large bound is rejected quickly.
8420 * The most abusive loop will be:
8421 * r1 += 1
8422 * if r1 < 1000000 goto pc-2
8423 * 1M insn_processed limit / 100 == 10k peak states.
8424 * This threshold shouldn't be too high either, since states
8425 * at the end of the loop are likely to be useful in pruning.
8426 */
8427 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
8428 env->insn_processed - env->prev_insn_processed < 100)
8429 add_new_state = false;
8430 goto miss;
8431 }
638f5b90 8432 if (states_equal(env, &sl->state, cur)) {
9f4686c4 8433 sl->hit_cnt++;
f1bca824 8434 /* reached equivalent register/stack state,
dc503a8a
EC
8435 * prune the search.
8436 * Registers read by the continuation are read by us.
8e9cd9ce
EC
8437 * If we have any write marks in env->cur_state, they
8438 * will prevent corresponding reads in the continuation
8439 * from reaching our parent (an explored_state). Our
8440 * own state will get the read marks recorded, but
8441 * they'll be immediately forgotten as we're pruning
8442 * this state and will pop a new one.
f1bca824 8443 */
f4d7e40a 8444 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
8445
8446 /* if previous state reached the exit with precision and
8447 * current state is equivalent to it (except precision marks)
8448 * the precision needs to be propagated back in
8449 * the current state.
8450 */
8451 err = err ? : push_jmp_history(env, cur);
8452 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
8453 if (err)
8454 return err;
f1bca824 8455 return 1;
dc503a8a 8456 }
2589726d
AS
8457miss:
8458 /* when a new state is not going to be added, do not increase the miss count.
8459 * Otherwise several loop iterations will remove the state
8460 * recorded earlier. The goal of these heuristics is to have
8461 * states from some iterations of the loop (some in the beginning
8462 * and some at the end) to help pruning.
8463 */
8464 if (add_new_state)
8465 sl->miss_cnt++;
9f4686c4
AS
8466 /* heuristic to determine whether this state is beneficial
8467 * to keep checking from a state equivalence point of view.
8468 * Higher numbers increase max_states_per_insn and verification time,
8469 * but do not meaningfully decrease insn_processed.
8470 */
8471 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
8472 /* the state is unlikely to be useful. Remove it to
8473 * speed up verification
8474 */
8475 *pprev = sl->next;
8476 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
8477 u32 br = sl->state.branches;
8478
8479 WARN_ONCE(br,
8480 "BUG live_done but branches_to_explore %d\n",
8481 br);
9f4686c4
AS
8482 free_verifier_state(&sl->state, false);
8483 kfree(sl);
8484 env->peak_states--;
8485 } else {
8486 /* cannot free this state, since parentage chain may
8487 * walk it later. Add it for free_list instead to
8488 * be freed at the end of verification
8489 */
8490 sl->next = env->free_list;
8491 env->free_list = sl;
8492 }
8493 sl = *pprev;
8494 continue;
8495 }
dc2a4ebc 8496next:
9f4686c4
AS
8497 pprev = &sl->next;
8498 sl = *pprev;
f1bca824
AS
8499 }
8500
06ee7115
AS
8501 if (env->max_states_per_insn < states_cnt)
8502 env->max_states_per_insn = states_cnt;
8503
2c78ee89 8504 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 8505 return push_jmp_history(env, cur);
ceefbc96 8506
2589726d 8507 if (!add_new_state)
b5dc0163 8508 return push_jmp_history(env, cur);
ceefbc96 8509
2589726d
AS
8510 /* There were no equivalent states, remember the current one.
8511 * Technically the current state is not proven to be safe yet,
f4d7e40a 8512 * but it will either reach the outermost bpf_exit (which means it's safe)
2589726d 8513 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 8514 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
8515 * again on the way to bpf_exit.
8516 * When looping the sl->state.branches will be > 0 and this state
8517 * will not be considered for equivalence until branches == 0.
f1bca824 8518 */
638f5b90 8519 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
8520 if (!new_sl)
8521 return -ENOMEM;
06ee7115
AS
8522 env->total_states++;
8523 env->peak_states++;
2589726d
AS
8524 env->prev_jmps_processed = env->jmps_processed;
8525 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
8526
8527 /* add new state to the head of linked list */
679c782d
EC
8528 new = &new_sl->state;
8529 err = copy_verifier_state(new, cur);
1969db47 8530 if (err) {
679c782d 8531 free_verifier_state(new, false);
1969db47
AS
8532 kfree(new_sl);
8533 return err;
8534 }
dc2a4ebc 8535 new->insn_idx = insn_idx;
2589726d
AS
8536 WARN_ONCE(new->branches != 1,
8537 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 8538
2589726d 8539 cur->parent = new;
b5dc0163
AS
8540 cur->first_insn_idx = insn_idx;
8541 clear_jmp_history(cur);
5d839021
AS
8542 new_sl->next = *explored_state(env, insn_idx);
8543 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
8544 /* connect new state to parentage chain. Current frame needs all
8545 * registers connected. Only r6 - r9 of the callers are alive (pushed
8546 * to the stack implicitly by JITs) so in callers' frames connect just
8547 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
8548 * the state of the call instruction (with WRITTEN set), and r0 comes
8549 * from callee with its full parentage chain, anyway.
8550 */
8e9cd9ce
EC
8551 /* clear write marks in current state: the writes we did are not writes
8552 * our child did, so they don't screen off its reads from us.
8553 * (There are no read marks in current state, because reads always mark
8554 * their parent and current state never has children yet. Only
8555 * explored_states can get read marks.)
8556 */
eea1c227
AS
8557 for (j = 0; j <= cur->curframe; j++) {
8558 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
8559 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
8560 for (i = 0; i < BPF_REG_FP; i++)
8561 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
8562 }
f4d7e40a
AS
8563
8564 /* all stack frames are accessible from callee, clear them all */
8565 for (j = 0; j <= cur->curframe; j++) {
8566 struct bpf_func_state *frame = cur->frame[j];
679c782d 8567 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 8568
679c782d 8569 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 8570 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
8571 frame->stack[i].spilled_ptr.parent =
8572 &newframe->stack[i].spilled_ptr;
8573 }
f4d7e40a 8574 }
f1bca824
AS
8575 return 0;
8576}
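
/* A worked example of the eviction heuristic above: a stored state
 * with hit_cnt == 2 survives while miss_cnt <= 2 * 3 + 3 == 9 and is
 * evicted on its 10th miss; a state that never pruned anything
 * (hit_cnt == 0) is dropped after its 4th miss. States that may still
 * be walked through the parentage chain are moved to env->free_list
 * instead of being freed immediately.
 */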
8577
c64b7983
JS
8578/* Return true if it's OK to have the same insn return a different type. */
8579static bool reg_type_mismatch_ok(enum bpf_reg_type type)
8580{
8581 switch (type) {
8582 case PTR_TO_CTX:
8583 case PTR_TO_SOCKET:
8584 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
8585 case PTR_TO_SOCK_COMMON:
8586 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
8587 case PTR_TO_TCP_SOCK:
8588 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 8589 case PTR_TO_XDP_SOCK:
2a02759e 8590 case PTR_TO_BTF_ID:
b121b341 8591 case PTR_TO_BTF_ID_OR_NULL:
c64b7983
JS
8592 return false;
8593 default:
8594 return true;
8595 }
8596}
8597
8598/* If an instruction was previously used with particular pointer types, then we
8599 * need to be careful to avoid cases such as the below, where it may be ok
8600 * for one branch to access the pointer, but not ok for the other branch:
8601 *
8602 * R1 = sock_ptr
8603 * goto X;
8604 * ...
8605 * R1 = some_other_valid_ptr;
8606 * goto X;
8607 * ...
8608 * R2 = *(u32 *)(R1 + 0);
8609 */
8610static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
8611{
8612 return src != prev && (!reg_type_mismatch_ok(src) ||
8613 !reg_type_mismatch_ok(prev));
8614}
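
/* A minimal sketch of a program this check rejects, assuming the insn
 * macros from include/linux/filter.h. The final load is reached with
 * R1 == PTR_TO_CTX on one path and R1 == PTR_TO_STACK on the other, so
 * do_check() below reports "same insn cannot be used with different
 * pointers". (Offsets are illustrative only; a complete reproducer,
 * like the spill/fill selftests, also makes each access valid on its
 * own path.)
 */
static const struct bpf_insn mixed_ptr_sketch[] = {
	/* if R2 != 0, keep R1 = ctx and skip the next insn */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
	/* else make R1 point at the stack frame */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	/* same insn reached with two different pointer types in R1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
};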
8615
58e2af8b 8616static int do_check(struct bpf_verifier_env *env)
17a52670 8617{
6f8a57cc 8618 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1 8619 struct bpf_verifier_state *state = env->cur_state;
17a52670 8620 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 8621 struct bpf_reg_state *regs;
06ee7115 8622 int insn_cnt = env->prog->len;
17a52670 8623 bool do_print_state = false;
b5dc0163 8624 int prev_insn_idx = -1;
17a52670 8625
17a52670
AS
8626 for (;;) {
8627 struct bpf_insn *insn;
8628 u8 class;
8629 int err;
8630
b5dc0163 8631 env->prev_insn_idx = prev_insn_idx;
c08435ec 8632 if (env->insn_idx >= insn_cnt) {
61bd5218 8633 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 8634 env->insn_idx, insn_cnt);
17a52670
AS
8635 return -EFAULT;
8636 }
8637
c08435ec 8638 insn = &insns[env->insn_idx];
17a52670
AS
8639 class = BPF_CLASS(insn->code);
8640
06ee7115 8641 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
8642 verbose(env,
8643 "BPF program is too large. Processed %d insn\n",
06ee7115 8644 env->insn_processed);
17a52670
AS
8645 return -E2BIG;
8646 }
8647
c08435ec 8648 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
8649 if (err < 0)
8650 return err;
8651 if (err == 1) {
8652 /* found equivalent state, can prune the search */
06ee7115 8653 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 8654 if (do_print_state)
979d63d5
DB
8655 verbose(env, "\nfrom %d to %d%s: safe\n",
8656 env->prev_insn_idx, env->insn_idx,
8657 env->cur_state->speculative ?
8658 " (speculative execution)" : "");
f1bca824 8659 else
c08435ec 8660 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
8661 }
8662 goto process_bpf_exit;
8663 }
8664
c3494801
AS
8665 if (signal_pending(current))
8666 return -EAGAIN;
8667
3c2ce60b
DB
8668 if (need_resched())
8669 cond_resched();
8670
06ee7115
AS
8671 if (env->log.level & BPF_LOG_LEVEL2 ||
8672 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
8673 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 8674 verbose(env, "%d:", env->insn_idx);
c5fc9692 8675 else
979d63d5
DB
8676 verbose(env, "\nfrom %d to %d%s:",
8677 env->prev_insn_idx, env->insn_idx,
8678 env->cur_state->speculative ?
8679 " (speculative execution)" : "");
f4d7e40a 8680 print_verifier_state(env, state->frame[state->curframe]);
17a52670
AS
8681 do_print_state = false;
8682 }
8683
06ee7115 8684 if (env->log.level & BPF_LOG_LEVEL) {
7105e828
DB
8685 const struct bpf_insn_cbs cbs = {
8686 .cb_print = verbose,
abe08840 8687 .private_data = env,
7105e828
DB
8688 };
8689
c08435ec
DB
8690 verbose_linfo(env, env->insn_idx, "; ");
8691 verbose(env, "%d: ", env->insn_idx);
abe08840 8692 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17a52670
AS
8693 }
8694
cae1927c 8695 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
8696 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
8697 env->prev_insn_idx);
cae1927c
JK
8698 if (err)
8699 return err;
8700 }
13a27dfc 8701
638f5b90 8702 regs = cur_regs(env);
51c39bb1 8703 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
b5dc0163 8704 prev_insn_idx = env->insn_idx;
fd978bf7 8705
17a52670 8706 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 8707 err = check_alu_op(env, insn);
17a52670
AS
8708 if (err)
8709 return err;
8710
8711 } else if (class == BPF_LDX) {
3df126f3 8712 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
8713
8714 /* check for reserved fields is already done */
8715
17a52670 8716 /* check src operand */
dc503a8a 8717 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
8718 if (err)
8719 return err;
8720
dc503a8a 8721 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
8722 if (err)
8723 return err;
8724
725f9dcd
AS
8725 src_reg_type = regs[insn->src_reg].type;
8726
17a52670
AS
8727 /* check that memory (src_reg + off) is readable,
8728 * the state of dst_reg will be updated by this func
8729 */
c08435ec
DB
8730 err = check_mem_access(env, env->insn_idx, insn->src_reg,
8731 insn->off, BPF_SIZE(insn->code),
8732 BPF_READ, insn->dst_reg, false);
17a52670
AS
8733 if (err)
8734 return err;
8735
c08435ec 8736 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
8737
8738 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
8739 /* saw a valid insn
8740 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 8741 * save type to validate intersecting paths
9bac3d6d 8742 */
3df126f3 8743 *prev_src_type = src_reg_type;
9bac3d6d 8744
c64b7983 8745 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
8746 /* An abusive program is trying to use the same insn
8747 * dst_reg = *(u32*) (src_reg + off)
8748 * with different pointer types:
8749 * src_reg == ctx in one branch and
8750 * src_reg == stack|map in some other branch.
8751 * Reject it.
8752 */
61bd5218 8753 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
8754 return -EINVAL;
8755 }
8756
17a52670 8757 } else if (class == BPF_STX) {
3df126f3 8758 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 8759
17a52670 8760 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 8761 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
8762 if (err)
8763 return err;
c08435ec 8764 env->insn_idx++;
17a52670
AS
8765 continue;
8766 }
8767
17a52670 8768 /* check src1 operand */
dc503a8a 8769 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
8770 if (err)
8771 return err;
8772 /* check src2 operand */
dc503a8a 8773 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
8774 if (err)
8775 return err;
8776
d691f9e8
AS
8777 dst_reg_type = regs[insn->dst_reg].type;
8778
17a52670 8779 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
8780 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
8781 insn->off, BPF_SIZE(insn->code),
8782 BPF_WRITE, insn->src_reg, false);
17a52670
AS
8783 if (err)
8784 return err;
8785
c08435ec 8786 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
8787
8788 if (*prev_dst_type == NOT_INIT) {
8789 *prev_dst_type = dst_reg_type;
c64b7983 8790 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 8791 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
8792 return -EINVAL;
8793 }
8794
17a52670
AS
8795 } else if (class == BPF_ST) {
8796 if (BPF_MODE(insn->code) != BPF_MEM ||
8797 insn->src_reg != BPF_REG_0) {
61bd5218 8798 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
8799 return -EINVAL;
8800 }
8801 /* check src operand */
dc503a8a 8802 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
8803 if (err)
8804 return err;
8805
f37a8cb8 8806 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 8807 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f
DB
8808 insn->dst_reg,
8809 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
8810 return -EACCES;
8811 }
8812
17a52670 8813 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
8814 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
8815 insn->off, BPF_SIZE(insn->code),
8816 BPF_WRITE, -1, false);
17a52670
AS
8817 if (err)
8818 return err;
8819
092ed096 8820 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
8821 u8 opcode = BPF_OP(insn->code);
8822
2589726d 8823 env->jmps_processed++;
17a52670
AS
8824 if (opcode == BPF_CALL) {
8825 if (BPF_SRC(insn->code) != BPF_K ||
8826 insn->off != 0 ||
f4d7e40a
AS
8827 (insn->src_reg != BPF_REG_0 &&
8828 insn->src_reg != BPF_PSEUDO_CALL) ||
092ed096
JW
8829 insn->dst_reg != BPF_REG_0 ||
8830 class == BPF_JMP32) {
61bd5218 8831 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
8832 return -EINVAL;
8833 }
8834
d83525ca
AS
8835 if (env->cur_state->active_spin_lock &&
8836 (insn->src_reg == BPF_PSEUDO_CALL ||
8837 insn->imm != BPF_FUNC_spin_unlock)) {
8838 verbose(env, "function calls are not allowed while holding a lock\n");
8839 return -EINVAL;
8840 }
f4d7e40a 8841 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 8842 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 8843 else
c08435ec 8844 err = check_helper_call(env, insn->imm, env->insn_idx);
17a52670
AS
8845 if (err)
8846 return err;
8847
8848 } else if (opcode == BPF_JA) {
8849 if (BPF_SRC(insn->code) != BPF_K ||
8850 insn->imm != 0 ||
8851 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8852 insn->dst_reg != BPF_REG_0 ||
8853 class == BPF_JMP32) {
61bd5218 8854 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
8855 return -EINVAL;
8856 }
8857
c08435ec 8858 env->insn_idx += insn->off + 1;
17a52670
AS
8859 continue;
8860
8861 } else if (opcode == BPF_EXIT) {
8862 if (BPF_SRC(insn->code) != BPF_K ||
8863 insn->imm != 0 ||
8864 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8865 insn->dst_reg != BPF_REG_0 ||
8866 class == BPF_JMP32) {
61bd5218 8867 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
8868 return -EINVAL;
8869 }
8870
d83525ca
AS
8871 if (env->cur_state->active_spin_lock) {
8872 verbose(env, "bpf_spin_unlock is missing\n");
8873 return -EINVAL;
8874 }
8875
f4d7e40a
AS
8876 if (state->curframe) {
8877 /* exit from nested function */
c08435ec 8878 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
8879 if (err)
8880 return err;
8881 do_print_state = true;
8882 continue;
8883 }
8884
fd978bf7
JS
8885 err = check_reference_leak(env);
8886 if (err)
8887 return err;
8888
390ee7e2
AS
8889 err = check_return_code(env);
8890 if (err)
8891 return err;
f1bca824 8892process_bpf_exit:
2589726d 8893 update_branch_counts(env, env->cur_state);
b5dc0163 8894 err = pop_stack(env, &prev_insn_idx,
6f8a57cc 8895 &env->insn_idx, pop_log);
638f5b90
AS
8896 if (err < 0) {
8897 if (err != -ENOENT)
8898 return err;
17a52670
AS
8899 break;
8900 } else {
8901 do_print_state = true;
8902 continue;
8903 }
8904 } else {
c08435ec 8905 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
8906 if (err)
8907 return err;
8908 }
8909 } else if (class == BPF_LD) {
8910 u8 mode = BPF_MODE(insn->code);
8911
8912 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
8913 err = check_ld_abs(env, insn);
8914 if (err)
8915 return err;
8916
17a52670
AS
8917 } else if (mode == BPF_IMM) {
8918 err = check_ld_imm(env, insn);
8919 if (err)
8920 return err;
8921
c08435ec 8922 env->insn_idx++;
51c39bb1 8923 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
17a52670 8924 } else {
61bd5218 8925 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
8926 return -EINVAL;
8927 }
8928 } else {
61bd5218 8929 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
8930 return -EINVAL;
8931 }
8932
c08435ec 8933 env->insn_idx++;
17a52670
AS
8934 }
8935
8936 return 0;
8937}
8938
56f668df
MKL
8939static int check_map_prealloc(struct bpf_map *map)
8940{
8941 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
8942 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8943 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
8944 !(map->map_flags & BPF_F_NO_PREALLOC);
8945}
8946
d83525ca
AS
8947static bool is_tracing_prog_type(enum bpf_prog_type type)
8948{
8949 switch (type) {
8950 case BPF_PROG_TYPE_KPROBE:
8951 case BPF_PROG_TYPE_TRACEPOINT:
8952 case BPF_PROG_TYPE_PERF_EVENT:
8953 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8954 return true;
8955 default:
8956 return false;
8957 }
8958}
8959
94dacdbd
TG
8960static bool is_preallocated_map(struct bpf_map *map)
8961{
8962 if (!check_map_prealloc(map))
8963 return false;
8964 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
8965 return false;
8966 return true;
8967}
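
/* A user-side sketch (assuming the uapi in include/uapi/linux/bpf.h):
 * preallocation is decided at map creation time via map_flags, which
 * is what check_map_prealloc() above inspects.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	attr.map_flags   = 0;
 *	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * map_flags == 0 gives a preallocated hash map, usable from tracing
 * programs; adding BPF_F_NO_PREALLOC switches to run-time allocation,
 * which the checks below reject for perf_event programs and warn
 * about for other trace types.
 */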
8968
61bd5218
JK
8969static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8970 struct bpf_map *map,
fdc15d38
AS
8971 struct bpf_prog *prog)
8972
8973{
94dacdbd
TG
8974 /*
8975 * Validate that trace type programs use preallocated hash maps.
8976 *
8977 * For programs attached to PERF events this is mandatory as the
8978 * perf NMI can hit any arbitrary code sequence.
8979 *
8980 * All other trace types using non-preallocated hash maps are unsafe as
8981 * well because tracepoints or kprobes can be inside locked regions
8982 * of the memory allocator or at a place where a recursion into the
8983 * memory allocator would see inconsistent state.
8984 *
2ed905c5
TG
8985 * On RT enabled kernels run-time allocation by all trace type
8986 * programs is strictly prohibited due to lock type constraints. On
8987 * !RT kernels it is allowed for backwards compatibility reasons for
8988 * now, but warnings are emitted so developers are made aware of
8989 * the unsafety and can fix their programs before this is enforced.
56f668df 8990 */
94dacdbd
TG
8991 if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
8992 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
61bd5218 8993 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
8994 return -EINVAL;
8995 }
2ed905c5
TG
8996 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
8997 verbose(env, "trace type programs can only use preallocated hash map\n");
8998 return -EINVAL;
8999 }
94dacdbd
TG
9000 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
9001 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
fdc15d38 9002 }
a3884572 9003
d83525ca
AS
9004 if ((is_tracing_prog_type(prog->type) ||
9005 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
9006 map_value_has_spin_lock(map)) {
9007 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
9008 return -EINVAL;
9009 }
9010
a3884572 9011 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 9012 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
9013 verbose(env, "offload device mismatch between prog and map\n");
9014 return -EINVAL;
9015 }
9016
85d33df3
MKL
9017 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
9018 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
9019 return -EINVAL;
9020 }
9021
fdc15d38
AS
9022 return 0;
9023}
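
/* A sketch of the bpf_spin_lock restriction above:
 * map_value_has_spin_lock() is true when the map's BTF-described value
 * type embeds the lock, e.g.
 *
 *	struct map_val {
 *		struct bpf_spin_lock lock;
 *		__u32 counter;
 *	};
 *
 * Tracing programs can fire in almost arbitrary context, including
 * while the same lock is already held, so they are rejected here.
 */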
9024
b741f163
RG
9025static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
9026{
9027 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
9028 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
9029}
9030
0246e64d
AS
9031/* look for pseudo eBPF instructions that access map FDs and
9032 * replace them with actual map pointers
9033 */
58e2af8b 9034static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
0246e64d
AS
9035{
9036 struct bpf_insn *insn = env->prog->insnsi;
9037 int insn_cnt = env->prog->len;
fdc15d38 9038 int i, j, err;
0246e64d 9039
f1f7714e 9040 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
9041 if (err)
9042 return err;
9043
0246e64d 9044 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 9045 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 9046 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 9047 verbose(env, "BPF_LDX uses reserved fields\n");
9bac3d6d
AS
9048 return -EINVAL;
9049 }
9050
d691f9e8
AS
9051 if (BPF_CLASS(insn->code) == BPF_STX &&
9052 ((BPF_MODE(insn->code) != BPF_MEM &&
9053 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 9054 verbose(env, "BPF_STX uses reserved fields\n");
d691f9e8
AS
9055 return -EINVAL;
9056 }
9057
0246e64d 9058 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 9059 struct bpf_insn_aux_data *aux;
0246e64d
AS
9060 struct bpf_map *map;
9061 struct fd f;
d8eca5bb 9062 u64 addr;
0246e64d
AS
9063
9064 if (i == insn_cnt - 1 || insn[1].code != 0 ||
9065 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
9066 insn[1].off != 0) {
61bd5218 9067 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
9068 return -EINVAL;
9069 }
9070
d8eca5bb 9071 if (insn[0].src_reg == 0)
0246e64d
AS
9072 /* valid generic load 64-bit imm */
9073 goto next_insn;
9074
d8eca5bb
DB
9075 /* In final convert_pseudo_ld_imm64() step, this is
9076 * converted into regular 64-bit imm load insn.
9077 */
9078 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
9079 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
9080 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
9081 insn[1].imm != 0)) {
9082 verbose(env,
9083 "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
9084 return -EINVAL;
9085 }
9086
20182390 9087 f = fdget(insn[0].imm);
c2101297 9088 map = __bpf_map_get(f);
0246e64d 9089 if (IS_ERR(map)) {
61bd5218 9090 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 9091 insn[0].imm);
0246e64d
AS
9092 return PTR_ERR(map);
9093 }
9094
61bd5218 9095 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
9096 if (err) {
9097 fdput(f);
9098 return err;
9099 }
9100
d8eca5bb
DB
9101 aux = &env->insn_aux_data[i];
9102 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
9103 addr = (unsigned long)map;
9104 } else {
9105 u32 off = insn[1].imm;
9106
9107 if (off >= BPF_MAX_VAR_OFF) {
9108 verbose(env, "direct value offset of %u is not allowed\n", off);
9109 fdput(f);
9110 return -EINVAL;
9111 }
9112
9113 if (!map->ops->map_direct_value_addr) {
9114 verbose(env, "no direct value access support for this map type\n");
9115 fdput(f);
9116 return -EINVAL;
9117 }
9118
9119 err = map->ops->map_direct_value_addr(map, &addr, off);
9120 if (err) {
9121 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
9122 map->value_size, off);
9123 fdput(f);
9124 return err;
9125 }
9126
9127 aux->map_off = off;
9128 addr += off;
9129 }
9130
9131 insn[0].imm = (u32)addr;
9132 insn[1].imm = addr >> 32;
0246e64d
AS
9133
9134 /* check whether we recorded this map already */
d8eca5bb 9135 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 9136 if (env->used_maps[j] == map) {
d8eca5bb 9137 aux->map_index = j;
0246e64d
AS
9138 fdput(f);
9139 goto next_insn;
9140 }
d8eca5bb 9141 }
0246e64d
AS
9142
9143 if (env->used_map_cnt >= MAX_USED_MAPS) {
9144 fdput(f);
9145 return -E2BIG;
9146 }
9147
0246e64d
AS
9148 /* hold the map. If the program is rejected by verifier,
9149 * the map will be released by release_maps() or it
9150 * will be used by the valid program until it's unloaded
ab7f5bf0 9151 * and all maps are released in free_used_maps()
0246e64d 9152 */
1e0bd5a0 9153 bpf_map_inc(map);
d8eca5bb
DB
9154
9155 aux->map_index = env->used_map_cnt;
92117d84
AS
9156 env->used_maps[env->used_map_cnt++] = map;
9157
b741f163 9158 if (bpf_map_is_cgroup_storage(map) &&
e4730423 9159 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 9160 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
9161 fdput(f);
9162 return -EBUSY;
9163 }
9164
0246e64d
AS
9165 fdput(f);
9166next_insn:
9167 insn++;
9168 i++;
5e581dad
DB
9169 continue;
9170 }
9171
9172 /* Basic sanity check before we invest more work here. */
9173 if (!bpf_opcode_in_insntable(insn->code)) {
9174 verbose(env, "unknown opcode %02x\n", insn->code);
9175 return -EINVAL;
0246e64d
AS
9176 }
9177 }
9178
9179 /* now all pseudo BPF_LD_IMM64 instructions load valid
9180 * 'struct bpf_map *' into a register instead of user map_fd.
9181 * These pointers will be used later by verifier to validate map access.
9182 */
9183 return 0;
9184}
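
/* A sketch of the loader side, assuming the BPF_LD_MAP_FD() macro
 * from include/linux/filter.h: the pseudo instruction rewritten above
 * is emitted as a two-insn ld_imm64 carrying a map fd,
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 *
 * i.e. insn[0].src_reg = BPF_PSEUDO_MAP_FD, insn[0].imm = map_fd,
 * insn[1].imm = 0. After this pass insn[0].imm and insn[1].imm
 * together hold the low and high halves of the 'struct bpf_map *'
 * address.
 */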
9185
9186/* drop refcnt of maps used by the rejected program */
58e2af8b 9187static void release_maps(struct bpf_verifier_env *env)
0246e64d 9188{
a2ea0746
DB
9189 __bpf_free_used_maps(env->prog->aux, env->used_maps,
9190 env->used_map_cnt);
0246e64d
AS
9191}
9192
9193/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 9194static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
9195{
9196 struct bpf_insn *insn = env->prog->insnsi;
9197 int insn_cnt = env->prog->len;
9198 int i;
9199
9200 for (i = 0; i < insn_cnt; i++, insn++)
9201 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
9202 insn->src_reg = 0;
9203}
9204
8041902d
AS
9205/* single env->prog->insni[off] instruction was replaced with the range
9206 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
9207 * [0, off) and [off, end) to new locations, so the patched range stays zero
9208 */
b325fbca
JW
9209static int adjust_insn_aux_data(struct bpf_verifier_env *env,
9210 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
9211{
9212 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
9213 struct bpf_insn *insn = new_prog->insnsi;
9214 u32 prog_len;
c131187d 9215 int i;
8041902d 9216
b325fbca
JW
9217 /* aux info at OFF always needs adjustment, no matter whether the fast
9218 * path (cnt == 1) is taken or not. There is no guarantee the INSN at OFF
9219 * is the original insn of the old prog.
9220 */
9221 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
9222
8041902d
AS
9223 if (cnt == 1)
9224 return 0;
b325fbca 9225 prog_len = new_prog->len;
fad953ce
KC
9226 new_data = vzalloc(array_size(prog_len,
9227 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
9228 if (!new_data)
9229 return -ENOMEM;
9230 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
9231 memcpy(new_data + off + cnt - 1, old_data + off,
9232 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 9233 for (i = off; i < off + cnt - 1; i++) {
51c39bb1 9234 new_data[i].seen = env->pass_cnt;
b325fbca
JW
9235 new_data[i].zext_dst = insn_has_def32(env, insn + i);
9236 }
8041902d
AS
9237 env->insn_aux_data = new_data;
9238 vfree(old_data);
9239 return 0;
9240}
9241
cc8b0b92
AS
9242static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
9243{
9244 int i;
9245
9246 if (len == 1)
9247 return;
4cb3d99c
JW
9248 /* NOTE: fake 'exit' subprog should be updated as well. */
9249 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 9250 if (env->subprog_info[i].start <= off)
cc8b0b92 9251 continue;
9c8105bd 9252 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
9253 }
9254}
9255
8041902d
AS
9256static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
9257 const struct bpf_insn *patch, u32 len)
9258{
9259 struct bpf_prog *new_prog;
9260
9261 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
9262 if (IS_ERR(new_prog)) {
9263 if (PTR_ERR(new_prog) == -ERANGE)
9264 verbose(env,
9265 "insn %d cannot be patched due to 16-bit range\n",
9266 env->insn_aux_data[off].orig_idx);
8041902d 9267 return NULL;
4f73379e 9268 }
b325fbca 9269 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 9270 return NULL;
cc8b0b92 9271 adjust_subprog_starts(env, off, len);
8041902d
AS
9272 return new_prog;
9273}
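
/* A worked example of the patching bookkeeping: replacing the single
 * insn at off == 5 with a 3-insn patch grows the program by
 * len - 1 == 2, so the old insn 6 now sits at index 8. Callers
 * therefore keep a running 'delta' and patch at 'i + delta', while
 * subprog starts past off are shifted by adjust_subprog_starts()
 * above.
 */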
9274
52875a04
JK
9275static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
9276 u32 off, u32 cnt)
9277{
9278 int i, j;
9279
9280 /* find first prog starting at or after off (first to remove) */
9281 for (i = 0; i < env->subprog_cnt; i++)
9282 if (env->subprog_info[i].start >= off)
9283 break;
9284 /* find first prog starting at or after off + cnt (first to stay) */
9285 for (j = i; j < env->subprog_cnt; j++)
9286 if (env->subprog_info[j].start >= off + cnt)
9287 break;
9288 /* if j doesn't start exactly at off + cnt, we are just removing
9289 * the front of previous prog
9290 */
9291 if (env->subprog_info[j].start != off + cnt)
9292 j--;
9293
9294 if (j > i) {
9295 struct bpf_prog_aux *aux = env->prog->aux;
9296 int move;
9297
9298 /* move fake 'exit' subprog as well */
9299 move = env->subprog_cnt + 1 - j;
9300
9301 memmove(env->subprog_info + i,
9302 env->subprog_info + j,
9303 sizeof(*env->subprog_info) * move);
9304 env->subprog_cnt -= j - i;
9305
9306 /* remove func_info */
9307 if (aux->func_info) {
9308 move = aux->func_info_cnt - j;
9309
9310 memmove(aux->func_info + i,
9311 aux->func_info + j,
9312 sizeof(*aux->func_info) * move);
9313 aux->func_info_cnt -= j - i;
9314 /* func_info->insn_off is set after all code rewrites,
9315 * in adjust_btf_func() - no need to adjust
9316 */
9317 }
9318 } else {
9319 /* convert i from "first prog to remove" to "first to adjust" */
9320 if (env->subprog_info[i].start == off)
9321 i++;
9322 }
9323
9324 /* update fake 'exit' subprog as well */
9325 for (; i <= env->subprog_cnt; i++)
9326 env->subprog_info[i].start -= cnt;
9327
9328 return 0;
9329}
9330
9331static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
9332 u32 cnt)
9333{
9334 struct bpf_prog *prog = env->prog;
9335 u32 i, l_off, l_cnt, nr_linfo;
9336 struct bpf_line_info *linfo;
9337
9338 nr_linfo = prog->aux->nr_linfo;
9339 if (!nr_linfo)
9340 return 0;
9341
9342 linfo = prog->aux->linfo;
9343
9344 /* find first line info to remove, count lines to be removed */
9345 for (i = 0; i < nr_linfo; i++)
9346 if (linfo[i].insn_off >= off)
9347 break;
9348
9349 l_off = i;
9350 l_cnt = 0;
9351 for (; i < nr_linfo; i++)
9352 if (linfo[i].insn_off < off + cnt)
9353 l_cnt++;
9354 else
9355 break;
9356
9357 /* If the first live insn doesn't match the first live linfo, it needs to
9358 * "inherit" the last removed linfo. prog is already modified, so
9359 * prog->len == off means no live instructions after (tail was removed).
9360 */
9361 if (prog->len != off && l_cnt &&
9362 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
9363 l_cnt--;
9364 linfo[--i].insn_off = off + cnt;
9365 }
9366
9367 /* remove the line info which refer to the removed instructions */
9368 if (l_cnt) {
9369 memmove(linfo + l_off, linfo + i,
9370 sizeof(*linfo) * (nr_linfo - i));
9371
9372 prog->aux->nr_linfo -= l_cnt;
9373 nr_linfo = prog->aux->nr_linfo;
9374 }
9375
9376 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
9377 for (i = l_off; i < nr_linfo; i++)
9378 linfo[i].insn_off -= cnt;
9379
9380 /* fix up all subprogs (incl. 'exit') which start >= off */
9381 for (i = 0; i <= env->subprog_cnt; i++)
9382 if (env->subprog_info[i].linfo_idx > l_off) {
9383 /* program may have started in the removed region but
9384 * may not be fully removed
9385 */
9386 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
9387 env->subprog_info[i].linfo_idx -= l_cnt;
9388 else
9389 env->subprog_info[i].linfo_idx = l_off;
9390 }
9391
9392 return 0;
9393}
9394
9395static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
9396{
9397 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
9398 unsigned int orig_prog_len = env->prog->len;
9399 int err;
9400
08ca90af
JK
9401 if (bpf_prog_is_dev_bound(env->prog->aux))
9402 bpf_prog_offload_remove_insns(env, off, cnt);
9403
52875a04
JK
9404 err = bpf_remove_insns(env->prog, off, cnt);
9405 if (err)
9406 return err;
9407
9408 err = adjust_subprog_starts_after_remove(env, off, cnt);
9409 if (err)
9410 return err;
9411
9412 err = bpf_adj_linfo_after_remove(env, off, cnt);
9413 if (err)
9414 return err;
9415
9416 memmove(aux_data + off, aux_data + off + cnt,
9417 sizeof(*aux_data) * (orig_prog_len - off - cnt));
9418
9419 return 0;
9420}
9421
2a5418a1
DB
9422 /* The verifier does more data flow analysis than LLVM and will not
9423 * explore branches that are dead at run time. Malicious programs can
9424 * have dead code too. Therefore replace all dead at-run-time code
9425 * with 'ja -1'.
9426 *
9427 * Plain nops are not optimal: if they sat at the end of the program
9428 * and through another bug we managed to jump there, we would execute
9429 * beyond program memory. Returning an exception code also wouldn't
9430 * work, since we can have subprogs where the dead code could be
9431 * located.
c131187d
AS
9432 */
9433static void sanitize_dead_code(struct bpf_verifier_env *env)
9434{
9435 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 9436 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
9437 struct bpf_insn *insn = env->prog->insnsi;
9438 const int insn_cnt = env->prog->len;
9439 int i;
9440
9441 for (i = 0; i < insn_cnt; i++) {
9442 if (aux_data[i].seen)
9443 continue;
2a5418a1 9444 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
9445 }
9446}
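
/* A quick check of the 'ja -1' trap above: do_check() advances with
 * env->insn_idx += insn->off + 1 for BPF_JA, so off == -1 makes the
 * instruction branch to itself. The verifier has proven this code
 * unreachable, so the self-branch can never actually execute; it only
 * guarantees that a stray jump into the dead region cannot run off
 * the end of the program.
 */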
9447
e2ae4ca2
JK
9448static bool insn_is_cond_jump(u8 code)
9449{
9450 u8 op;
9451
092ed096
JW
9452 if (BPF_CLASS(code) == BPF_JMP32)
9453 return true;
9454
e2ae4ca2
JK
9455 if (BPF_CLASS(code) != BPF_JMP)
9456 return false;
9457
9458 op = BPF_OP(code);
9459 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
9460}
9461
9462static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
9463{
9464 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
9465 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
9466 struct bpf_insn *insn = env->prog->insnsi;
9467 const int insn_cnt = env->prog->len;
9468 int i;
9469
9470 for (i = 0; i < insn_cnt; i++, insn++) {
9471 if (!insn_is_cond_jump(insn->code))
9472 continue;
9473
9474 if (!aux_data[i + 1].seen)
9475 ja.off = insn->off;
9476 else if (!aux_data[i + 1 + insn->off].seen)
9477 ja.off = 0;
9478 else
9479 continue;
9480
08ca90af
JK
9481 if (bpf_prog_is_dev_bound(env->prog->aux))
9482 bpf_prog_offload_replace_insn(env, i, &ja);
9483
e2ae4ca2
JK
9484 memcpy(insn, &ja, sizeof(ja));
9485 }
9486}
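
/* A worked example for the rewrite above: in
 *
 *	if r1 == 0 goto +2
 *
 * when the fallthrough insn was never seen, the branch is always
 * taken and the insn becomes 'ja +2' (ja.off = insn->off); when the
 * branch target was never seen, the branch is never taken and it
 * becomes 'ja +0', a plain fallthrough later dropped by
 * opt_remove_nops() below.
 */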
9487
52875a04
JK
9488static int opt_remove_dead_code(struct bpf_verifier_env *env)
9489{
9490 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
9491 int insn_cnt = env->prog->len;
9492 int i, err;
9493
9494 for (i = 0; i < insn_cnt; i++) {
9495 int j;
9496
9497 j = 0;
9498 while (i + j < insn_cnt && !aux_data[i + j].seen)
9499 j++;
9500 if (!j)
9501 continue;
9502
9503 err = verifier_remove_insns(env, i, j);
9504 if (err)
9505 return err;
9506 insn_cnt = env->prog->len;
9507 }
9508
9509 return 0;
9510}
9511
a1b14abc
JK
9512static int opt_remove_nops(struct bpf_verifier_env *env)
9513{
9514 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
9515 struct bpf_insn *insn = env->prog->insnsi;
9516 int insn_cnt = env->prog->len;
9517 int i, err;
9518
9519 for (i = 0; i < insn_cnt; i++) {
9520 if (memcmp(&insn[i], &ja, sizeof(ja)))
9521 continue;
9522
9523 err = verifier_remove_insns(env, i, 1);
9524 if (err)
9525 return err;
9526 insn_cnt--;
9527 i--;
9528 }
9529
9530 return 0;
9531}
9532
d6c2308c
JW
9533static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
9534 const union bpf_attr *attr)
a4b1d3c1 9535{
d6c2308c 9536 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 9537 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 9538 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 9539 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 9540 struct bpf_prog *new_prog;
d6c2308c 9541 bool rnd_hi32;
a4b1d3c1 9542
d6c2308c 9543 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 9544 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
9545 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
9546 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
9547 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
9548 for (i = 0; i < len; i++) {
9549 int adj_idx = i + delta;
9550 struct bpf_insn insn;
9551
d6c2308c
JW
9552 insn = insns[adj_idx];
9553 if (!aux[adj_idx].zext_dst) {
9554 u8 code, class;
9555 u32 imm_rnd;
9556
9557 if (!rnd_hi32)
9558 continue;
9559
9560 code = insn.code;
9561 class = BPF_CLASS(code);
9562 if (insn_no_def(&insn))
9563 continue;
9564
9565 /* NOTE: arg "reg" (the fourth one) is only used for
9566 * BPF_STX which has been ruled out in above
9567 * check, it is safe to pass NULL here.
9568 */
9569 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
9570 if (class == BPF_LD &&
9571 BPF_MODE(code) == BPF_IMM)
9572 i++;
9573 continue;
9574 }
9575
9576 /* ctx load could be transformed into wider load. */
9577 if (class == BPF_LDX &&
9578 aux[adj_idx].ptr_type == PTR_TO_CTX)
9579 continue;
9580
9581 imm_rnd = get_random_int();
9582 rnd_hi32_patch[0] = insn;
9583 rnd_hi32_patch[1].imm = imm_rnd;
9584 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
9585 patch = rnd_hi32_patch;
9586 patch_len = 4;
9587 goto apply_patch_buffer;
9588 }
9589
9590 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
9591 continue;
9592
a4b1d3c1
JW
9593 zext_patch[0] = insn;
9594 zext_patch[1].dst_reg = insn.dst_reg;
9595 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
9596 patch = zext_patch;
9597 patch_len = 2;
9598apply_patch_buffer:
9599 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
9600 if (!new_prog)
9601 return -ENOMEM;
9602 env->prog = new_prog;
9603 insns = new_prog->insnsi;
9604 aux = env->insn_aux_data;
d6c2308c 9605 delta += patch_len - 1;
a4b1d3c1
JW
9606 }
9607
9608 return 0;
9609}
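
/* A worked example of the two patch buffers above. For a 32-bit def
 * whose result is later read as 64 bits (aux->zext_dst), on a JIT
 * where bpf_jit_needs_zext() is true,
 *
 *	w3 = w1
 *
 * is followed by the special zero-extending mov built via
 * BPF_ZEXT_REG(). With BPF_F_TEST_RND_HI32 set, a 32-bit def that no
 * verified path reads as 64 bits instead becomes
 *
 *	w3 = w1
 *	AX = imm_rnd
 *	AX <<= 32
 *	r3 |= AX
 *
 * poisoning the upper half so tests catch code that wrongly relies on
 * it being zero.
 */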
9610
c64b7983
JS
9611/* convert load instructions that access fields of a context type into a
9612 * sequence of instructions that access fields of the underlying structure:
9613 * struct __sk_buff -> struct sk_buff
9614 * struct bpf_sock_ops -> struct sock
9bac3d6d 9615 */
58e2af8b 9616static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 9617{
00176a34 9618 const struct bpf_verifier_ops *ops = env->ops;
f96da094 9619 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 9620 const int insn_cnt = env->prog->len;
36bbef52 9621 struct bpf_insn insn_buf[16], *insn;
46f53a65 9622 u32 target_size, size_default, off;
9bac3d6d 9623 struct bpf_prog *new_prog;
d691f9e8 9624 enum bpf_access_type type;
f96da094 9625 bool is_narrower_load;
9bac3d6d 9626
b09928b9
DB
9627 if (ops->gen_prologue || env->seen_direct_write) {
9628 if (!ops->gen_prologue) {
9629 verbose(env, "bpf verifier is misconfigured\n");
9630 return -EINVAL;
9631 }
36bbef52
DB
9632 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
9633 env->prog);
9634 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 9635 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
9636 return -EINVAL;
9637 } else if (cnt) {
8041902d 9638 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
9639 if (!new_prog)
9640 return -ENOMEM;
8041902d 9641
36bbef52 9642 env->prog = new_prog;
3df126f3 9643 delta += cnt - 1;
36bbef52
DB
9644 }
9645 }
9646
c64b7983 9647 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
9648 return 0;
9649
3df126f3 9650 insn = env->prog->insnsi + delta;
36bbef52 9651
9bac3d6d 9652 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
9653 bpf_convert_ctx_access_t convert_ctx_access;
9654
62c7989b
DB
9655 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
9656 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
9657 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 9658 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 9659 type = BPF_READ;
62c7989b
DB
9660 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
9661 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
9662 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 9663 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
9664 type = BPF_WRITE;
9665 else
9bac3d6d
AS
9666 continue;
9667
af86ca4e
AS
9668 if (type == BPF_WRITE &&
9669 env->insn_aux_data[i + delta].sanitize_stack_off) {
9670 struct bpf_insn patch[] = {
9671 /* Sanitize suspicious stack slot with zero.
9672 * There are no memory dependencies for this store,
9673 * since it's only using frame pointer and immediate
9674 * constant of zero
9675 */
9676 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
9677 env->insn_aux_data[i + delta].sanitize_stack_off,
9678 0),
9679 /* the original STX instruction will immediately
9680 * overwrite the same stack slot with appropriate value
9681 */
9682 *insn,
9683 };
9684
9685 cnt = ARRAY_SIZE(patch);
9686 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
9687 if (!new_prog)
9688 return -ENOMEM;
9689
9690 delta += cnt - 1;
9691 env->prog = new_prog;
9692 insn = new_prog->insnsi + i + delta;
9693 continue;
9694 }
9695
c64b7983
JS
9696 switch (env->insn_aux_data[i + delta].ptr_type) {
9697 case PTR_TO_CTX:
9698 if (!ops->convert_ctx_access)
9699 continue;
9700 convert_ctx_access = ops->convert_ctx_access;
9701 break;
9702 case PTR_TO_SOCKET:
46f8bc92 9703 case PTR_TO_SOCK_COMMON:
c64b7983
JS
9704 convert_ctx_access = bpf_sock_convert_ctx_access;
9705 break;
655a51e5
MKL
9706 case PTR_TO_TCP_SOCK:
9707 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
9708 break;
fada7fdc
JL
9709 case PTR_TO_XDP_SOCK:
9710 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
9711 break;
2a02759e 9712 case PTR_TO_BTF_ID:
27ae7997
MKL
9713 if (type == BPF_READ) {
9714 insn->code = BPF_LDX | BPF_PROBE_MEM |
9715 BPF_SIZE((insn)->code);
9716 env->prog->aux->num_exentries++;
9717 } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
2a02759e
AS
9718 verbose(env, "Writes through BTF pointers are not allowed\n");
9719 return -EINVAL;
9720 }
2a02759e 9721 continue;
c64b7983 9722 default:
9bac3d6d 9723 continue;
c64b7983 9724 }
9bac3d6d 9725
31fd8581 9726 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 9727 size = BPF_LDST_BYTES(insn);
31fd8581
YS
9728
9729 /* If the read access is a narrower load of the field,
 9730 * convert it to a 4/8-byte load, to minimize program-type-specific
 9731 * convert_ctx_access changes. If the conversion is successful,
 9732 * we will apply the proper mask to the result.
9733 */
f96da094 9734 is_narrower_load = size < ctx_field_size;
46f53a65
AI
9735 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
9736 off = insn->off;
31fd8581 9737 if (is_narrower_load) {
f96da094
DB
9738 u8 size_code;
9739
9740 if (type == BPF_WRITE) {
61bd5218 9741 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
9742 return -EINVAL;
9743 }
31fd8581 9744
f96da094 9745 size_code = BPF_H;
31fd8581
YS
9746 if (ctx_field_size == 4)
9747 size_code = BPF_W;
9748 else if (ctx_field_size == 8)
9749 size_code = BPF_DW;
f96da094 9750
bc23105c 9751 insn->off = off & ~(size_default - 1);
31fd8581
YS
9752 insn->code = BPF_LDX | BPF_MEM | size_code;
9753 }
f96da094
DB
9754
9755 target_size = 0;
c64b7983
JS
9756 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
9757 &target_size);
f96da094
DB
9758 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
9759 (ctx_field_size && !target_size)) {
61bd5218 9760 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
9761 return -EINVAL;
9762 }
f96da094
DB
9763
9764 if (is_narrower_load && size < target_size) {
d895a0f1
IL
9765 u8 shift = bpf_ctx_narrow_access_offset(
9766 off, size, size_default) * 8;
46f53a65
AI
9767 if (ctx_field_size <= 4) {
9768 if (shift)
9769 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
9770 insn->dst_reg,
9771 shift);
31fd8581 9772 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 9773 (1 << size * 8) - 1);
46f53a65
AI
9774 } else {
9775 if (shift)
9776 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
9777 insn->dst_reg,
9778 shift);
31fd8581 9779 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 9780 (1ULL << size * 8) - 1);
46f53a65 9781 }
31fd8581 9782 }
9bac3d6d 9783
8041902d 9784 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
9785 if (!new_prog)
9786 return -ENOMEM;
9787
3df126f3 9788 delta += cnt - 1;
9bac3d6d
AS
9789
9790 /* keep walking new program and skip insns we just inserted */
9791 env->prog = new_prog;
3df126f3 9792 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
9793 }
9794
9795 return 0;
9796}
9797
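/* A worked example of the narrow-load rewrite in convert_ctx_accesses()
 * (a sketch; the actual field offset comes from the program type's
 * convert_ctx_access callback): a 1-byte read at byte 2 of a 4-byte
 * ctx field is widened and then masked, on little endian roughly as
 *
 *   r0 = *(u32 *)(r1 + field_off)  // widened load of the whole field
 *   w0 >>= 16                      // bpf_ctx_narrow_access_offset() * 8
 *   w0 &= 0xff                     // (1 << size * 8) - 1
 *
 * which leaves exactly the requested byte in the destination register.
 */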
1c2a088a
AS
9798static int jit_subprogs(struct bpf_verifier_env *env)
9799{
9800 struct bpf_prog *prog = env->prog, **func, *tmp;
9801 int i, j, subprog_start, subprog_end = 0, len, subprog;
7105e828 9802 struct bpf_insn *insn;
1c2a088a 9803 void *old_bpf_func;
c454a46b 9804 int err;
1c2a088a 9805
f910cefa 9806 if (env->subprog_cnt <= 1)
1c2a088a
AS
9807 return 0;
9808
7105e828 9809 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
9810 if (insn->code != (BPF_JMP | BPF_CALL) ||
9811 insn->src_reg != BPF_PSEUDO_CALL)
9812 continue;
c7a89784
DB
 9813 /* Upon error here we cannot fall back to the interpreter but
9814 * need a hard reject of the program. Thus -EFAULT is
9815 * propagated in any case.
9816 */
1c2a088a
AS
9817 subprog = find_subprog(env, i + insn->imm + 1);
9818 if (subprog < 0) {
9819 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
9820 i + insn->imm + 1);
9821 return -EFAULT;
9822 }
9823 /* temporarily remember subprog id inside insn instead of
9824 * aux_data, since next loop will split up all insns into funcs
9825 */
f910cefa 9826 insn->off = subprog;
1c2a088a
AS
9827 /* remember original imm in case JIT fails and fallback
9828 * to interpreter will be needed
9829 */
9830 env->insn_aux_data[i].call_imm = insn->imm;
9831 /* point imm to __bpf_call_base+1 from JITs point of view */
9832 insn->imm = 1;
9833 }
9834
c454a46b
MKL
9835 err = bpf_prog_alloc_jited_linfo(prog);
9836 if (err)
9837 goto out_undo_insn;
9838
9839 err = -ENOMEM;
6396bb22 9840 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 9841 if (!func)
c7a89784 9842 goto out_undo_insn;
1c2a088a 9843
f910cefa 9844 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 9845 subprog_start = subprog_end;
4cb3d99c 9846 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
9847
9848 len = subprog_end - subprog_start;
492ecee8
AS
9849 /* BPF_PROG_RUN doesn't call subprogs directly,
9850 * hence main prog stats include the runtime of subprogs.
 9851 * subprogs don't have IDs and are not reachable via prog_get_next_id
9852 * func[i]->aux->stats will never be accessed and stays NULL
9853 */
9854 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
9855 if (!func[i])
9856 goto out_free;
9857 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
9858 len * sizeof(struct bpf_insn));
4f74d809 9859 func[i]->type = prog->type;
1c2a088a 9860 func[i]->len = len;
4f74d809
DB
9861 if (bpf_prog_calc_tag(func[i]))
9862 goto out_free;
1c2a088a 9863 func[i]->is_func = 1;
ba64e7d8
YS
9864 func[i]->aux->func_idx = i;
9865 /* the btf and func_info will be freed only at prog->aux */
9866 func[i]->aux->btf = prog->aux->btf;
9867 func[i]->aux->func_info = prog->aux->func_info;
9868
1c2a088a
AS
9869 /* Use bpf_prog_F_tag to indicate functions in stack traces.
9870 * Long term would need debug info to populate names
9871 */
9872 func[i]->aux->name[0] = 'F';
9c8105bd 9873 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 9874 func[i]->jit_requested = 1;
c454a46b
MKL
9875 func[i]->aux->linfo = prog->aux->linfo;
9876 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
9877 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
9878 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
1c2a088a
AS
9879 func[i] = bpf_int_jit_compile(func[i]);
9880 if (!func[i]->jited) {
9881 err = -ENOTSUPP;
9882 goto out_free;
9883 }
9884 cond_resched();
9885 }
9886 /* at this point all bpf functions were successfully JITed
9887 * now populate all bpf_calls with correct addresses and
9888 * run last pass of JIT
9889 */
f910cefa 9890 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9891 insn = func[i]->insnsi;
9892 for (j = 0; j < func[i]->len; j++, insn++) {
9893 if (insn->code != (BPF_JMP | BPF_CALL) ||
9894 insn->src_reg != BPF_PSEUDO_CALL)
9895 continue;
9896 subprog = insn->off;
0d306c31
PB
9897 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9898 __bpf_call_base;
1c2a088a 9899 }
2162fed4
SD
9900
9901 /* we use the aux data to keep a list of the start addresses
9902 * of the JITed images for each function in the program
9903 *
9904 * for some architectures, such as powerpc64, the imm field
9905 * might not be large enough to hold the offset of the start
9906 * address of the callee's JITed image from __bpf_call_base
9907 *
9908 * in such cases, we can lookup the start address of a callee
9909 * by using its subprog id, available from the off field of
9910 * the call instruction, as an index for this list
9911 */
9912 func[i]->aux->func = func;
9913 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 9914 }
f910cefa 9915 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9916 old_bpf_func = func[i]->bpf_func;
9917 tmp = bpf_int_jit_compile(func[i]);
9918 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9919 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 9920 err = -ENOTSUPP;
1c2a088a
AS
9921 goto out_free;
9922 }
9923 cond_resched();
9924 }
9925
9926 /* finally lock prog and jit images for all functions and
9927 * populate kallsysm
9928 */
f910cefa 9929 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9930 bpf_prog_lock_ro(func[i]);
9931 bpf_prog_kallsyms_add(func[i]);
9932 }
7105e828
DB
9933
 9934 /* Last step: make the now-unused interpreter insns from the main
 9935 * prog consistent for later dump requests, so they
 9936 * look the same as if they had only been interpreted.
9937 */
9938 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
9939 if (insn->code != (BPF_JMP | BPF_CALL) ||
9940 insn->src_reg != BPF_PSEUDO_CALL)
9941 continue;
9942 insn->off = env->insn_aux_data[i].call_imm;
9943 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 9944 insn->imm = subprog;
7105e828
DB
9945 }
9946
1c2a088a
AS
9947 prog->jited = 1;
9948 prog->bpf_func = func[0]->bpf_func;
9949 prog->aux->func = func;
f910cefa 9950 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 9951 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
9952 return 0;
9953out_free:
f910cefa 9954 for (i = 0; i < env->subprog_cnt; i++)
1c2a088a
AS
9955 if (func[i])
9956 bpf_jit_free(func[i]);
9957 kfree(func);
c7a89784 9958out_undo_insn:
1c2a088a
AS
9959 /* cleanup main prog to be interpreted */
9960 prog->jit_requested = 0;
9961 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9962 if (insn->code != (BPF_JMP | BPF_CALL) ||
9963 insn->src_reg != BPF_PSEUDO_CALL)
9964 continue;
9965 insn->off = 0;
9966 insn->imm = env->insn_aux_data[i].call_imm;
9967 }
c454a46b 9968 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
9969 return err;
9970}
9971
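/* A sketch of the imm/off bookkeeping above for one bpf-to-bpf call
 * site calling subprog 1 (hypothetical ids):
 *
 *   after verification: imm = pc-relative callee offset, off = 0
 *   1st loop:           off = 1 (subprog id), imm = 1,
 *                       insn_aux_data[i].call_imm = original imm
 *   2nd loop:           imm = func[1]->bpf_func - __bpf_call_base
 *   last loop:          off = call_imm, imm = 1 (subprog id), so
 *                       dumps of the main prog stay consistent
 *
 * and the out_undo_insn path restores off = 0, imm = call_imm so the
 * program can still run in the interpreter after a JIT failure.
 */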
1ea47e01
AS
9972static int fixup_call_args(struct bpf_verifier_env *env)
9973{
19d28fbd 9974#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9975 struct bpf_prog *prog = env->prog;
9976 struct bpf_insn *insn = prog->insnsi;
9977 int i, depth;
19d28fbd 9978#endif
e4052d06 9979 int err = 0;
1ea47e01 9980
e4052d06
QM
9981 if (env->prog->jit_requested &&
9982 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
9983 err = jit_subprogs(env);
9984 if (err == 0)
1c2a088a 9985 return 0;
c7a89784
DB
9986 if (err == -EFAULT)
9987 return err;
19d28fbd
DM
9988 }
9989#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9990 for (i = 0; i < prog->len; i++, insn++) {
9991 if (insn->code != (BPF_JMP | BPF_CALL) ||
9992 insn->src_reg != BPF_PSEUDO_CALL)
9993 continue;
9994 depth = get_callee_stack_depth(env, insn, i);
9995 if (depth < 0)
9996 return depth;
9997 bpf_patch_call_args(insn, depth);
9998 }
19d28fbd
DM
9999 err = 0;
10000#endif
10001 return err;
1ea47e01
AS
10002}
10003
79741b3b 10004/* fixup insn->imm field of bpf_call instructions
81ed18ab 10005 * and inline eligible helpers as an explicit sequence of BPF instructions
e245c5c6
AS
10006 *
 10007 * this function is called after the eBPF program passed verification
10008 */
79741b3b 10009static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 10010{
79741b3b 10011 struct bpf_prog *prog = env->prog;
d2e4c1e6 10012 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 10013 struct bpf_insn *insn = prog->insnsi;
e245c5c6 10014 const struct bpf_func_proto *fn;
79741b3b 10015 const int insn_cnt = prog->len;
09772d92 10016 const struct bpf_map_ops *ops;
c93552c4 10017 struct bpf_insn_aux_data *aux;
81ed18ab
AS
10018 struct bpf_insn insn_buf[16];
10019 struct bpf_prog *new_prog;
10020 struct bpf_map *map_ptr;
d2e4c1e6 10021 int i, ret, cnt, delta = 0;
e245c5c6 10022
79741b3b 10023 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
10024 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
10025 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
10026 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 10027 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
10028 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
10029 struct bpf_insn mask_and_div[] = {
10030 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
10031 /* Rx div 0 -> 0 */
10032 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
10033 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
10034 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10035 *insn,
10036 };
10037 struct bpf_insn mask_and_mod[] = {
10038 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
10039 /* Rx mod 0 -> Rx */
10040 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
10041 *insn,
10042 };
10043 struct bpf_insn *patchlet;
10044
10045 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
10046 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
10047 patchlet = mask_and_div + (is64 ? 1 : 0);
10048 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
10049 } else {
10050 patchlet = mask_and_mod + (is64 ? 1 : 0);
10051 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
10052 }
10053
10054 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
10055 if (!new_prog)
10056 return -ENOMEM;
10057
10058 delta += cnt - 1;
10059 env->prog = prog = new_prog;
10060 insn = new_prog->insnsi + i + delta;
10061 continue;
10062 }
10063
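/* The runtime effect of the mask_and_div patchlet above for a 64-bit
 * "r3 /= r4" (a sketch; the 32-bit variants additionally truncate the
 * divisor with the leading mov32):
 *
 *   if r4 != 0 goto +2   // divisor nonzero: run the real division
 *   w3 = 0               // Rx div 0 -> 0 (xor of dst with itself)
 *   goto +1              // skip over the division
 *   r3 /= r4
 *
 * mask_and_mod is analogous but jumps straight over the insn, so
 * "Rx mod 0 -> Rx" leaves the destination untouched.
 */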
e0cea7ce
DB
10064 if (BPF_CLASS(insn->code) == BPF_LD &&
10065 (BPF_MODE(insn->code) == BPF_ABS ||
10066 BPF_MODE(insn->code) == BPF_IND)) {
10067 cnt = env->ops->gen_ld_abs(insn, insn_buf);
10068 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
10069 verbose(env, "bpf verifier is misconfigured\n");
10070 return -EINVAL;
10071 }
10072
10073 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10074 if (!new_prog)
10075 return -ENOMEM;
10076
10077 delta += cnt - 1;
10078 env->prog = prog = new_prog;
10079 insn = new_prog->insnsi + i + delta;
10080 continue;
10081 }
10082
979d63d5
DB
10083 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
10084 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
10085 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
10086 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
10087 struct bpf_insn insn_buf[16];
10088 struct bpf_insn *patch = &insn_buf[0];
10089 bool issrc, isneg;
10090 u32 off_reg;
10091
10092 aux = &env->insn_aux_data[i + delta];
3612af78
DB
10093 if (!aux->alu_state ||
10094 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
10095 continue;
10096
10097 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
10098 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
10099 BPF_ALU_SANITIZE_SRC;
10100
10101 off_reg = issrc ? insn->src_reg : insn->dst_reg;
10102 if (isneg)
10103 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
10104 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
10105 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
10106 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
10107 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
10108 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
10109 if (issrc) {
10110 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
10111 off_reg);
10112 insn->src_reg = BPF_REG_AX;
10113 } else {
10114 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
10115 BPF_REG_AX);
10116 }
10117 if (isneg)
10118 insn->code = insn->code == code_add ?
10119 code_sub : code_add;
10120 *patch++ = *insn;
10121 if (issrc && isneg)
10122 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
10123 cnt = patch - insn_buf;
10124
10125 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10126 if (!new_prog)
10127 return -ENOMEM;
10128
10129 delta += cnt - 1;
10130 env->prog = prog = new_prog;
10131 insn = new_prog->insnsi + i + delta;
10132 continue;
10133 }
10134
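/* How the masking sequence above defuses a bad offset under
 * speculation (a sketch; off = off_reg, limit = aux->alu_limit):
 *
 *   AX = limit - 1 - off   // negative iff off > limit - 1
 *   AX |= off              // sign bit also set iff off < 0
 *   AX = -AX; AX s>>= 63   // all-ones mask iff off was in range
 *                          // (zero for off == 0, which is harmless)
 *   off &= AX              // in-range off kept, bad off forced to 0
 *
 * the optional BPF_MUL by -1 before and after the patch handles
 * offsets that walk in the negative direction (isneg).
 */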
79741b3b
AS
10135 if (insn->code != (BPF_JMP | BPF_CALL))
10136 continue;
cc8b0b92
AS
10137 if (insn->src_reg == BPF_PSEUDO_CALL)
10138 continue;
e245c5c6 10139
79741b3b
AS
10140 if (insn->imm == BPF_FUNC_get_route_realm)
10141 prog->dst_needed = 1;
10142 if (insn->imm == BPF_FUNC_get_prandom_u32)
10143 bpf_user_rnd_init_once();
9802d865
JB
10144 if (insn->imm == BPF_FUNC_override_return)
10145 prog->kprobe_override = 1;
79741b3b 10146 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
10147 /* If we tail call into other programs, we
10148 * cannot make any assumptions since they can
10149 * be replaced dynamically during runtime in
10150 * the program array.
10151 */
10152 prog->cb_access = 1;
80a58d02 10153 env->prog->aux->stack_depth = MAX_BPF_STACK;
e647815a 10154 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 10155
79741b3b
AS
10156 /* mark bpf_tail_call as different opcode to avoid
 10157 * conditional branch in the interpreter for every normal
 10158 * call and to prevent accidental JITing by a JIT compiler
10159 * that doesn't support bpf_tail_call yet
e245c5c6 10160 */
79741b3b 10161 insn->imm = 0;
71189fa9 10162 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 10163
c93552c4 10164 aux = &env->insn_aux_data[i + delta];
2c78ee89 10165 if (env->bpf_capable && !expect_blinding &&
cc52d914 10166 prog->jit_requested &&
d2e4c1e6
DB
10167 !bpf_map_key_poisoned(aux) &&
10168 !bpf_map_ptr_poisoned(aux) &&
10169 !bpf_map_ptr_unpriv(aux)) {
10170 struct bpf_jit_poke_descriptor desc = {
10171 .reason = BPF_POKE_REASON_TAIL_CALL,
10172 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
10173 .tail_call.key = bpf_map_key_immediate(aux),
10174 };
10175
10176 ret = bpf_jit_add_poke_descriptor(prog, &desc);
10177 if (ret < 0) {
10178 verbose(env, "adding tail call poke descriptor failed\n");
10179 return ret;
10180 }
10181
10182 insn->imm = ret + 1;
10183 continue;
10184 }
10185
c93552c4
DB
10186 if (!bpf_map_ptr_unpriv(aux))
10187 continue;
10188
b2157399
AS
10189 /* instead of changing every JIT dealing with tail_call
10190 * emit two extra insns:
10191 * if (index >= max_entries) goto out;
10192 * index &= array->index_mask;
10193 * to avoid out-of-bounds cpu speculation
10194 */
c93552c4 10195 if (bpf_map_ptr_poisoned(aux)) {
40950343 10196 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
10197 return -EINVAL;
10198 }
c93552c4 10199
d2e4c1e6 10200 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
10201 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
10202 map_ptr->max_entries, 2);
10203 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
10204 container_of(map_ptr,
10205 struct bpf_array,
10206 map)->index_mask);
10207 insn_buf[2] = *insn;
10208 cnt = 3;
10209 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10210 if (!new_prog)
10211 return -ENOMEM;
10212
10213 delta += cnt - 1;
10214 env->prog = prog = new_prog;
10215 insn = new_prog->insnsi + i + delta;
79741b3b
AS
10216 continue;
10217 }
e245c5c6 10218
89c63074 10219 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
10220 * and other inlining handlers are currently limited to 64 bit
10221 * only.
89c63074 10222 */
60b58afc 10223 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
10224 (insn->imm == BPF_FUNC_map_lookup_elem ||
10225 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
10226 insn->imm == BPF_FUNC_map_delete_elem ||
10227 insn->imm == BPF_FUNC_map_push_elem ||
10228 insn->imm == BPF_FUNC_map_pop_elem ||
10229 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
10230 aux = &env->insn_aux_data[i + delta];
10231 if (bpf_map_ptr_poisoned(aux))
10232 goto patch_call_imm;
10233
d2e4c1e6 10234 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
10235 ops = map_ptr->ops;
10236 if (insn->imm == BPF_FUNC_map_lookup_elem &&
10237 ops->map_gen_lookup) {
10238 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
10239 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
10240 verbose(env, "bpf verifier is misconfigured\n");
10241 return -EINVAL;
10242 }
81ed18ab 10243
09772d92
DB
10244 new_prog = bpf_patch_insn_data(env, i + delta,
10245 insn_buf, cnt);
10246 if (!new_prog)
10247 return -ENOMEM;
81ed18ab 10248
09772d92
DB
10249 delta += cnt - 1;
10250 env->prog = prog = new_prog;
10251 insn = new_prog->insnsi + i + delta;
10252 continue;
10253 }
81ed18ab 10254
09772d92
DB
10255 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
10256 (void *(*)(struct bpf_map *map, void *key))NULL));
10257 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
10258 (int (*)(struct bpf_map *map, void *key))NULL));
10259 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
10260 (int (*)(struct bpf_map *map, void *key, void *value,
10261 u64 flags))NULL));
84430d42
DB
10262 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
10263 (int (*)(struct bpf_map *map, void *value,
10264 u64 flags))NULL));
10265 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
10266 (int (*)(struct bpf_map *map, void *value))NULL));
10267 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
10268 (int (*)(struct bpf_map *map, void *value))NULL));
10269
09772d92
DB
10270 switch (insn->imm) {
10271 case BPF_FUNC_map_lookup_elem:
10272 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
10273 __bpf_call_base;
10274 continue;
10275 case BPF_FUNC_map_update_elem:
10276 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
10277 __bpf_call_base;
10278 continue;
10279 case BPF_FUNC_map_delete_elem:
10280 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
10281 __bpf_call_base;
10282 continue;
84430d42
DB
10283 case BPF_FUNC_map_push_elem:
10284 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
10285 __bpf_call_base;
10286 continue;
10287 case BPF_FUNC_map_pop_elem:
10288 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
10289 __bpf_call_base;
10290 continue;
10291 case BPF_FUNC_map_peek_elem:
10292 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
10293 __bpf_call_base;
10294 continue;
09772d92 10295 }
81ed18ab 10296
09772d92 10297 goto patch_call_imm;
81ed18ab
AS
10298 }
10299
5576b991
MKL
10300 if (prog->jit_requested && BITS_PER_LONG == 64 &&
10301 insn->imm == BPF_FUNC_jiffies64) {
10302 struct bpf_insn ld_jiffies_addr[2] = {
10303 BPF_LD_IMM64(BPF_REG_0,
10304 (unsigned long)&jiffies),
10305 };
10306
10307 insn_buf[0] = ld_jiffies_addr[0];
10308 insn_buf[1] = ld_jiffies_addr[1];
10309 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
10310 BPF_REG_0, 0);
10311 cnt = 3;
10312
10313 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
10314 cnt);
10315 if (!new_prog)
10316 return -ENOMEM;
10317
10318 delta += cnt - 1;
10319 env->prog = prog = new_prog;
10320 insn = new_prog->insnsi + i + delta;
10321 continue;
10322 }
10323
81ed18ab 10324patch_call_imm:
5e43f899 10325 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
10326 /* all functions that have prototype and verifier allowed
10327 * programs to call them, must be real in-kernel functions
10328 */
10329 if (!fn->func) {
61bd5218
JK
10330 verbose(env,
10331 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
10332 func_id_name(insn->imm), insn->imm);
10333 return -EFAULT;
e245c5c6 10334 }
79741b3b 10335 insn->imm = fn->func - __bpf_call_base;
e245c5c6 10336 }
e245c5c6 10337
d2e4c1e6
DB
10338 /* Since poke tab is now finalized, publish aux to tracker. */
10339 for (i = 0; i < prog->aux->size_poke_tab; i++) {
10340 map_ptr = prog->aux->poke_tab[i].tail_call.map;
10341 if (!map_ptr->ops->map_poke_track ||
10342 !map_ptr->ops->map_poke_untrack ||
10343 !map_ptr->ops->map_poke_run) {
10344 verbose(env, "bpf verifier is misconfigured\n");
10345 return -EINVAL;
10346 }
10347
10348 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
10349 if (ret < 0) {
10350 verbose(env, "tracking tail call prog failed\n");
10351 return ret;
10352 }
10353 }
10354
79741b3b
AS
10355 return 0;
10356}
e245c5c6 10357
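/* After the fixups above a helper call carries
 * insn->imm = fn->func - __bpf_call_base, so BPF_CALL dispatch needs
 * no per-helper table; the interpreter resolves it roughly as
 *
 *   func = (void *)__bpf_call_base + insn->imm;
 *   BPF_R0 = func(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5);
 *
 * (a sketch of the JMP_CALL handling in the interpreter loop).
 */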
58e2af8b 10358static void free_states(struct bpf_verifier_env *env)
f1bca824 10359{
58e2af8b 10360 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
10361 int i;
10362
9f4686c4
AS
10363 sl = env->free_list;
10364 while (sl) {
10365 sln = sl->next;
10366 free_verifier_state(&sl->state, false);
10367 kfree(sl);
10368 sl = sln;
10369 }
51c39bb1 10370 env->free_list = NULL;
9f4686c4 10371
f1bca824
AS
10372 if (!env->explored_states)
10373 return;
10374
dc2a4ebc 10375 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
10376 sl = env->explored_states[i];
10377
a8f500af
AS
10378 while (sl) {
10379 sln = sl->next;
10380 free_verifier_state(&sl->state, false);
10381 kfree(sl);
10382 sl = sln;
10383 }
51c39bb1 10384 env->explored_states[i] = NULL;
f1bca824 10385 }
51c39bb1 10386}
f1bca824 10387
51c39bb1
AS
10388/* The verifier is using insn_aux_data[] to store temporary data during
10389 * verification and to store information for passes that run after the
10390 * verification like dead code sanitization. do_check_common() for subprogram N
10391 * may analyze many other subprograms. sanitize_insn_aux_data() clears all
10392 * temporary data after do_check_common() finds that subprogram N cannot be
10393 * verified independently. pass_cnt counts the number of times
10394 * do_check_common() was run and insn->aux->seen tells the pass number
10395 * insn_aux_data was touched. These variables are compared to clear temporary
10396 * data from failed pass. For testing and experiments do_check_common() can be
 10397 * data from a failed pass. For testing and experiments, do_check_common() can be
10398 */
10399static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
10400{
10401 struct bpf_insn *insn = env->prog->insnsi;
10402 struct bpf_insn_aux_data *aux;
10403 int i, class;
10404
10405 for (i = 0; i < env->prog->len; i++) {
10406 class = BPF_CLASS(insn[i].code);
10407 if (class != BPF_LDX && class != BPF_STX)
10408 continue;
10409 aux = &env->insn_aux_data[i];
10410 if (aux->seen != env->pass_cnt)
10411 continue;
10412 memset(aux, 0, offsetof(typeof(*aux), orig_idx));
10413 }
f1bca824
AS
10414}
10415
51c39bb1
AS
10416static int do_check_common(struct bpf_verifier_env *env, int subprog)
10417{
6f8a57cc 10418 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1
AS
10419 struct bpf_verifier_state *state;
10420 struct bpf_reg_state *regs;
10421 int ret, i;
10422
10423 env->prev_linfo = NULL;
10424 env->pass_cnt++;
10425
10426 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
10427 if (!state)
10428 return -ENOMEM;
10429 state->curframe = 0;
10430 state->speculative = false;
10431 state->branches = 1;
10432 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
10433 if (!state->frame[0]) {
10434 kfree(state);
10435 return -ENOMEM;
10436 }
10437 env->cur_state = state;
10438 init_func_state(env, state->frame[0],
10439 BPF_MAIN_FUNC /* callsite */,
10440 0 /* frameno */,
10441 subprog);
10442
10443 regs = state->frame[state->curframe]->regs;
be8704ff 10444 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
10445 ret = btf_prepare_func_args(env, subprog, regs);
10446 if (ret)
10447 goto out;
10448 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
10449 if (regs[i].type == PTR_TO_CTX)
10450 mark_reg_known_zero(env, regs, i);
10451 else if (regs[i].type == SCALAR_VALUE)
10452 mark_reg_unknown(env, regs, i);
10453 }
10454 } else {
10455 /* 1st arg to a function */
10456 regs[BPF_REG_1].type = PTR_TO_CTX;
10457 mark_reg_known_zero(env, regs, BPF_REG_1);
10458 ret = btf_check_func_arg_match(env, subprog, regs);
10459 if (ret == -EFAULT)
10460 /* unlikely verifier bug. abort.
10461 * ret == 0 and ret < 0 are sadly acceptable for
10462 * main() function due to backward compatibility.
 10463 * E.g. a socket filter program may be written as:
10464 * int bpf_prog(struct pt_regs *ctx)
10465 * and never dereference that ctx in the program.
10466 * 'struct pt_regs' is a type mismatch for socket
10467 * filter that should be using 'struct __sk_buff'.
10468 */
10469 goto out;
10470 }
10471
10472 ret = do_check(env);
10473out:
f59bbfc2
AS
10474 /* check for NULL is necessary, since cur_state can be freed inside
10475 * do_check() under memory pressure.
10476 */
10477 if (env->cur_state) {
10478 free_verifier_state(env->cur_state, true);
10479 env->cur_state = NULL;
10480 }
6f8a57cc
AN
10481 while (!pop_stack(env, NULL, NULL, false));
10482 if (!ret && pop_log)
10483 bpf_vlog_reset(&env->log, 0);
51c39bb1
AS
10484 free_states(env);
10485 if (ret)
10486 /* clean aux data in case subprog was rejected */
10487 sanitize_insn_aux_data(env);
10488 return ret;
10489}
10490
10491/* Verify all global functions in a BPF program one by one based on their BTF.
10492 * All global functions must pass verification. Otherwise the whole program is rejected.
10493 * Consider:
10494 * int bar(int);
10495 * int foo(int f)
10496 * {
10497 * return bar(f);
10498 * }
10499 * int bar(int b)
10500 * {
10501 * ...
10502 * }
10503 * foo() will be verified first for R1=any_scalar_value. During verification it
10504 * will be assumed that bar() already verified successfully and call to bar()
10505 * from foo() will be checked for type match only. Later bar() will be verified
10506 * independently to check that it's safe for R1=any_scalar_value.
10507 */
10508static int do_check_subprogs(struct bpf_verifier_env *env)
10509{
10510 struct bpf_prog_aux *aux = env->prog->aux;
10511 int i, ret;
10512
10513 if (!aux->func_info)
10514 return 0;
10515
10516 for (i = 1; i < env->subprog_cnt; i++) {
10517 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
10518 continue;
10519 env->insn_idx = env->subprog_info[i].start;
10520 WARN_ON_ONCE(env->insn_idx == 0);
10521 ret = do_check_common(env, i);
10522 if (ret) {
10523 return ret;
10524 } else if (env->log.level & BPF_LOG_LEVEL) {
10525 verbose(env,
10526 "Func#%d is safe for any args that match its prototype\n",
10527 i);
10528 }
10529 }
10530 return 0;
10531}
10532
10533static int do_check_main(struct bpf_verifier_env *env)
10534{
10535 int ret;
10536
10537 env->insn_idx = 0;
10538 ret = do_check_common(env, 0);
10539 if (!ret)
10540 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
10541 return ret;
10542}
10543
10544
06ee7115
AS
10545static void print_verification_stats(struct bpf_verifier_env *env)
10546{
10547 int i;
10548
10549 if (env->log.level & BPF_LOG_STATS) {
10550 verbose(env, "verification time %lld usec\n",
10551 div_u64(env->verification_time, 1000));
10552 verbose(env, "stack depth ");
10553 for (i = 0; i < env->subprog_cnt; i++) {
10554 u32 depth = env->subprog_info[i].stack_depth;
10555
10556 verbose(env, "%d", depth);
10557 if (i + 1 < env->subprog_cnt)
10558 verbose(env, "+");
10559 }
10560 verbose(env, "\n");
10561 }
10562 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
10563 "total_states %d peak_states %d mark_read %d\n",
10564 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
10565 env->max_states_per_insn, env->total_states,
10566 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
10567}
10568
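/* With a log level that includes BPF_LOG_STATS, the output resembles
 * the following (hypothetical numbers for a prog with two subprogs):
 *
 *   verification time 1234 usec
 *   stack depth 64+32
 *   processed 1024 insns (limit 1000000) max_states_per_insn 4
 *   total_states 50 peak_states 50 mark_read 3
 */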
27ae7997
MKL
10569static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
10570{
10571 const struct btf_type *t, *func_proto;
10572 const struct bpf_struct_ops *st_ops;
10573 const struct btf_member *member;
10574 struct bpf_prog *prog = env->prog;
10575 u32 btf_id, member_idx;
10576 const char *mname;
10577
10578 btf_id = prog->aux->attach_btf_id;
10579 st_ops = bpf_struct_ops_find(btf_id);
10580 if (!st_ops) {
10581 verbose(env, "attach_btf_id %u is not a supported struct\n",
10582 btf_id);
10583 return -ENOTSUPP;
10584 }
10585
10586 t = st_ops->type;
10587 member_idx = prog->expected_attach_type;
10588 if (member_idx >= btf_type_vlen(t)) {
10589 verbose(env, "attach to invalid member idx %u of struct %s\n",
10590 member_idx, st_ops->name);
10591 return -EINVAL;
10592 }
10593
10594 member = &btf_type_member(t)[member_idx];
10595 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
10596 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
10597 NULL);
10598 if (!func_proto) {
10599 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
10600 mname, member_idx, st_ops->name);
10601 return -EINVAL;
10602 }
10603
10604 if (st_ops->check_member) {
10605 int err = st_ops->check_member(t, member);
10606
10607 if (err) {
10608 verbose(env, "attach to unsupported member %s of struct %s\n",
10609 mname, st_ops->name);
10610 return err;
10611 }
10612 }
10613
10614 prog->aux->attach_func_proto = func_proto;
10615 prog->aux->attach_func_name = mname;
10616 env->ops = st_ops->verifier_ops;
10617
10618 return 0;
10619}
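/* For example (an assumed usage consistent with the checks above): a
 * BPF_PROG_TYPE_STRUCT_OPS program implementing a member of
 * struct tcp_congestion_ops sets attach_btf_id to that struct's BTF
 * id and expected_attach_type to the member index (e.g. of
 * ".ssthresh"); from here on env->ops points at the struct_ops
 * specific verifier_ops.
 */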
6ba43b76
KS
10620#define SECURITY_PREFIX "security_"
10621
18644cec 10622static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
6ba43b76 10623{
69191754
KS
10624 if (within_error_injection_list(addr) ||
10625 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
10626 sizeof(SECURITY_PREFIX) - 1))
6ba43b76 10627 return 0;
6ba43b76 10628
6ba43b76
KS
10629 return -EINVAL;
10630}
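/* E.g. an fmod_ret program may attach to security_file_open() via the
 * "security_" prefix match, while an arbitrary kernel function is
 * rejected unless it is on the error injection list
 * (ALLOW_ERROR_INJECTION).
 */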
27ae7997 10631
38207291
MKL
10632static int check_attach_btf_id(struct bpf_verifier_env *env)
10633{
10634 struct bpf_prog *prog = env->prog;
be8704ff 10635 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
5b92a28a 10636 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
38207291 10637 u32 btf_id = prog->aux->attach_btf_id;
f1b9509c 10638 const char prefix[] = "btf_trace_";
15d83c4d 10639 struct btf_func_model fmodel;
5b92a28a 10640 int ret = 0, subprog = -1, i;
fec56f58 10641 struct bpf_trampoline *tr;
38207291 10642 const struct btf_type *t;
5b92a28a 10643 bool conservative = true;
38207291 10644 const char *tname;
5b92a28a 10645 struct btf *btf;
fec56f58 10646 long addr;
5b92a28a 10647 u64 key;
38207291 10648
27ae7997
MKL
10649 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
10650 return check_struct_ops_btf_id(env);
10651
9e4e01df
KS
10652 if (prog->type != BPF_PROG_TYPE_TRACING &&
10653 prog->type != BPF_PROG_TYPE_LSM &&
10654 !prog_extension)
f1b9509c 10655 return 0;
38207291 10656
f1b9509c
AS
10657 if (!btf_id) {
10658 verbose(env, "Tracing programs must provide btf_id\n");
10659 return -EINVAL;
10660 }
5b92a28a
AS
10661 btf = bpf_prog_get_target_btf(prog);
10662 if (!btf) {
10663 verbose(env,
10664 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
10665 return -EINVAL;
10666 }
10667 t = btf_type_by_id(btf, btf_id);
f1b9509c
AS
10668 if (!t) {
10669 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
10670 return -EINVAL;
10671 }
5b92a28a 10672 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c
AS
10673 if (!tname) {
10674 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
10675 return -EINVAL;
10676 }
5b92a28a
AS
10677 if (tgt_prog) {
10678 struct bpf_prog_aux *aux = tgt_prog->aux;
10679
10680 for (i = 0; i < aux->func_info_cnt; i++)
10681 if (aux->func_info[i].type_id == btf_id) {
10682 subprog = i;
10683 break;
10684 }
10685 if (subprog == -1) {
10686 verbose(env, "Subprog %s doesn't exist\n", tname);
10687 return -EINVAL;
10688 }
10689 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
10690 if (prog_extension) {
10691 if (conservative) {
10692 verbose(env,
10693 "Cannot replace static functions\n");
10694 return -EINVAL;
10695 }
10696 if (!prog->jit_requested) {
10697 verbose(env,
10698 "Extension programs should be JITed\n");
10699 return -EINVAL;
10700 }
10701 env->ops = bpf_verifier_ops[tgt_prog->type];
03f87c0b 10702 prog->expected_attach_type = tgt_prog->expected_attach_type;
be8704ff
AS
10703 }
10704 if (!tgt_prog->jited) {
10705 verbose(env, "Can attach to only JITed progs\n");
10706 return -EINVAL;
10707 }
10708 if (tgt_prog->type == prog->type) {
10709 /* Cannot fentry/fexit another fentry/fexit program.
10710 * Cannot attach program extension to another extension.
10711 * It's ok to attach fentry/fexit to extension program.
10712 */
10713 verbose(env, "Cannot recursively attach\n");
10714 return -EINVAL;
10715 }
10716 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
10717 prog_extension &&
10718 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
10719 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
10720 /* Program extensions can extend all program types
10721 * except fentry/fexit. The reason is the following.
10722 * The fentry/fexit programs are used for performance
10723 * analysis, stats and can be attached to any program
 10724 * type except themselves. When an extension program
 10725 * replaces an XDP function, it is necessary to allow
 10726 * performance analysis of all functions: both the original
 10727 * XDP program and its extension. Hence
10728 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
 10729 * allowed. If extending fentry/fexit were allowed, it
 10730 * would be possible to create a long call chain
10731 * fentry->extension->fentry->extension beyond
10732 * reasonable stack size. Hence extending fentry is not
10733 * allowed.
10734 */
10735 verbose(env, "Cannot extend fentry/fexit\n");
10736 return -EINVAL;
10737 }
5b92a28a
AS
10738 key = ((u64)aux->id) << 32 | btf_id;
10739 } else {
be8704ff
AS
10740 if (prog_extension) {
10741 verbose(env, "Cannot replace kernel functions\n");
10742 return -EINVAL;
10743 }
5b92a28a
AS
10744 key = btf_id;
10745 }
f1b9509c
AS
10746
10747 switch (prog->expected_attach_type) {
10748 case BPF_TRACE_RAW_TP:
5b92a28a
AS
10749 if (tgt_prog) {
10750 verbose(env,
10751 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
10752 return -EINVAL;
10753 }
38207291
MKL
10754 if (!btf_type_is_typedef(t)) {
10755 verbose(env, "attach_btf_id %u is not a typedef\n",
10756 btf_id);
10757 return -EINVAL;
10758 }
f1b9509c 10759 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
38207291
MKL
10760 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
10761 btf_id, tname);
10762 return -EINVAL;
10763 }
10764 tname += sizeof(prefix) - 1;
5b92a28a 10765 t = btf_type_by_id(btf, t->type);
38207291
MKL
10766 if (!btf_type_is_ptr(t))
10767 /* should never happen in valid vmlinux build */
10768 return -EINVAL;
5b92a28a 10769 t = btf_type_by_id(btf, t->type);
38207291
MKL
10770 if (!btf_type_is_func_proto(t))
10771 /* should never happen in valid vmlinux build */
10772 return -EINVAL;
10773
 10774 /* remember two read-only pointers that are valid for
 10775 * the lifetime of the kernel
10776 */
10777 prog->aux->attach_func_name = tname;
10778 prog->aux->attach_func_proto = t;
10779 prog->aux->attach_btf_trace = true;
f1b9509c 10780 return 0;
15d83c4d
YS
10781 case BPF_TRACE_ITER:
10782 if (!btf_type_is_func(t)) {
10783 verbose(env, "attach_btf_id %u is not a function\n",
10784 btf_id);
10785 return -EINVAL;
10786 }
10787 t = btf_type_by_id(btf, t->type);
10788 if (!btf_type_is_func_proto(t))
10789 return -EINVAL;
10790 prog->aux->attach_func_name = tname;
10791 prog->aux->attach_func_proto = t;
10792 if (!bpf_iter_prog_supported(prog))
10793 return -EINVAL;
10794 ret = btf_distill_func_proto(&env->log, btf, t,
10795 tname, &fmodel);
10796 return ret;
be8704ff
AS
10797 default:
10798 if (!prog_extension)
10799 return -EINVAL;
10800 /* fallthrough */
ae240823 10801 case BPF_MODIFY_RETURN:
9e4e01df 10802 case BPF_LSM_MAC:
fec56f58
AS
10803 case BPF_TRACE_FENTRY:
10804 case BPF_TRACE_FEXIT:
9e4e01df
KS
10805 prog->aux->attach_func_name = tname;
10806 if (prog->type == BPF_PROG_TYPE_LSM) {
10807 ret = bpf_lsm_verify_prog(&env->log, prog);
10808 if (ret < 0)
10809 return ret;
10810 }
10811
fec56f58
AS
10812 if (!btf_type_is_func(t)) {
10813 verbose(env, "attach_btf_id %u is not a function\n",
10814 btf_id);
10815 return -EINVAL;
10816 }
be8704ff
AS
10817 if (prog_extension &&
10818 btf_check_type_match(env, prog, btf, t))
10819 return -EINVAL;
5b92a28a 10820 t = btf_type_by_id(btf, t->type);
fec56f58
AS
10821 if (!btf_type_is_func_proto(t))
10822 return -EINVAL;
5b92a28a 10823 tr = bpf_trampoline_lookup(key);
fec56f58
AS
10824 if (!tr)
10825 return -ENOMEM;
5b92a28a 10826 /* t is either vmlinux type or another program's type */
fec56f58
AS
10827 prog->aux->attach_func_proto = t;
10828 mutex_lock(&tr->mutex);
10829 if (tr->func.addr) {
10830 prog->aux->trampoline = tr;
10831 goto out;
10832 }
5b92a28a
AS
10833 if (tgt_prog && conservative) {
10834 prog->aux->attach_func_proto = NULL;
10835 t = NULL;
10836 }
10837 ret = btf_distill_func_proto(&env->log, btf, t,
fec56f58
AS
10838 tname, &tr->func.model);
10839 if (ret < 0)
10840 goto out;
5b92a28a 10841 if (tgt_prog) {
e9eeec58
YS
10842 if (subprog == 0)
10843 addr = (long) tgt_prog->bpf_func;
10844 else
10845 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
10846 } else {
10847 addr = kallsyms_lookup_name(tname);
10848 if (!addr) {
10849 verbose(env,
10850 "The address of function %s cannot be found\n",
10851 tname);
10852 ret = -ENOENT;
10853 goto out;
10854 }
fec56f58 10855 }
18644cec
AS
10856
10857 if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
10858 ret = check_attach_modify_return(prog, addr);
10859 if (ret)
10860 verbose(env, "%s() is not modifiable\n",
10861 prog->aux->attach_func_name);
10862 }
10863
10864 if (ret)
10865 goto out;
fec56f58
AS
10866 tr->func.addr = (void *)addr;
10867 prog->aux->trampoline = tr;
10868out:
10869 mutex_unlock(&tr->mutex);
10870 if (ret)
10871 bpf_trampoline_put(tr);
10872 return ret;
38207291 10873 }
38207291
MKL
10874}
10875
838e9690
YS
10876int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
10877 union bpf_attr __user *uattr)
51580e79 10878{
06ee7115 10879 u64 start_time = ktime_get_ns();
58e2af8b 10880 struct bpf_verifier_env *env;
b9193c1b 10881 struct bpf_verifier_log *log;
9e4c24e7 10882 int i, len, ret = -EINVAL;
e2ae4ca2 10883 bool is_priv;
51580e79 10884
eba0c929
AB
10885 /* no program is valid */
10886 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
10887 return -EINVAL;
10888
58e2af8b 10889 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
10890 * allocate/free it every time bpf_check() is called
10891 */
58e2af8b 10892 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
10893 if (!env)
10894 return -ENOMEM;
61bd5218 10895 log = &env->log;
cbd35700 10896
9e4c24e7 10897 len = (*prog)->len;
fad953ce 10898 env->insn_aux_data =
9e4c24e7 10899 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
10900 ret = -ENOMEM;
10901 if (!env->insn_aux_data)
10902 goto err_free_env;
9e4c24e7
JK
10903 for (i = 0; i < len; i++)
10904 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 10905 env->prog = *prog;
00176a34 10906 env->ops = bpf_verifier_ops[env->prog->type];
2c78ee89 10907 is_priv = bpf_capable();
0246e64d 10908
8580ac94
AS
10909 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
10910 mutex_lock(&bpf_verifier_lock);
10911 if (!btf_vmlinux)
10912 btf_vmlinux = btf_parse_vmlinux();
10913 mutex_unlock(&bpf_verifier_lock);
10914 }
10915
cbd35700 10916 /* grab the mutex to protect few globals used by verifier */
45a73c17
AS
10917 if (!is_priv)
10918 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
10919
10920 if (attr->log_level || attr->log_buf || attr->log_size) {
10921 /* user requested verbose verifier output
10922 * and supplied buffer to store the verification trace
10923 */
e7bf8249
JK
10924 log->level = attr->log_level;
10925 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
10926 log->len_total = attr->log_size;
cbd35700
AS
10927
10928 ret = -EINVAL;
e7bf8249 10929 /* log attributes have to be sane */
7a9f5c65 10930 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 10931 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 10932 goto err_unlock;
cbd35700 10933 }
1ad2f583 10934
8580ac94
AS
10935 if (IS_ERR(btf_vmlinux)) {
10936 /* Either gcc or pahole or kernel are broken. */
10937 verbose(env, "in-kernel BTF is malformed\n");
10938 ret = PTR_ERR(btf_vmlinux);
38207291 10939 goto skip_full_check;
8580ac94
AS
10940 }
10941
1ad2f583
DB
10942 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
10943 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 10944 env->strict_alignment = true;
e9ee9efc
DM
10945 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
10946 env->strict_alignment = false;
cbd35700 10947
2c78ee89
AS
10948 env->allow_ptr_leaks = bpf_allow_ptr_leaks();
10949 env->bypass_spec_v1 = bpf_bypass_spec_v1();
10950 env->bypass_spec_v4 = bpf_bypass_spec_v4();
10951 env->bpf_capable = bpf_capable();
e2ae4ca2 10952
10d274e8
AS
10953 if (is_priv)
10954 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
10955
f4e3ec0d
JK
10956 ret = replace_map_fd_with_map_ptr(env);
10957 if (ret < 0)
10958 goto skip_full_check;
10959
cae1927c 10960 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 10961 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 10962 if (ret)
f4e3ec0d 10963 goto skip_full_check;
ab3f0063
JK
10964 }
10965
dc2a4ebc 10966 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 10967 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
10968 GFP_USER);
10969 ret = -ENOMEM;
10970 if (!env->explored_states)
10971 goto skip_full_check;
10972
d9762e84 10973 ret = check_subprogs(env);
475fb78f
AS
10974 if (ret < 0)
10975 goto skip_full_check;
10976
c454a46b 10977 ret = check_btf_info(env, attr, uattr);
838e9690
YS
10978 if (ret < 0)
10979 goto skip_full_check;
10980
be8704ff
AS
10981 ret = check_attach_btf_id(env);
10982 if (ret)
10983 goto skip_full_check;
10984
d9762e84
MKL
10985 ret = check_cfg(env);
10986 if (ret < 0)
10987 goto skip_full_check;
10988
51c39bb1
AS
10989 ret = do_check_subprogs(env);
10990 ret = ret ?: do_check_main(env);
cbd35700 10991
c941ce9c
QM
10992 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
10993 ret = bpf_prog_offload_finalize(env);
10994
0246e64d 10995skip_full_check:
51c39bb1 10996 kvfree(env->explored_states);
0246e64d 10997
c131187d 10998 if (ret == 0)
9b38c405 10999 ret = check_max_stack_depth(env);
c131187d 11000
9b38c405 11001 /* instruction rewrites happen after this point */
e2ae4ca2
JK
11002 if (is_priv) {
11003 if (ret == 0)
11004 opt_hard_wire_dead_code_branches(env);
52875a04
JK
11005 if (ret == 0)
11006 ret = opt_remove_dead_code(env);
a1b14abc
JK
11007 if (ret == 0)
11008 ret = opt_remove_nops(env);
52875a04
JK
11009 } else {
11010 if (ret == 0)
11011 sanitize_dead_code(env);
e2ae4ca2
JK
11012 }
11013
9bac3d6d
AS
11014 if (ret == 0)
11015 /* program is valid, convert *(u32*)(ctx + off) accesses */
11016 ret = convert_ctx_accesses(env);
11017
e245c5c6 11018 if (ret == 0)
79741b3b 11019 ret = fixup_bpf_calls(env);
e245c5c6 11020
a4b1d3c1
JW
11021 /* do 32-bit optimization after insn patching has done so those patched
11022 * insns could be handled correctly.
11023 */
d6c2308c
JW
11024 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
11025 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
11026 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
11027 : false;
a4b1d3c1
JW
11028 }
11029
1ea47e01
AS
11030 if (ret == 0)
11031 ret = fixup_call_args(env);
11032
06ee7115
AS
11033 env->verification_time = ktime_get_ns() - start_time;
11034 print_verification_stats(env);
11035
a2a7d570 11036 if (log->level && bpf_verifier_log_full(log))
cbd35700 11037 ret = -ENOSPC;
a2a7d570 11038 if (log->level && !log->ubuf) {
cbd35700 11039 ret = -EFAULT;
a2a7d570 11040 goto err_release_maps;
cbd35700
AS
11041 }
11042
0246e64d
AS
11043 if (ret == 0 && env->used_map_cnt) {
11044 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
11045 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
11046 sizeof(env->used_maps[0]),
11047 GFP_KERNEL);
0246e64d 11048
9bac3d6d 11049 if (!env->prog->aux->used_maps) {
0246e64d 11050 ret = -ENOMEM;
a2a7d570 11051 goto err_release_maps;
0246e64d
AS
11052 }
11053
9bac3d6d 11054 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 11055 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 11056 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
11057
11058 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
11059 * bpf_ld_imm64 instructions
11060 */
11061 convert_pseudo_ld_imm64(env);
11062 }
cbd35700 11063
ba64e7d8
YS
11064 if (ret == 0)
11065 adjust_btf_func(env);
11066
a2a7d570 11067err_release_maps:
9bac3d6d 11068 if (!env->prog->aux->used_maps)
0246e64d 11069 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 11070 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
11071 */
11072 release_maps(env);
03f87c0b
THJ
11073
11074 /* extension progs temporarily inherit the attach_type of their targets
 11075 * for verification purposes, so set it back to zero before returning
 11076 */
11077 if (env->prog->type == BPF_PROG_TYPE_EXT)
11078 env->prog->expected_attach_type = 0;
11079
9bac3d6d 11080 *prog = env->prog;
3df126f3 11081err_unlock:
45a73c17
AS
11082 if (!is_priv)
11083 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
11084 vfree(env->insn_aux_data);
11085err_free_env:
11086 kfree(env);
51580e79
AS
11087 return ret;
11088}
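/* Summary of the pass ordering wired up in bpf_check() above:
 *
 *   replace_map_fd_with_map_ptr -> check_subprogs -> check_btf_info
 *   -> check_attach_btf_id -> check_cfg -> do_check_subprogs
 *   -> do_check_main -> check_max_stack_depth -> dead-code passes
 *   -> convert_ctx_accesses -> fixup_bpf_calls
 *   -> opt_subreg_zext_lo32_rnd_hi32 -> fixup_call_args
 *
 * all instruction-rewriting passes run only after verification has
 * succeeded.
 */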